prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k)
---|---|
<|file_name|>connection.js<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | var s = "Connected"; |
<|file_name|>abdt_rbranchnaming__t.py<|end_file_name|><|fim▁begin|>"""Test suite for abdt_rbranchnaming."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# [XB] review names that are globally known to be bad are not accepted
# [XB] tracker names that are globally known to be bad are not accepted
# [XC] names that are known to be potential reviews aren't accepted as trackers
# [XC] names that are known to be potential trackers aren't accepted as reviews
# [XD] ReviewBranches created by the scheme have the expected attributes
# [XD] ReviewBranches created by the scheme can create expected TrackerBranches
# [XD] TrackerBranches created by the scheme have the expected attributes
# [XD] there is a 1-1 relationship between tracker params and tracker names
# -----------------------------------------------------------------------------
# Tests:
# [ A] XXX: test_A_Breathing
# [XA] check_XA_Breathing
# [XB] check_XB_globally_invalid_review_tracker_names
# [XC] check_XC_potentially_valid_review_tracker_names
# [XD] check_XD_valid_reviews
# =============================================================================
from __future__ import absolute_import
import unittest
import abdt_namingtester
import abdt_rbranchnaming
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def make_naming(self):
return abdt_rbranchnaming.Naming()
def test_A_Breathing(self):
pass
def test_XA_Breathing(self):
abdt_namingtester.check_XA_Breathing(self)
def test_XB_globally_invalid_review_tracker_names(self):
abdt_namingtester.check_XB_globally_invalid_review_tracker_names(
self, self.make_naming())
def test_XC_potentially_valid_review_tracker_names(self):
abdt_namingtester.check_XC_potentially_valid_review_tracker_names(
self, self.make_naming())
def test_XD_valid_reviews(self):<|fim▁hole|>
for properties in abdt_namingtester.VALID_REVIEW_PROPERTIES:
name = 'r/{base}/{description}'.format(
description=properties.description,
base=properties.base)
assert name not in names_to_properties
names_to_properties[name] = properties
abdt_namingtester.check_XD_valid_reviews(
self, self.make_naming(), names_to_properties)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------<|fim▁end|> | names_to_properties = {} |
<|file_name|>cli.py<|end_file_name|><|fim▁begin|>import os
import sys
import yaml
import argparse
from code import interact
def parse_options(argv):
parser = argparse.ArgumentParser(description='Macumba Shell',
prog='macumba-shell')
parser.add_argument('--v1', action='store_true', dest='v1',
help='Use Juju 1.x API')
parser.add_argument('--v2', action='store_true', dest='v2',
help='Use Juju 2.x API')
parser.add_argument('-m', '--model', dest='model',
help='The Environment(v1)/Model(v2) to connect to.')
return parser.parse_args(argv)
def main():
opts = parse_options(sys.argv[1:])
if not opts.model:
raise Exception("Must choose a Environment/Model.")
if opts.v1:
juju_home = os.getenv("JUJU_HOME", "~/.juju")
from .v1 import JujuClient # noqa
env = os.path.expanduser(
os.path.join(
juju_home,
"environments/{}.jenv".format(opts.model)))
if not os.path.isfile(env):
raise Exception("Unable to locate: {}".format(env))
env_yaml = yaml.load(open(env))
uuid = env_yaml['environ-uuid']
server = env_yaml['state-servers'][0]
password = env_yaml['password']
user = env_yaml['user']
url = os.path.join('wss://', server, 'environment', uuid, 'api')
elif opts.v2:
xdg_home = os.getenv("XDG_DATA_HOME", "~/.local/share")
juju_home = os.path.join(xdg_home, 'juju')
from .v2 import JujuClient # noqa
env = os.path.expanduser(
os.path.join(
juju_home,
"models/cache.yaml"))
if not os.path.isfile(env):
raise Exception("Unable to locate: {}".format(env))
env = yaml.load(open(env))
uuid = env['server-user'][opts.model]['server-uuid']
server = env['server-data'][uuid]['api-endpoints'][0]
password = env['server-data'][uuid]['identities']['admin']<|fim▁hole|> raise Exception("Could not determine Juju API Version to use.")
print('Connecting to {}'.format(url))
j = JujuClient(url=url, password=password)
j.login()
interact(banner="juju client logged in. Object is named 'j',"
" so j.status() will fetch current status as a dict.",
local=locals())<|fim▁end|> | url = os.path.join('wss://', server, 'model', uuid, 'api')
else: |
<|file_name|>SelectScriptPanel.java<|end_file_name|><|fim▁begin|>package whelk.gui;
import whelk.PortableScript;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
public class SelectScriptPanel extends WizardCard implements ActionListener
{
Wizard wizard;
JLabel description = new JLabel();
public SelectScriptPanel(Wizard wizard)
{
super(wizard);
this.wizard = wizard;
Box vBox = Box.createVerticalBox();
        JButton loadButton = new JButton("Open script file");
loadButton.addActionListener(this);
loadButton.setActionCommand("load");
vBox.add(loadButton);
vBox.add(Box.createVerticalStrut(10));
vBox.add(new JLabel("Valt script:"));
vBox.add(description);<|fim▁hole|>
this.add(vBox);
}
@Override
void onShow(Object parameterFromPreviousCard)
{
setNextCard(Wizard.RUN);
disableNext();
}
@Override
public void actionPerformed(ActionEvent actionEvent)
{
if (actionEvent.getActionCommand().equals("load"))
{
JFileChooser chooser = new JFileChooser();
chooser.setPreferredSize(new Dimension(1024, 768));
int returnVal = chooser.showOpenDialog(wizard);
if(returnVal == JFileChooser.APPROVE_OPTION)
{
File chosenFile = chooser.getSelectedFile();
try (ObjectInputStream ois = new ObjectInputStream(new FileInputStream(chosenFile)))
{
Object loaded = ois.readObject();
if (loaded instanceof PortableScript)
{
PortableScript loadedScript = (PortableScript) loaded;
description.setText(loadedScript.comment);
setParameterForNextCard(loaded);
enableNext();
}
} catch (IOException | ClassNotFoundException ioe)
{
Wizard.exitFatal(ioe.getMessage());
}
}
}
}
}<|fim▁end|> | |
<|file_name|>MainFrame.java<|end_file_name|><|fim▁begin|>package net.robobalasko.dfa.gui;
import net.robobalasko.dfa.core.Automaton;
import net.robobalasko.dfa.core.exceptions.NodeConnectionMissingException;
import net.robobalasko.dfa.core.exceptions.StartNodeMissingException;
import javax.swing.*;
import javax.swing.border.EmptyBorder;
import javax.swing.event.DocumentEvent;
import javax.swing.event.DocumentListener;
import javax.swing.text.Document;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
public class MainFrame extends JFrame {
private final Automaton automaton;
private JButton checkButton;
public MainFrame(final Automaton automaton) throws HeadlessException {
super("DFA Simulator");
this.automaton = automaton;
setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
JPanel containerPanel = new JPanel();
containerPanel.setLayout(new BoxLayout(containerPanel, BoxLayout.PAGE_AXIS));
containerPanel.setBorder(new EmptyBorder(20, 20, 20, 20));
setContentPane(containerPanel);
CanvasPanel canvasPanel = new CanvasPanel(this, automaton);
containerPanel.add(canvasPanel);<|fim▁hole|> final JTextField inputText = new JTextField(40);
Document document = inputText.getDocument();
document.addDocumentListener(new DocumentListener() {
@Override
public void insertUpdate(DocumentEvent e) {
checkButton.setEnabled(e.getDocument().getLength() > 0);
}
@Override
public void removeUpdate(DocumentEvent e) {
checkButton.setEnabled(e.getDocument().getLength() > 0);
}
@Override
public void changedUpdate(DocumentEvent e) {
checkButton.setEnabled(e.getDocument().getLength() > 0);
}
});
checkInputPanel.add(inputText);
checkButton = new JButton("Check");
checkButton.setEnabled(false);
checkButton.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
try {
JOptionPane.showMessageDialog(MainFrame.this,
automaton.acceptsString(inputText.getText())
? "Input accepted."
: "Input rejected.");
} catch (StartNodeMissingException ex) {
JOptionPane.showMessageDialog(MainFrame.this, "Missing start node.");
} catch (NodeConnectionMissingException ex) {
JOptionPane.showMessageDialog(MainFrame.this, "Not a good string. Automat doesn't accept it.");
}
}
});
checkInputPanel.add(checkButton);
setResizable(false);
setVisible(true);
pack();
}
}<|fim▁end|> |
JPanel checkInputPanel = new JPanel(new FlowLayout(FlowLayout.CENTER));
containerPanel.add(checkInputPanel);
|
<|file_name|>theme.rs<|end_file_name|><|fim▁begin|>use handlebars::{Context, Handlebars, Helper, Output, RenderContext, RenderError};
pub fn theme_option(
h: &Helper<'_, '_>,
_r: &Handlebars<'_>,
ctx: &Context,
rc: &mut RenderContext<'_, '_>,
out: &mut dyn Output,
) -> Result<(), RenderError> {
trace!("theme_option (handlebars helper)");
let param = h.param(0).and_then(|v| v.value().as_str()).ok_or_else(|| {
RenderError::new("Param 0 with String type is required for theme_option helper.")
})?;
let default_theme = rc.evaluate(ctx, "@root/default_theme")?;
let default_theme_name = default_theme
.as_json()
.as_str()
.ok_or_else(|| RenderError::new("Type error for `default_theme`, string expected"))?;
<|fim▁hole|> out.write(" (default)")?;
}
Ok(())
}<|fim▁end|> | out.write(param)?;
if param.to_lowercase() == default_theme_name.to_lowercase() { |
<|file_name|>backup.py<|end_file_name|><|fim▁begin|>import os
import shutil
import boto
from boto.s3.key import Key
import subprocess
from io import StringIO
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.conf import settings
from django.utils import timezone
from django.core.mail import send_mail
from core import models
def copy_file(source, destination):
"""
    :param source: The source path of the file to copy
    :param destination: The destination path for the file
    :return: None
"""
destination_folder = os.path.join(settings.BASE_DIR, os.path.dirname(destination))
if not os.path.exists(destination_folder):
os.mkdir(destination_folder)
print("Copying {0}".format(source))
shutil.copy(os.path.join(settings.BASE_DIR, source),
os.path.join(settings.BASE_DIR, destination))
def copy_files(src_path, dest_path):
"""
:param src_path: The source folder for copying
:param dest_path: The destination these files/folders should be copied to
:return: None
"""
if not os.path.exists(src_path):
os.makedirs(src_path)
files = os.listdir(src_path)
for file_name in files:
if not file_name == 'temp':
full_file_name = os.path.join(src_path, file_name)
print("Copying {0}".format(full_file_name))<|fim▁hole|> if os.path.exists(dir_dest):
shutil.rmtree(os.path.join(dir_dest))
shutil.copytree(full_file_name, dir_dest)
def mycb(so_far, total):
print('{0} kb transferred out of {1}'.format(so_far / 1024, total / 1024))
def handle_s3(tmp_path, start_time):
print("Sending to S3.")
file_name = '{0}.zip'.format(start_time)
file_path = os.path.join(settings.BASE_DIR, 'files', 'temp', file_name)
f = open(file_path, 'rb')
END_POINT = settings.END_POINT
S3_HOST = settings.S3_HOST
UPLOADED_FILENAME = 'backups/{0}.zip'.format(start_time)
# include folders in file path. If it doesn't exist, it will be created
s3 = boto.s3.connect_to_region(END_POINT,
aws_access_key_id=settings.S3_ACCESS_KEY,
aws_secret_access_key=settings.S3_SECRET_KEY,
host=S3_HOST)
bucket = s3.get_bucket(settings.S3_BUCKET_NAME)
k = Key(bucket)
k.key = UPLOADED_FILENAME
k.set_contents_from_file(f, cb=mycb, num_cb=200)
def handle_directory(tmp_path, start_time):
print("Copying to backup dir")
file_name = '{0}.zip'.format(start_time)
copy_file('files/temp/{0}'.format(file_name), settings.BACKUP_DIR)
def delete_used_tmp(tmp_path, start_time):
print("Deleting temp directory.")
shutil.rmtree(tmp_path)
file_path = "{0}/{1}.zip".format(os.path.join(settings.BASE_DIR, 'files', 'temp'), start_time)
os.unlink(file_path)
def send_email(start_time, e, success=False):
admins = models.Account.objects.filter(is_superuser=True)
message = ''
if not success:
message = 'There was an error during the backup process.\n\n '
send_mail(
'Backup',
'{0}{1}.'.format(message, e),
'backup@janeway',
[user.email for user in admins],
fail_silently=False,
)
class Command(BaseCommand):
"""
Pulls files together then sends them to aws bucket.
"""
help = "Deletes duplicate settings."
def handle(self, *args, **options):
"""Does a backup..
:param args: None
:param options: None
:return: None
"""
# Ensure temp dir exists:
if not os.path.exists(os.path.join(settings.BASE_DIR, 'files', 'temp')):
os.makedirs(os.path.join(settings.BASE_DIR, 'files', 'temp'))
start_time = str(timezone.now())
try:
tmp_path = os.path.join(settings.BASE_DIR, 'files', 'temp', start_time)
# dump database out to JSON and store in StringIO for saving
print('Dumping json db file')
json_out = StringIO()
call_command('dumpdata', '--indent=4', '--natural-foreign', '--exclude=contenttypes', stdout=json_out)
write_path = os.path.join(settings.BASE_DIR, 'files', 'temp', 'janeway.json')
with open(write_path, 'w', encoding="utf-8") as write:
json_out.seek(0)
shutil.copyfileobj(json_out, write)
os.mkdir(tmp_path)
copy_file('files/temp/janeway.json', 'files/temp/{0}/janeway.json'.format(start_time))
copy_files(os.path.join(settings.BASE_DIR, 'media'), os.path.join(tmp_path, 'media'))
copy_files(os.path.join(settings.BASE_DIR, 'files'), os.path.join(tmp_path, 'files'))
print("Creating archive.")
shutil.make_archive(os.path.join(settings.BASE_DIR, 'files', 'temp', start_time), 'zip', tmp_path)
if settings.BACKUP_TYPE == 's3':
handle_s3(tmp_path, start_time)
else:
handle_directory(tmp_path, start_time)
delete_used_tmp(tmp_path, start_time)
if settings.BACKUP_EMAIL:
send_email(start_time, 'Backup was successfully completed.')
except Exception as e:
send_email(start_time, e)<|fim▁end|> | if os.path.isfile(full_file_name):
shutil.copy(full_file_name, dest_path)
else:
dir_dest = os.path.join(dest_path, file_name) |
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# RBFOpt documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 11 00:01:21 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from mock import Mock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src/'))
# -- Mock modules for autodoc
MOCK_MODULES = ['argparse', 'numpy', 'scipy', 'pyDOE']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RBFOpt'
copyright = u'2015, Singapore University of Technology and Design'
author = u'Giacomo Nannicini'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#<|fim▁hole|># There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'RBFOptdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RBFOpt.tex', u'RBFOpt Documentation',
u'Giacomo Nannicini', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rbfopt', u'RBFOpt Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'RBFOpt', u'RBFOpt Documentation',
author, 'RBFOpt', 'Library for black-box optimization.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True<|fim▁end|> | # This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
|
<|file_name|>aql_deftool_cc.py<|end_file_name|><|fim▁begin|>import os.path
import SCons.Tool
import aql
#//---------------------------------------------------------------------------//<|fim▁hole|>
_Warning = aql.Warning
_Tool = SCons.Tool.Tool
#//---------------------------------------------------------------------------//
def generate( env ):
toolsets = (
"aql_tool_gcc",
"aql_tool_msvc",
#~ "aql_tool_bcc"
)
for tool in toolsets:
tool = _Tool( tool )
if tool.exists( env ):
tool( env )
return
_Warning("C/C++ toolchain has not been found.")
default_tool_name = os.path.splitext( os.path.basename( __file__ ) )[0]
env['TOOLS'].remove( default_tool_name )
#//---------------------------------------------------------------------------//
def exists(env):
return 1<|fim▁end|> | |
<|file_name|>version_base.go<|end_file_name|><|fim▁begin|>package version
func init() {
// The main version number that is being run at the moment.
Version = "0.10.3"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
VersionPrerelease = ""<|fim▁hole|><|fim▁end|> | } |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""
rohmu
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
from . errors import InvalidConfigurationError
IO_BLOCK_SIZE = 2 ** 20 # 1 MiB
<|fim▁hole|>def get_class_for_transfer(storage_type):
if storage_type == "azure":
from .object_storage.azure import AzureTransfer
return AzureTransfer
elif storage_type == "google":
from .object_storage.google import GoogleTransfer
return GoogleTransfer
elif storage_type == "local":
from .object_storage.local import LocalTransfer
return LocalTransfer
elif storage_type == "s3":
from .object_storage.s3 import S3Transfer
return S3Transfer
elif storage_type == "swift":
from .object_storage.swift import SwiftTransfer
return SwiftTransfer
raise InvalidConfigurationError("unsupported storage type {0!r}".format(storage_type))
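# Usage sketch for get_transfer() below (the "storage_type" key is read by the
# code here; the remaining keys are hypothetical and depend on the backend):
#   transfer = get_transfer({"storage_type": "local", "directory": "/var/backup"})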
def get_transfer(storage_config, *, storage_type=None):
# TODO: drop storage_type from the function signature, always read it from the config
if "storage_type" in storage_config:
storage_config = storage_config.copy()
storage_type = storage_config.pop("storage_type")
storage_class = get_class_for_transfer(storage_type)
return storage_class(**storage_config)<|fim▁end|> | |
<|file_name|>AccessOverview.cpp<|end_file_name|><|fim▁begin|>/*
Copyright (C) 2014-2016 Leosac
This file is part of Leosac.
Leosac is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Leosac is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "AccessOverview.hpp"
#include "core/auth/Door.hpp"
#include "core/auth/Door_odb.h"
#include "core/auth/User_odb.h"
#include "tools/JSONUtils.hpp"
#include "tools/db/DBService.hpp"
using namespace Leosac;
using namespace Leosac::Module;
using namespace Leosac::Module::WebSockAPI;
AccessOverview::AccessOverview(RequestContext ctx)
: MethodHandler(ctx)
{
}
MethodHandlerUPtr AccessOverview::create(RequestContext ctx)
{
return std::make_unique<AccessOverview>(ctx);
}
json AccessOverview::process_impl(const json &)
{
json rep;
DBPtr db = ctx_.dbsrv->db();
odb::transaction t(db->begin());
// todo: This probably doesn't scale very well...
auto doors = db->query<Auth::Door>();
// Since we'll be looping over users multiple time, we cannot use
// an odb::result object.
auto users_odb = db->query<Auth::User>();
// So we'll have to convert this to a vector of User, instead of
// odb::result::iterator.
std::vector<Auth::UserPtr> users;
for (auto itr_odb(users_odb.begin()); itr_odb != users_odb.end(); ++itr_odb)
users.push_back(itr_odb.load());
for (const auto &door : doors)
{
std::set<Auth::UserId> unique_user_ids;
json door_info = {{"door_id", door.id()}, {"user_ids", json::array()}};
for (const auto &lazy_mapping : door.lazy_mapping())
{
auto mapping = lazy_mapping.load();
for (const auto &user_ptr : users)
{
// Check the std::set in case the user is already authorized to
// access the door.
if (unique_user_ids.count(user_ptr->id()))
{
continue;
}
if (mapping->has_user_indirect(user_ptr))
{<|fim▁hole|> }
for (const auto &id : unique_user_ids)
door_info["user_ids"].push_back(id);
rep.push_back(door_info);
}
return rep;
}
std::vector<ActionActionParam>
AccessOverview::required_permission(const json &) const
{
std::vector<ActionActionParam> perm_;
SecurityContext::ActionParam ap;
perm_.push_back({SecurityContext::Action::ACCESS_OVERVIEW, ap});
return perm_;
}<|fim▁end|> | unique_user_ids.insert(user_ptr->id());
}
} |
<|file_name|>test_genus.py<|end_file_name|><|fim▁begin|>import pytest
import bauble.db as db
from bauble.model.family import Family
from bauble.model.genus import Genus, GenusSynonym, GenusNote
import test.api as api<|fim▁hole|>def setup(organization, session):
setup.organization = session.merge(organization)
setup.user = setup.organization.owners[0]
setup.session = session
db.set_session_schema(session, setup.organization.pg_schema)
return setup
def test_genus_json(setup):
session = setup.session
family = Family(family=api.get_random_name())
genus_name = api.get_random_name()
genus = Genus(family=family, genus=genus_name)
note = GenusNote(genus=genus, note="this is a test")
syn = GenusSynonym(genus=genus, synonym=genus)
session.add_all([family, genus, note, syn])
session.commit()
genus_json = genus.json()
assert 'id' in genus_json
assert genus_json['id'] == genus.id
assert 'genus' in genus_json
assert 'str' in genus_json
assert 'qualifier' in genus_json
note_json = note.json()
assert 'id' in note_json
assert 'genus_id' in note_json
assert note_json['genus_id'] == genus.id
syn_json = syn.json()
assert 'id' in syn_json
assert syn_json['genus_id'] == genus.id
assert syn_json['synonym_id'] == genus.id
session.delete(genus)
session.commit()
session.close()
def test_server(setup):
"""
Test the server properly handle /genus resources
"""
user = setup.user
family = api.create_resource('/family', {'family': api.get_random_name()}, user)
# create a genus
first_genus = api.create_resource('/genus', {'genus': api.get_random_name(), 'family': family},
user)
# create another genus and use the first as a synonym
data = {'genus': api.get_random_name(),
'family': family,
'notes': [{'user': 'me', 'category': 'test', 'date': '2001-1-1',
'note': 'test note'},
{'user': 'me', 'category': 'test', 'date': '2002-2-2',
'note': 'test note2'}],
'synonyms': [first_genus]
#'synonyms': [{'synonym': first_genus}]
}
second_genus = api.create_resource('/genus', data, user)
assert 'id' in second_genus # created
# update the genus
second_genus['genus'] = api.get_random_name()
second_id = second_genus['id']
second_genus = api.update_resource('/genus/' + str(second_id), second_genus, user=user)
assert second_genus['id'] == second_id # make sure they have the same id after the update
# get the genus
first_genus = api.get_resource('/genus/' + str(first_genus['id']), user=user)
# query for genera and make sure the second genus is in the results
genera = api.query_resource('/genus', q=second_genus['genus'], user=user)
# TODO: ** shouldn't len(genera) be 1 since the name should be unique
#assert second_genus['ref'] in [genus['ref'] for genus in genera]
assert second_genus['id'] in [genus['id'] for genus in genera]
# test getting the genus relative to its family
# ** TODO: now we just embed the relation in the /genera/:id
# ** request....need to create a test to make sure it's happening
# genera = api.get_resource('/family/' + str(family['id']) + "/genera", user=user)
# assert first_genus['id'] in [genus['id'] for genus in genera]
# test getting a family with its genera relations
# ** TODO: now we just embed the relation in the /genera/:id
# ** request....need to create a test to make sure it's happening
#response_json = api.query_resource('/family', q=family['family'], relations="genera,notes", user=user)
#families = response_json
# TODO: *** i don't know if we still support returning relations like this...do
# we need to
# print(families[0]['genera'])
# assert first_genus['ref'] in [genus['ref'] for genus in families[0]['genera']]
# count the number of genera on a family
# TODO: ** count is temporarily disabled
# count = api.count_resource(family['ref'] + "/genera")
# assert count == "2"
# delete the created resources
api.delete_resource('/genus/' + str(first_genus['id']), user)
api.delete_resource('/genus/' + str(second_genus['id']), user)
api.delete_resource('/family/' + str(family['id']), user)<|fim▁end|> |
@pytest.fixture |
<|file_name|>constructorTypeNodeTests.ts<|end_file_name|><|fim▁begin|>import { SyntaxKind } from "@ts-morph/common";
import { expect } from "chai";
import { ConstructorTypeNode } from "../../../../compiler";
import { getInfoFromTextWithDescendant } from "../../testHelpers";
describe(nameof(ConstructorTypeNode), () => {
function getNode(text: string) {
return getInfoFromTextWithDescendant<ConstructorTypeNode>(text, SyntaxKind.ConstructorType);
}
describe(nameof<ConstructorTypeNode>(d => d.getReturnTypeNodeOrThrow), () => {
it("should get the return type", () => {
const { descendant } = getNode("var t: new() => SomeClass;");
expect(descendant.getReturnTypeNodeOrThrow().getText()).to.equal("SomeClass");
});
});
describe(nameof<ConstructorTypeNode>(d => d.getParameters), () => {
it("should get the parameters", () => {
const { descendant } = getNode("var t: new(param1, param2) => SomeClass;");
expect(descendant.getParameters().map(p => p.getText())).to.deep.equal(["param1", "param2"]);
});
});
describe(nameof<ConstructorTypeNode>(d => d.isAbstract), () => {
function doTest(text: string, value: boolean) {<|fim▁hole|> const { descendant } = getNode(text);
expect(descendant.isAbstract()).to.equal(value);
}
it("should get if abstract", () => {
doTest("type Test = abstract new() => String;", true);
});
it("should get when not abstract", () => {
doTest("type Test = new() => String;", false);
});
});
describe(nameof<ConstructorTypeNode>(d => d.setIsAbstract), () => {
function doTest(text: string, value: boolean, expectedText: string) {
const { sourceFile, descendant } = getNode(text);
descendant.setIsAbstract(value);
expect(sourceFile.getFullText()).to.equal(expectedText);
}
it("should set as abstract", () => {
doTest("type Test = new() => String;", true, "type Test = abstract new() => String;");
});
it("should set as not abstract", () => {
doTest("type Test = abstract new() => String;", false, "type Test = new() => String;");
});
});
});<|fim▁end|> | |
<|file_name|>core.py<|end_file_name|><|fim▁begin|># This Python file uses the following encoding: utf-8
# This file is part of InputShare
#
# Copyright © 2015 Patrick VanDusen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,<|fim▁hole|># GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.<|fim▁end|> | # but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8 -*-<|fim▁hole|>
from __future__ import unicode_literals
default_app_config = 'yepes.contrib.slugs.apps.SlugsConfig'<|fim▁end|> | |
<|file_name|>flanking_features.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#By: Guruprasad Ananda
"""
Fetch closest up/downstream interval from features corresponding to every interval in primary
usage: %prog primary_file features_file out_file direction
-1, --cols1=N,N,N,N: Columns for start, end, strand in first file
-2, --cols2=N,N,N,N: Columns for start, end, strand in second file
"""
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
import sys, traceback, fileinput
from warnings import warn
from bx.cookbook import doc_optparse
from galaxy.tools.util.galaxyops import *
from bx.intervals.io import *
from bx.intervals.operations import quicksect
assert sys.version_info[:2] >= ( 2, 4 )
def get_closest_feature (node, direction, threshold_up, threshold_down, report_func_up, report_func_down):
#direction=1 for +ve strand upstream and -ve strand downstream cases; and it is 0 for +ve strand downstream and -ve strand upstream cases
    #threshold_up is equal to the interval start for the +ve strand, and the interval end for the -ve strand
    #threshold_down is equal to the interval end for the +ve strand, and the interval start for the -ve strand
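    # E.g. for a +ve strand interval, upstream features are those ending before
    # the interval start, so callers pass direction=1 with threshold_up set to
    # interval.start (see the calls in proximal_region_finder below).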
if direction == 1:
if node.maxend < threshold_up:
if node.end == node.maxend:
report_func_up(node)
elif node.right and node.left:
if node.right.maxend == node.maxend:
get_closest_feature(node.right, direction, threshold_up, threshold_down, report_func_up, report_func_down)
elif node.left.maxend == node.maxend:
get_closest_feature(node.left, direction, threshold_up, threshold_down, report_func_up, report_func_down)
elif node.right and node.right.maxend == node.maxend:
get_closest_feature(node.right, direction, threshold_up, threshold_down, report_func_up, report_func_down)
elif node.left and node.left.maxend == node.maxend:
get_closest_feature(node.left, direction, threshold_up, threshold_down, report_func_up, report_func_down)
elif node.minend < threshold_up:
if node.end < threshold_up:
report_func_up(node)
if node.left and node.right:
if node.right.minend < threshold_up:
get_closest_feature(node.right, direction, threshold_up, threshold_down, report_func_up, report_func_down)
if node.left.minend < threshold_up:
get_closest_feature(node.left, direction, threshold_up, threshold_down, report_func_up, report_func_down)
elif node.left:
if node.left.minend < threshold_up:
get_closest_feature(node.left, direction, threshold_up, threshold_down, report_func_up, report_func_down)
elif node.right:
if node.right.minend < threshold_up:
get_closest_feature(node.right, direction, threshold_up, threshold_down, report_func_up, report_func_down)
elif direction == 0:
if node.start > threshold_down:
report_func_down(node)
if node.left:
get_closest_feature(node.left, direction, threshold_up, threshold_down, report_func_up, report_func_down)
else:
if node.right:
get_closest_feature(node.right, direction, threshold_up, threshold_down, report_func_up, report_func_down)
def proximal_region_finder(readers, region, comments=True):
primary = readers[0]
features = readers[1]
either = False
if region == 'Upstream':
up, down = True, False
elif region == 'Downstream':
up, down = False, True
else:
up, down = True, True
if region == 'Either':
either = True
# Read features into memory:
rightTree = quicksect.IntervalTree()
for item in features:
if type( item ) is GenomicInterval:
rightTree.insert( item, features.linenum, item.fields )
for interval in primary:
if type( interval ) is Header:
yield interval
if type( interval ) is Comment and comments:
yield interval
elif type( interval ) == GenomicInterval:
chrom = interval.chrom
start = int(interval.start)
end = int(interval.end)
strand = interval.strand
if chrom not in rightTree.chroms:
continue
else:
root = rightTree.chroms[chrom] #root node for the chrom tree
result_up = []
result_down = []
if (strand == '+' and up) or (strand == '-' and down):
#upstream +ve strand and downstream -ve strand cases
get_closest_feature (root, 1, start, None, lambda node: result_up.append( node ), None)
if (strand == '+' and down) or (strand == '-' and up):
#downstream +ve strand and upstream -ve strand case
get_closest_feature (root, 0, None, end, None, lambda node: result_down.append( node ))<|fim▁hole|> outfields = list(interval)
                if len(result_up) > 1: # result_up holds the intervals upstream of the given interval.
ends = []
for n in result_up:
ends.append(n.end)
                    res_ind = ends.index(max(ends)) # index of the closest interval, i.e. the one with the max end in result_up
else:
res_ind = 0
if not(either):
map(outfields.append, result_up[res_ind].other)
yield outfields
if result_down:
outfields = list(interval)
if not(either):
map(outfields.append, result_down[-1].other) #The last element of result_down will be the closest element to the given interval
yield outfields
if either and (result_up or result_down):
if result_up and result_down:
if abs(start - int(result_up[res_ind].end)) <= abs(end - int(result_down[-1].start)):
map(outfields.append, result_up[res_ind].other)
else:
map(outfields.append, result_down[-1].other) #The last element of result_down will be the closest element to the given interval
elif result_up:
map(outfields.append, result_up[res_ind].other)
elif result_down:
map(outfields.append, result_down[-1].other) #The last element of result_down will be the closest element to the given interval
yield outfields
def main():
options, args = doc_optparse.parse( __doc__ )
try:
chr_col_1, start_col_1, end_col_1, strand_col_1 = parse_cols_arg( options.cols1 )
chr_col_2, start_col_2, end_col_2, strand_col_2 = parse_cols_arg( options.cols2 )
in_fname, in2_fname, out_fname, direction = args
except:
doc_optparse.exception()
g1 = NiceReaderWrapper( fileinput.FileInput( in_fname ),
chrom_col=chr_col_1,
start_col=start_col_1,
end_col=end_col_1,
strand_col=strand_col_1,
fix_strand=True )
g2 = NiceReaderWrapper( fileinput.FileInput( in2_fname ),
chrom_col=chr_col_2,
start_col=start_col_2,
end_col=end_col_2,
strand_col=strand_col_2,
fix_strand=True )
out_file = open( out_fname, "w" )
try:
for line in proximal_region_finder([g1,g2], direction):
if type( line ) is list:
out_file.write( "%s\n" % "\t".join( line ) )
else:
out_file.write( "%s\n" % line )
except ParseError, exc:
fail( "Invalid file format: %s" % str( exc ) )
print "Direction: %s" %(direction)
if g1.skipped > 0:
print skipped( g1, filedesc=" of 1st dataset" )
if g2.skipped > 0:
print skipped( g2, filedesc=" of 2nd dataset" )
if __name__ == "__main__":
main()<|fim▁end|> |
if result_up: |
<|file_name|>peers_api.rs<|end_file_name|><|fim▁begin|>// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::utils::w;
use crate::p2p::types::{PeerAddr, PeerInfoDisplay, ReasonForBan};
use crate::p2p::{self, PeerData};
use crate::rest::*;
use crate::router::{Handler, ResponseFuture};
use crate::web::*;
use hyper::{Body, Request, StatusCode};
use std::net::SocketAddr;
use std::sync::Weak;
pub struct PeersAllHandler {
pub peers: Weak<p2p::Peers>,
}
impl Handler for PeersAllHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers = &w_fut!(&self.peers).all_peer_data();
json_response_pretty(&peers)
}
}
pub struct PeersConnectedHandler {<|fim▁hole|>
impl PeersConnectedHandler {
pub fn get_connected_peers(&self) -> Result<Vec<PeerInfoDisplay>, Error> {
let peers = w(&self.peers)?
.iter()
.connected()
.into_iter()
.map(|p| p.info.clone().into())
.collect::<Vec<PeerInfoDisplay>>();
Ok(peers)
}
}
impl Handler for PeersConnectedHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers: Vec<PeerInfoDisplay> = w_fut!(&self.peers)
.iter()
.connected()
.into_iter()
.map(|p| p.info.clone().into())
.collect();
json_response(&peers)
}
}
/// Peer operations
/// GET /v1/peers/10.12.12.13
/// POST /v1/peers/10.12.12.13/ban
/// POST /v1/peers/10.12.12.13/unban
pub struct PeerHandler {
pub peers: Weak<p2p::Peers>,
}
impl PeerHandler {
pub fn get_peers(&self, addr: Option<SocketAddr>) -> Result<Vec<PeerData>, Error> {
if let Some(addr) = addr {
let peer_addr = PeerAddr(addr);
let peer_data: PeerData = w(&self.peers)?.get_peer(peer_addr).map_err(|e| {
let e: Error = ErrorKind::Internal(format!("get peer error: {:?}", e)).into();
e
})?;
return Ok(vec![peer_data]);
}
let peers = w(&self.peers)?.all_peer_data();
Ok(peers)
}
pub fn ban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let peer_addr = PeerAddr(addr);
w(&self.peers)?
.ban_peer(peer_addr, ReasonForBan::ManualBan)
.map_err(|e| ErrorKind::Internal(format!("ban peer error: {:?}", e)).into())
}
pub fn unban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let peer_addr = PeerAddr(addr);
w(&self.peers)?
.unban_peer(peer_addr)
.map_err(|e| ErrorKind::Internal(format!("unban peer error: {:?}", e)).into())
}
}
impl Handler for PeerHandler {
fn get(&self, req: Request<Body>) -> ResponseFuture {
let command = right_path_element!(req);
// We support both "ip" and "ip:port" here for peer_addr.
// "ip:port" is only really useful for local usernet testing on loopback address.
// Normally we map peers to ip and only allow a single peer per ip address.
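		// e.g. command = "10.12.12.13" (ip only) or "127.0.0.1:13414"
		// (ip:port); the port shown here is illustrative, not from the source.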
let peer_addr;
if let Ok(ip_addr) = command.parse() {
peer_addr = PeerAddr::from_ip(ip_addr);
} else if let Ok(addr) = command.parse() {
peer_addr = PeerAddr(addr);
} else {
return response(
StatusCode::BAD_REQUEST,
format!("peer address unrecognized: {}", req.uri().path()),
);
}
match w_fut!(&self.peers).get_peer(peer_addr) {
Ok(peer) => json_response(&peer),
Err(_) => response(StatusCode::NOT_FOUND, "peer not found"),
}
}
fn post(&self, req: Request<Body>) -> ResponseFuture {
let mut path_elems = req.uri().path().trim_end_matches('/').rsplit('/');
let command = match path_elems.next() {
None => return response(StatusCode::BAD_REQUEST, "invalid url"),
Some(c) => c,
};
let addr = match path_elems.next() {
None => return response(StatusCode::BAD_REQUEST, "invalid url"),
Some(a) => {
if let Ok(ip_addr) = a.parse() {
PeerAddr::from_ip(ip_addr)
} else if let Ok(addr) = a.parse() {
PeerAddr(addr)
} else {
return response(
StatusCode::BAD_REQUEST,
format!("invalid peer address: {}", req.uri().path()),
);
}
}
};
match command {
"ban" => match w_fut!(&self.peers).ban_peer(addr, ReasonForBan::ManualBan) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("ban failed: {:?}", e),
),
},
"unban" => match w_fut!(&self.peers).unban_peer(addr) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("unban failed: {:?}", e),
),
},
_ => response(StatusCode::BAD_REQUEST, "invalid command"),
}
}
}<|fim▁end|> | pub peers: Weak<p2p::Peers>,
} |
<|file_name|>FindIndexIntro.java<|end_file_name|><|fim▁begin|>package algorithms.sorting;
import java.util.Scanner;
/*
* Sample Challenge
* This is a simple challenge to get things started. Given a sorted array (ar)
* and a number (V), can you print the index location of V in the array?
*
* Input Format:
* There will be three lines of input:
*
* V - the value that has to be searched.
* n - the size of the array.
* ar - n numbers that make up the array.
*
* Output Format:
* Output the index of V in the array.
*
* Constraints:
* 1 <= n <= 1000
* -1000 <= V <= 1000, V is an element of ar
* It is guaranteed that V will occur in ar exactly once.
*
* Sample Input:
* 4
* 6
* 1 4 5 7 9 12
*
* Sample Output:
* 1
*/
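// Since the input is sorted, a binary search would also satisfy the
// constraints; the solution below reads tokens with a simple linear scan,
// which is adequate for n <= 1000 and avoids buffering the array.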
public class FindIndexIntro {
public static void main(String[] args) {
Scanner sc = new Scanner(System.in);
int target = sc.nextInt();
int arraySize = sc.nextInt();
int targetIndex = -1;
<|fim▁hole|> int arrayIndex = 0;
while (arrayIndex < arraySize) {
if (target == sc.nextInt()) {
targetIndex = arrayIndex;
break;
}
arrayIndex++;
}
sc.close();
System.out.println(targetIndex);
}
}<|fim▁end|> | |
<|file_name|>datetime-field-response.js<|end_file_name|><|fim▁begin|>/** @jsx jsx */
import React from "react";
import PropTypes from "prop-types";<|fim▁hole|>
import constants from "../../../constants";
import { RegularText } from "../../atoms/typography";
const DatetimeFieldResponse = props => {
return (
<RegularText
css={css`
margin: 8px 0 16px 0;
`}
>
{moment(props.value).format(
props.fieldConfig.display_format ||
constants.DEFAULT_DATE_DISPLAY_FORMAT,
)}
</RegularText>
);
};
DatetimeFieldResponse.propTypes = {
fieldConfig: PropTypes.object.isRequired,
value: PropTypes.string.isRequired,
theme: PropTypes.object.isRequired,
};
export default withTheme(DatetimeFieldResponse);<|fim▁end|> | import moment from "moment";
import { css, jsx } from "@emotion/core";
import { withTheme } from "emotion-theming"; |
<|file_name|>block.py<|end_file_name|><|fim▁begin|>from __future__ import with_statement
from xdrdef.pnfs_block_pack import PNFS_BLOCKPacker as Packer
from xdrdef.pnfs_block_pack import PNFS_BLOCKUnpacker as Unpacker
from xdrdef.pnfs_block_type import *
from xdrdef.pnfs_block_const import *
import fs_base
from threading import Lock
import struct
# draft 8
# All sizes are in bytes unless otherwise indicated
"""
Need to be able to set topology in server_exports
From topology, need to create device
"""
id = 0
id_lock = Lock()
def getid(d):
"""Get a new unique id. These are used only internally for printing"""
global id
id_lock.acquire()
out = id
id += 1
id_lock.release()
return out
class BlockVolume(fs_base.LayoutFile):
"""Deals with disk topology information"""
class FakeFs(object):
def _find_extent(self, pos, inode):
# inode here is the topology root block.Volume
vol, v_pos, limit = inode.extent(pos, 1 << 64)
return fs_base.Extent(fs_base.VALID, v_pos, pos, limit, vol._fd)
def __init__(self, volume):
self._component_list = [vol for vol in volume._dump()
if type(vol) == Simple]
self._openlist = []
self.address_body = volume.get_addr()
super(BlockVolume, self).__init__(volume, self.FakeFs(), volume._size)
def open(self, mode="rb+"):
# STUB - need care with mode, for example--append would not work as is
for vol in self._component_list:
# STUB - rewrite in terms of context managers
if vol.backing_dev is None:
raise IOError("No backing device for Simple Volume %i" % vol.id)
vol._fd = open(vol.backing_dev, mode)
self._openlist.append(vol._fd)
return self
<|fim▁hole|> for fd in reversed(self._openlist):
fd.close()
__enter__ = open
def __exit__(self, t, v, tb):
self.close()
class Volume(object):
"""Superclass used to represent topology components."""
def get_addr(self):
"""Generate the opaque part of device_addr4 used by NFS4.1.
Note this corresponds to device.address_body property used by
op_getdeviceinfo.
"""
# Create list of all volumes referenced, in order of reference
list = self._dump()
# Create mapping from device to its index in list
mapping = dict(zip(list, range(len(list))))
# Create (unpacked) pnfs_block_volume4 structure for each volume
addr = pnfs_block_deviceaddr4([d.get_xdr(mapping) for d in list])
# Create packed xdr string
p = Packer()
p.pack_pnfs_block_deviceaddr4(addr)
return p.get_buffer()
def _dump(self):
"""Recursively scan for all devices in tree.
They are returned in order of reference, to build volume array.
"""
out = []
for v in self.volumes:
out.extend(v._dump())
out = remove_dups(out)
out.append(self)
return out
def get_xdr(self, mapping):
"""Returns filled (and unpacked) pnfs_block_volume4 structure.
        Need mapping from device to top-level array index to do the conversion.
"""
raise NotImplementedError
def resolve(self, i):
"""Map a byte offset to the corresponding Simple volume and byte offset.
"""
        raise NotImplementedError
def extent(self, i, limit):
"""Same as resolve, with addition of how far mapping extends."""
        raise NotImplementedError
class Simple(Volume):
"""Represents an actual disk. Always a leaf node in the topology tree."""
def __init__(self, signature, size=None, backing_dev=None):
self.type = PNFS_BLOCK_VOLUME_SIMPLE
self.id = getid(self)
if type(signature[0]) == int:
# Make it easy to send a single component
signature = [signature]
self.sig = [pnfs_block_sig_component4(i, s) for i, s in signature]
self._size = size # in bytes
self.backing_dev = backing_dev
if backing_dev is None:
if size is None:
raise ValueError("Must set either size or backing_dev")
return
self._fd = None
with open(backing_dev, "rb+") as fd:
# Determine device's actual size
fd.seek(0, 2)
true_size = fd.tell()
if size is None:
self._size = true_size
elif true_size < size:
raise ValueError("backing dev size %r < %r" % (true_size, size))
self._write_sig(fd)
def _write_sig(self, fd):
"""Write out disk signature to open fd."""
for comp in self.sig:
offset = comp.bsc_sig_offset
if offset < 0:
offset += self._size
fd.seek(offset)
fd.write(comp.bsc_contents)
def __repr__(self):
return "Simple %i" % self.id
def _dump(self):
"""Since this is always a leaf node of tree, end recursive scan."""
return (self, )
def get_xdr(self, mapping):
info = pnfs_block_simple_volume_info4(self.sig)
return pnfs_block_volume4(PNFS_BLOCK_VOLUME_SIMPLE, bv_simple_info=info)
def resolve(self, i):
# print "resolve(%i) %r" % (i, self)
if i < 0 or i >= self._size:
raise ValueError("Asked for %i of %i" % (i, self._size))
return (self, i)
def extent(self, i, limit):
return (self, i, min(limit, self._size - i))
class Slice(Volume):
"""A contiguous slice from a single volume."""
def __init__(self, volume, start, length):
self.type = PNFS_BLOCK_VOLUME_SLICE
self.id = getid(self)
self.start = start # block offset
self.length = length # length in blocks
self.volumes = [volume] # volume which is sliced
self._size = length
def __repr__(self):
return "Slice %i (from vol %i)" % (self.id, self.volumes[0].id)
def get_xdr(self, mapping):
info = pnfs_block_slice_volume_info4(self.start, self.length,
mapping[self.volumes[0]])
return pnfs_block_volume4(PNFS_BLOCK_VOLUME_SLICE, bv_slice_info=info)
def resolve(self, i):
# print "resolve(%i) %r" % (i, self)
# print self.start, self._size, self.length
if i < 0 or i >= self._size:
raise ValueError("Asked for %i of %i" % (i, self._size))
return self.volumes[0].resolve(self.start + i)
def extent(self, i, limit):
return self.volumes[0].extent(self.start + i,
min(limit, self._size - i))
class Concat(Volume):
"""A simple concatanation of several volumes."""
def __init__(self, volumes):
self.type = PNFS_BLOCK_VOLUME_CONCAT
self.id = getid(self)
self.volumes = volumes
self._size = sum([v._size for v in volumes])
def get_xdr(self, mapping):
info = pnfs_block_concat_volume_info4([mapping[v] for v in self.volumes])
return pnfs_block_volume4(PNFS_BLOCK_VOLUME_CONCAT, bv_concat_info=info)
def __repr__(self):
return "Concat %i of %r" % (self.id, [v.id for v in self.volumes])
def resolve(self, i):
# print "resolve(%i) %r" % (i, self)
if i < 0 or i >= self._size:
raise ValueError("Asked for %i of %i" % (i, self._size))
sum = 0
for v in self.volumes:
next = sum + v._size
if i < next:
return v.resolve(i - sum)
sum = next
# Shouldn't get here
raise RuntimeError
def extent(self, i, limit):
sum = 0
for v in self.volumes:
next = sum + v._size
if i < next:
return v.extent(i - sum, min(limit, next - i))
sum = next
# Shouldn't get here
raise RuntimeError
class Stripe(Volume):
"""Stripe of several volumes, all of the same size."""
def __init__(self, size, volumes):
self.type = PNFS_BLOCK_VOLUME_STRIPE
self.id = getid(self)
self.stripe_unit = size # in blocks?
self.volumes = volumes
self._size = sum([v._size for v in volumes]) # XXX All same size?
def get_xdr(self, mapping):
info = pnfs_block_stripe_volume_info4(self.stripe_unit,
[mapping[v] for v in self.volumes])
return pnfs_block_volume4(PNFS_BLOCK_VOLUME_STRIPE, bv_stripe_info=info)
def __repr__(self):
return "Slice %i (size=%i) of %r" % (self.id, self.stripe_unit,
[v.id for v in self.volumes])
def resolve(self, i):
"""
0 1 2 3 4 5 6 7 8 global_stripe_number
| | | |
| | | | | | | | | |
| | | |
0 1 2 local_stripe_number
0 1 2 0 1 2 0 1 2 disk_number
"""
def split(x, mod):
return (x // mod, x % mod)
if i < 0 or i >= self._size:
raise ValueError("Asked for %i of %i" % (i, self._size))
global_stripe_number, stripe_pos = split(i, self.stripe_unit)
local_stripe_number, disk_number = split(global_stripe_number,
len(self.volumes))
disk_pos = local_stripe_number * self.stripe_unit + stripe_pos
return self.volumes[disk_number].resolve(disk_pos)
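    # Worked example for the diagram above, assuming stripe_unit=2 and three
    # volumes: resolve(9) gives global_stripe_number=4, stripe_pos=1; then
    # split(4, 3) gives local_stripe_number=1, disk_number=1, so the byte
    # lands at offset 1*2 + 1 = 3 on volumes[1].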
def extent(self, i, limit):
def split(x, mod):
return (x // mod, x % mod)
global_stripe_number, stripe_pos = split(i, self.stripe_unit)
local_stripe_number, disk_number = split(global_stripe_number,
len(self.volumes))
disk_pos = local_stripe_number * self.stripe_unit + stripe_pos
return self.volumes[disk_number].extent(disk_pos, min(limit, self.stripe_unit - stripe_pos))
def remove_dups(l):
    # XXX Again, there is probably a better way to do this
out = []
while l:
i = l.pop(0)
if i not in out:
out.append(i)
return out
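# A shorter order-preserving dedup would also work here (a sketch, assuming
# the items are hashable, which these Volume objects are by identity):
#   seen = set()
#   return [x for x in l if not (x in seen or seen.add(x))]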
if __name__=="__main__":
pass<|fim▁end|> | def close(self):
# XXX Careful here - what if errors on a close, or previously on open? |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>extern crate rusoto_core;
extern crate rusoto_ec2;
use std::env;
use rusoto_core::default_tls_client;
use rusoto_core::{ProfileProvider, Region};
use rusoto_ec2::{Ec2, Ec2Client, DescribeInstancesRequest};
fn region_selector(acc: &str) -> Region {
match acc {
"eu-west-1" => {
let region: Region = Region::EuWest1;
region
},
"eu-west-2" => {
let region: Region = Region::EuWest2;
region
},
"us-east-1" => {
let region: Region = Region::UsEast1;
region
},
"us-east-2" => {
let region: Region = Region::UsEast2;
region
},
"us-west-1" => {
let region: Region = Region::UsWest1;
region
},
"us-west-2" => {
let region: Region = Region::UsWest2;
region
},
_ => {
let region = Region::UsEast1;
region
},
}
}
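// Usage sketch: region_selector("eu-west-2") yields Region::EuWest2, while an
// unrecognised name such as "eu-central-1" falls back to Region::UsEast1.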
fn main() {
let args: Vec<String> = env::args().collect();<|fim▁hole|> // println!("{:?}", args);
let account: &str = args[1].as_ref();
let region_name: &str = args[2].as_ref();
let region: Region = region_selector(region_name);
let aws_creds_dir: String = env::home_dir().unwrap().to_str().unwrap().to_owned() + "/.aws/credentials";
let provider: ProfileProvider = ProfileProvider::with_configuration(aws_creds_dir, account);
let client = Ec2Client::new(default_tls_client().unwrap(), provider, region);
let list_ec2_input: DescribeInstancesRequest = Default::default();
match client.describe_instances(&list_ec2_input) {
Ok(output) => match output.reservations {
Some(reservations_list) => for reservations in reservations_list {
// println!("{:?}", reservations.instances.iter());
for instance in reservations.instances.iter() {
let ec2_details: &rusoto_ec2::Instance = instance.get(0).unwrap();
let ec2_tags: Vec<rusoto_ec2::Tag> = ec2_details.tags.clone()
.unwrap_or(vec![rusoto_ec2::Tag { key: Some(String::from("Name")),
value: Some(String::from("NO_NAME"))}]);
for tags in ec2_tags {
match tags.key {
Some(tag_key) => if tag_key == "Name" {
let instance_name: String =
tags.value.unwrap_or(String::from("NO_NAME"));
let public_ip_address: String =
ec2_details
.public_ip_address
.clone()
.unwrap_or(String::from("NONE"));
let private_ip_address: String =
ec2_details
.private_ip_address
.clone()
.unwrap_or(String::from("NONE"));
let status: String =
ec2_details.state.clone().unwrap()
.name.unwrap_or(String::from("status_unknown"));
println!("{} has public IP {} and private IP {} - {}",
instance_name,
public_ip_address,
private_ip_address,
status,)
}
_ => (),
}
}
}
}
None => println!("No instances found!"),
}
Err(error) => {
println!("Error: {:?}", error);
}
}
}<|fim▁end|> | |
<|file_name|>issue-46604.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// revisions: ast mir
//[mir]compile-flags: -Z borrowck=mir
static buf: &mut [u8] = &mut [1u8,2,3,4,5,7]; //[ast]~ ERROR E0017<|fim▁hole|> //[mir]~^ ERROR E0017
fn write<T: AsRef<[u8]>>(buffer: T) { }
fn main() {
write(&buf);
buf[0]=2; //[ast]~ ERROR E0389
//[mir]~^ ERROR E0594
}<|fim▁end|> | |
<|file_name|>Code.js<|end_file_name|><|fim▁begin|>var CLIENT_ID = '';
var CLIENT_SECRET = '';
var OWNER = "";
var REPO = "";
/**
* Manage Form Answer
* Create a trigger by going to Resources > Current projet's triggers
* Select function manageAnswer() and create a trigger at form submission
*/
function manageAnswer(e) {
var form = e.source;
var rep = {
"Title":"",
"Message":"",
"Email":""
};
var itemResponses = e.response.getItemResponses();
for (var i = 0; i < itemResponses.length; i++) {
var itemTitle = itemResponses[i].getItem().getTitle();
var itemResponse = itemResponses[i].getResponse();
rep[itemTitle] = itemResponse;
Logger.log(itemTitle + ': ' + itemResponse );
}
try{
var issue = submitIssue(rep);
var body = "<p>Hi,</p>"
+"<p>Thank you for submitting your issue, you can follow it on this page : <a href='"+issue.html_url+"'>link</a>.</p>"
+"<p>Title : "+rep.Title+"<br>"
+"Message : "+rep.Message+"</p>"
+"Regards";
GmailApp.sendEmail(rep.Email, 'Issue posted on GitHub', '', {
htmlBody:body,
});
}catch(e){
GmailApp.sendEmail(Session.getEffectiveUser().getEmail(), 'Error issue submission', '', {
htmlBody:JSON.stringify(rep),
});
}
}<|fim▁hole|>
function submitIssue(data){
var service = getService();
if (service.hasAccess()) {
var url = 'https://api.github.com/repos/'+OWNER+'/'+REPO+'/issues';
var bodyRequest = {
"title":data.Title,
"body":"_## Issue created anonymously for a user ##_\n"+data.Message
};
var response = UrlFetchApp.fetch(url, {
method : "post",
headers: {
Authorization: 'Bearer ' + service.getAccessToken()
},
payload : JSON.stringify(bodyRequest)
});
var result = JSON.parse(response.getContentText());
Logger.log(JSON.stringify(result, null, 2));
return result;
} else {
var authorizationUrl = service.getAuthorizationUrl();
Logger.log('Open the following URL and re-run the script: %s',
authorizationUrl);
}
}
/**
* Authorizes and makes a request to the GitHub API.
*/
function run() {
var service = getService();
if (service.hasAccess()) {
var url = 'https://api.github.com/user/repos';
var response = UrlFetchApp.fetch(url, {
headers: {
Authorization: 'Bearer ' + service.getAccessToken()
}
});
var result = JSON.parse(response.getContentText());
Logger.log(JSON.stringify(result, null, 2));
} else {
var authorizationUrl = service.getAuthorizationUrl();
Logger.log('Open the following URL and re-run the script: %s',
authorizationUrl);
}
}
/**
* Configures the service.
*/
function getService() {
return OAuth2.createService('GitHub')
// Set the endpoint URLs.
.setAuthorizationBaseUrl('https://github.com/login/oauth/authorize')
.setTokenUrl('https://github.com/login/oauth/access_token')
// Set the client ID and secret.
.setClientId(CLIENT_ID)
.setClientSecret(CLIENT_SECRET)
// Set the name of the callback function that should be invoked to complete
// the OAuth flow.
.setCallbackFunction('authCallback')
//scope for app
.setScope('repo')
// Set the property store where authorized tokens should be persisted.
.setPropertyStore(PropertiesService.getUserProperties())
}
/**
* Handles the OAuth callback.
*/
function authCallback(request) {
var service = getService();
var authorized = service.handleCallback(request);
if (authorized) {
return HtmlService.createHtmlOutput('Success!');
} else {
return HtmlService.createHtmlOutput('Denied');
}
}<|fim▁end|> |
/**
* Function to send issue to GitHub
*/ |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#
# Module loader file for /ciscripts/check/python.
#
# See /LICENCE.md for Copyright information
"""Module loader file for /ciscripts/check/python."""<|fim▁end|> | # /ciscripts/check/python/__init__.py |
<|file_name|>single_task_evaluator_test.py<|end_file_name|><|fim▁begin|><|fim▁hole|># Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the single_task_evaluator."""
from mint.ctl import single_task_evaluator
from mint.ctl import single_task_trainer
from third_party.tf_models import orbit
import tensorflow as tf
import tensorflow_datasets as tfds
class SingleTaskEvaluatorTest(tf.test.TestCase):
def test_single_task_evaluation(self):
iris = tfds.load('iris')
train_ds = iris['train'].batch(32)
model = tf.keras.Sequential([
tf.keras.Input(shape=(4,), name='features'),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(3)
])
trainer = single_task_trainer.SingleTaskTrainer(
train_ds,
label_key='label',
model=model,
loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.SGD(
learning_rate=tf.keras.optimizers.schedules.PiecewiseConstantDecay(
[0], [0.01, 0.01])))
evaluator = single_task_evaluator.SingleTaskEvaluator(
train_ds,
label_key='label',
model=model,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
controller = orbit.Controller(
trainer=trainer,
evaluator=evaluator,
steps_per_loop=100,
global_step=trainer.optimizer.iterations)
controller.train(train_ds.cardinality().numpy())
controller.evaluate()
accuracy = evaluator.metrics[0].result().numpy()
self.assertGreater(0.925, accuracy)
if __name__ == '__main__':
tf.test.main()<|fim▁end|> | # Copyright 2021, Google LLC.
# |
<|file_name|>default.py<|end_file_name|><|fim▁begin|>import xbmc, xbmcgui, xbmcaddon, xbmcplugin, re
import urllib, urllib2
import re, string
import threading
import os
import base64
#from t0mm0.common.addon import Addon
#from t0mm0.common.net import Net
import urlparse
import xbmcplugin
import cookielib
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
__icon__ = __addon__.getAddonInfo('icon')
addon_id = 'plugin.video.f4mTester'
selfAddon = xbmcaddon.Addon(id=addon_id)
#addon = Addon('plugin.video.f4mTester', sys.argv)
#net = Net()
mode =None
play=False
#play = addon.queries.get('play', None)
paramstring=sys.argv[2]
#url = addon.queries.get('playurl', None)
print paramstring
name=''
proxy_string=None
proxy_use_chunks=True
auth_string=''
streamtype='HDS'
setResolved=False
if paramstring:
paramstring="".join(paramstring[1:])
params=urlparse.parse_qs(paramstring)
url = params['url'][0]
try:
name = params['name'][0]
except:pass
try:
proxy_string = params['proxy'][0]
except:pass
try:
auth_string = params['auth'][0]
except:pass
print 'auth_string',auth_string
try:
streamtype = params['streamtype'][0]
except:pass
print 'streamtype',streamtype
try:
proxy_use_chunks_temp = params['proxy_for_chunks'][0]
import json
proxy_use_chunks=json.loads(proxy_use_chunks_temp)
except:pass
simpleDownloader=False
try:
simpleDownloader_temp = params['simpledownloader'][0]
import json
simpleDownloader=json.loads(simpleDownloader_temp)
except:pass
mode='play'
try:
mode = params['mode'][0]
except: pass
maxbitrate=0
try:
maxbitrate = int(params['maxbitrate'][0])
except: pass
play=True
try:
setResolved = params['setresolved'][0]
import json
setResolved=json.loads(setResolved)
except:setResolved=False
def playF4mLink(url,name,proxy=None,use_proxy_for_chunks=False,auth_string=None,streamtype='HDS',setResolved=False):
from F4mProxy import f4mProxyHelper
player=f4mProxyHelper()
#progress = xbmcgui.DialogProgress()
#progress.create('Starting local proxy')
if setResolved:
urltoplay,item=player.playF4mLink(url, name, proxy, use_proxy_for_chunks,maxbitrate,simpleDownloader,auth_string,streamtype,setResolved)
item.setProperty("IsPlayable", "true")
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
else:
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)
player.playF4mLink(url, name, proxy, use_proxy_for_chunks,maxbitrate,simpleDownloader,auth_string,streamtype,setResolved)
return
def getUrl(url, cookieJar=None,post=None,referer=None,isJsonPost=False, acceptsession=None):
cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
#opener = urllib2.install_opener(opener)
req = urllib2.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
if isJsonPost:
req.add_header('Content-Type','application/json')
if acceptsession:
req.add_header('Accept-Session',acceptsession)
if referer:
req.add_header('Referer',referer)
response = opener.open(req,post,timeout=30)
link=response.read()
response.close()
return link;
def getBBCUrl(urlToFetch):
text=getUrl(urlToFetch)
bitRate="1500"
overrideBitrate=selfAddon.getSetting( "bbcBitRateMax" )
    if overrideBitrate != "": bitRate = overrideBitrate
bitRate=int(bitRate)
regstring='href="(.*?)" bitrate="(.*?)"'
birates=re.findall(regstring, text)
birates=[(int(j),f) for f,j in birates]
birates=sorted(birates, key=lambda f: f[0])
ratesel, urlsel=birates[0]
for r, url in birates:
if r<=bitRate:
ratesel, urlsel=r, url
else:
break
print 'xxxxxxxxx',ratesel, urlsel
return urlsel<|fim▁hole|>
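# Sketch of the selection above: with sorted rates [400, 800, 1500, 3000] and
# bbcBitRateMax=1000, the loop keeps the last rate <= 1000, i.e. 800; if every
# rate exceeds the maximum, the lowest one is used.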
def GUIEditExportName(name):
exit = True
while (exit):
kb = xbmc.Keyboard('default', 'heading', True)
kb.setDefault(name)
kb.setHeading('Enter Url')
kb.setHiddenInput(False)
kb.doModal()
if (kb.isConfirmed()):
name = kb.getText()
#name_correct = name_confirmed.count(' ')
#if (name_correct):
# GUIInfo(2,__language__(33224))
#else:
# name = name_confirmed
# exit = False
#else:
# GUIInfo(2,__language__(33225))
exit = False
return(name)
if mode ==None:
videos=[[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifests/hds/pc/llnw/bbc_one_hd.f4m') +'|Referer=http://www.bbc.co.uk/iplayer/live/bbcone&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc1 (uk)','http://www.parker1.co.uk/myth/icons/tv/bbc1.png',0,'',False],
[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifests/hds/pc/llnw/bbc_two_hd.f4m')+'|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc2 (uk)','http://www.parker1.co.uk/myth/icons/tv/bbc2.png',0,'',False],
[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifests/hds/pc/llnw/bbc_three_hd.f4m')+'|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc3 (uk)','http://www.parker1.co.uk/myth/icons/tv/bbc3.png',0,'',False],
[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifests/hds/pc/llnw/bbc_four_hd.f4m')+'|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc4 (uk)','http://www.parker1.co.uk/myth/icons/tv/bbc4.png',0,'',False],
[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifesto/audio_video/simulcast/hds/uk/pc/llnw/bbc_news24.f4m')+'|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc news (uk)','http://www.parker1.co.uk/myth/icons/tv/bbcnews.png',0,'',False],
[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifesto/audio_video/simulcast/hds/uk/pc/llnw/bbc_parliament.f4m')+'|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc parliment (uk)','',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/cbbc/cbbc_1500.f4m','cbbc (uk) 1500kbps','',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/cbeebies/cbeebies_1500.f4m','cbeebeies (uk) 1500kbps','',0,'',False],
# ['http://vs-hds-uk-live.edgesuite.net/pool_1/live/bbc_parliament/bbc_parliament.isml/bbc_parliament-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbcparliament&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc parliment (uk) 1500kbps','',0,'',False],
# ['http://vs-hds-uk-live.bbcfmt.vo.llnwd.net/pool_5/live/bbc_news_channel_hd/bbc_news_channel_hd.isml/bbc_news_channel_hd-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbcnews&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc news (uk) 1500kbps','',0,'',False],
# ['http://vs-hds-uk-live.bbcfmt.vo.llnwd.net/pool_5/live/bbc_one_london/bbc_one_london.isml/bbc_one_london-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbcone&X-Requested-With=ShockwaveFlash/18.0.0.160&X-Forwarded-For=212.58.241.131','bbc1 (outside uk) 1500kbps','http://www.parker1.co.uk/myth/icons/tv/bbc1.png',0,'',False],
# ['http://vs-hds-uk-live.bbcfmt.vo.llnwd.net/pool_5/live/bbc_two_hd/bbc_two_hd.isml/bbc_two_hd-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc2 (outside uk) 1500kbps','http://www.parker1.co.uk/myth/icons/tv/bbc2.png',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/bbc3/bbc3_1500.f4m|X-Forwarded-For=212.58.241.131','bbc3 (outside uk) 1500kbps [link not valid]','',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/bbc4/bbc4_1500.f4m|X-Forwarded-For=212.58.241.131','bbc4 (outside uk) 1500kbps [link not valid]','',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/cbbc/cbbc_1500.f4m|X-Forwarded-For=212.58.241.131','cbbc (outside uk) 1500kbps','',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/cbeebies/cbeebies_1500.f4m|X-Forwarded-For=212.58.241.131','cbeebeies (outside uk) 1500kbps','',0,'',False],
# ['http://vs-hds-uk-live.edgesuite.net/pool_1/live/bbc_parliament/bbc_parliament.isml/bbc_parliament-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbcparliament&X-Requested-With=ShockwaveFlash/18.0.0.160|X-Forwarded-For=212.58.241.131','bbc parliment (outside uk) 1500kbps','',0,'',False],
# ['http://vs-hds-uk-live.bbcfmt.vo.llnwd.net/pool_5/live/bbc_news_channel_hd/bbc_news_channel_hd.isml/bbc_news_channel_hd-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbcnews&X-Requested-With=ShockwaveFlash/18.0.0.160&X-Forwarded-For=212.58.241.131','bbc news (outside uk) 1500kbps','',0,'',False],
['http://nhkworld-hds-live1.hds1.fmslive.stream.ne.jp/hds-live/nhkworld-hds-live1/_definst_/livestream/nhkworld-live-128.f4m','nhk 128','',0,'',False],
['http://nhkworld-hds-live1.hds1.fmslive.stream.ne.jp/hds-live/nhkworld-hds-live1/_definst_/livestream/nhkworld-live-256.f4m','nhk 256','',0,'',False],
['http://nhkworld-hds-live1.hds1.fmslive.stream.ne.jp/hds-live/nhkworld-hds-live1/_definst_/livestream/nhkworld-live-512.f4m','nhk 512','',0,'',False],
['http://77.245.150.95/hds-live/livepkgr/_definst_/liveevent/livestream.f4m','Turkish','',0,'',False],
['http://88.157.194.246/live/ramdisk/zrtp1/HDS/zrtp1.f4m','j0anita','',0,'',False],
['http://ak.live.cntv.cn/z/cctv9_1@139238/manifest.f4m?hdcore=2.11.3&g=OUVOVEOVETYH','cntv.cn','',0,'',False],
['http://mlghds-lh.akamaihd.net/z/mlg17_1@167001/manifest.f4m?hdcore=2.11.3&g=TOFRPVFGXLFS','alibaba','',0,'',False],
['http://peer-stream.com/api/get_manifest.f4m?groupspec=G:0101010c050e6f72663200','streamtivi.com','',0,'',False],
['http://164.100.31.234/hds-live/livepkgr/_definst_/rstvlive.f4m','Rajya Sabha TV','',0,'',False],
['http://fmssv1.merep.com/hds-live/livepkgr/_definst_/liveevent/livestream.f4m?blnpc20130909042035_1061880273','media center','',0,'',False],
['http://fms01.stream.internetone.it/hds-live/livepkgr/_definst_/8fm/8fm1.f4m','Italy otto 8 FMTV','',0,'',False],
['http://88.150.239.241/hds-live/livepkgr/_definst_/liveevent/livestream.f4m','Son Araba','',0,'',False],
['http://202.162.123.172/hds-live/livepkgr/_definst_/liveevent/livestream4.f4m','Chine Live event 4','',0,'',False],
['http://zb.wyol.com.cn/hds-live/livepkgr/_definst_/wslhevent/hls_pindao_1_350.f4m','CCTV 1 China','',0,'',False],
['http://zb.zghhzx.net/hds-live/livepkgr/_definst_/wslhevent/hls_pindao_1_350.f4m','CCTV13 China','',0,'',False],
['http://zb.sygd.tv/hds-live/livepkgr/_definst_/wslhevent/hls_pindao_1_350.f4m','SYGD TV china','',0,'',False],
['http://zb.pudongtv.cn/hds-live/livepkgr/_definst_/wslhevent/hls_pindao_1_500.f4m','Pudong TV China','',0,'',False],
['http://88.150.239.241/hds-live/livepkgr/_definst_/liveevent/livestream.f4m','AKS TV Turkey','',0,'',False],
['http://fms.quadrant.uk.com/hds-live/livepkgr/_definst_/liveevent/livestream.f4m','Quadrant live streams UK','',0,'',False],
['http://cdn3.1internet.tv/hds-live11/livepkgr/_definst_/1tv-hd.f4m','1 HD cdn1 Russia','',0,'',False],
['http://cdn2.1internet.tv/hds-live/livepkgr/_definst_/1tv.f4m','1 HD cdn2 Russia','',0,'',False],
['http://193.232.151.135/hds-live-not-protected/livepkgr/_1099_/1099/1099-70.f4m','ndtv plus - proxy needed','',0,'',False],
['http://bbcwshdlive01-lh.akamaihd.net/z/atv_1@61433/manifest.f4m?hdcore=2.11.3','BBC Arabic','',0,'',False],
['http://skaihd-f.akamaihd.net/z/advert/ORAL_B_SHAKIRA_20-SKAI.mp4/manifest.f4m?hdcore=2.6.8&g=OGEJOEGNJICP','Greek Oral B advert','',0,'',False],
['http://srgssr_uni_11_ww-lh.akamaihd.net/z/enc11uni_ww@112996/manifest.f4m?g=XTJVOORDBMQF&hdcore=2.11.3','RTS Swiss a proxy needed?','',0,'',False],
['http://ccr.cim-jitp.top.comcast.net/cimomg04/OPUS/83/162/119271491507/1389989008837/119271491507_1389986611184_1850000_4.f4m','aliakrep DRM not working','',0,'',False],
['http://stream1-prod.spectar.tv:1935/mrt-edge/_definst_/mrt3/smil:all-streams.isml/manifest.f4m','mrt3/all-streams.isml','',0,'',False],
['http://hdv.gamespotcdn.net/z/d5/2013/10/16/Gameplay_GettingRevengeinGTAOnline_101613_,600,1000,1800,3200,4000,.mp4.csmil/manifest.f4m?hdcore=2.10.3&g=JNMDDRCQSDCH','Recorded..Getting Revenge in GTA maxbitrate 2000','',2006,'',False],
['http://hdv.gamespotcdn.net/z/d5/2013/10/16/Gameplay_GettingRevengeinGTAOnline_101613_,600,1000,1800,3200,4000,.mp4.csmil/manifest.f4m?hdcore=2.10.3&g=JNMDDRCQSDCH','Recorded..Getting Revenge in GTA maxbitrate Highest','',0,'',False],
['http://hdv.gamespotcdn.net/z/d5/2014/04/24/GSNews_Apr24_20140424a_,600,1000,1800,3200,4000,.mp4.csmil/manifest.f4m?hdcore=2.10.3&g=KUVLMGTKPJFF','Recorded..Gamespot news highest bitrate','',0,'',False],
['http://hdv.gamespotcdn.net/z/d5/2014/04/24/GSNews_Apr24_20140424a_,600,.mp4.csmil/manifest.f4m?hdcore=2.10.3&g=KUVLMGTKPJFF','Recorded..Gamespot news 600 bitrate','',0,'',False],
['http://202.125.131.170:1935/pitelevision/smil:geokahani.smil/manifest.f4m','Pitelevision geo kahani','',0,'',False],
['http://stream.flowplayer.org/flowplayer-700.flv','TESTING not F4M','',0,'',False],
['http://hlscache.fptplay.net.vn/live/htvcmovieHD_2500.stream/manifest.f4m|Referer=http://play.fpt.vn/static/mediaplayer/FPlayer.swf','Viet 2500bitrate','',0,'',False],
['http://hlscache.fptplay.net.vn/live/onetv_1000.stream/manifest.f4m|Referer=http://play.fpt.vn/static/mediaplayer/FPlayer.swf','Viet 1000bitrate','',0,'',False],
['http://88.157.194.246/live/ramdisk/zsic/HDS/zviseu.f4m','Sic http://viseu.es.tl/','',0,'',False],
['http://www.rte.ie/manifests/rte1.f4m','Rte.ie multi nested manifests','',0,'',False],
['http://olystreameast.nbcolympics.com/vod/157717c8-9c74-4fd1-ab1a-7daca5246324/geo1-lucas-oil-pro-motocross0531120959-ua.ism/manifest(format=f4m-f4f).f4m','NBc olypics','',900,'108.163.254.214:7808',False],
['http://olystreameast.nbcolympics.com/vod/31883e54-e85b-4551-a24a-46accc4a9d49/nbc-sports-live-extra0601123118-ua.ism/manifest(format=f4m-f4f,filtername=vodcut).f4m','NBc extra olypics','',900,'108.163.254.214:7808',False],
['http://77.245.150.95/hds-live/livepkgr/_definst_/liveevent/livestream.f4m','something else','',0,'',False]]
#['http://dummy','Custom']]
#print videos
if 1==2: #disable it as these links are not working, not sure why
req = urllib2.Request('http://www.gzcbn.tv/app/?app=ios&controller=cmsapi&action=pindao')
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
response.close()
## print link
s='title\":\"(.*?)\",\"stream\":\"(.*?)\"'
#
match=re.compile(s).findall(link)
i=0
for i in range(len(match)):
match[i]= (match[i][1].replace('\\/','/'),match[i][0])
videos+=match #disabled for time being as these are not working
#print videos
for (file_link,name,imgurl,maxbitrate,proxy,usechunks) in videos:
liz=xbmcgui.ListItem(name,iconImage=imgurl, thumbnailImage=imgurl)
liz.setInfo( type="Video", infoLabels={ "Title": name} )
#liz.setProperty("IsPlayable","true")
u = sys.argv[0] + "?" + urllib.urlencode({'url': file_link,'mode':'play','name':name,'maxbitrate':maxbitrate,'proxy':proxy,'proxy_for_chunks':usechunks})
print u
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False, )
elif mode == "play":
print 'PLAying ',mode,url,setResolved
if not name in ['Custom','TESTING not F4M'] :
playF4mLink(url,name, proxy_string, proxy_use_chunks,auth_string,streamtype,setResolved)
else:
listitem = xbmcgui.ListItem( label = str(name), iconImage = "DefaultVideo.png", thumbnailImage = xbmc.getInfoImage( "ListItem.Thumb" ), path=url )
xbmc.Player().play( url,listitem)
#newUrl=GUIEditExportName('')
#if not newUrl=='':
# playF4mLink(newUrl,name)
if not play:
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)<|fim▁end|> | |
<|file_name|>test_models.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import numpy.random
import scipy.stats
import functools
from collections import defaultdict
from nose import SkipTest
from nose.tools import assert_greater
from nose.tools import assert_in
from nose.tools import assert_is_instance
from nose.tools import assert_not_equal
from nose.tools import assert_true
from goftests import density_goodness_of_fit
from goftests import discrete_goodness_of_fit
from goftests import vector_density_goodness_of_fit
from distributions.dbg.random import sample_discrete
from distributions.util import scores_to_probs
from distributions.tests.util import assert_all_close
from distributions.tests.util import assert_close
from distributions.tests.util import assert_hasattr
from distributions.tests.util import import_model
from distributions.tests.util import list_models
from distributions.tests.util import seed_all
try:
import distributions.io.schema_pb2
has_protobuf = True
except ImportError:
has_protobuf = False
DATA_COUNT = 20
SAMPLE_COUNT = 1000
MIN_GOODNESS_OF_FIT = 1e-3
MODULES = {
'{flavor}.models.{name}'.format(**spec): import_model(spec)
for spec in list_models()
}
IS_FAST = {'dbg': False, 'hp': True, 'lp': True}
def model_is_fast(model):
flavor = model.__name__.split('.')[1]
return IS_FAST[flavor]
def iter_examples(module):
assert_hasattr(module, 'EXAMPLES')
EXAMPLES = module.EXAMPLES
assert_is_instance(EXAMPLES, list)
assert_true(EXAMPLES, 'no examples provided')
for i, EXAMPLE in enumerate(EXAMPLES):
print 'example {}/{}'.format(1 + i, len(EXAMPLES))
assert_in('shared', EXAMPLE)
assert_in('values', EXAMPLE)
values = EXAMPLE['values']
assert_is_instance(values, list)
count = len(values)
assert_true(
count >= 7,
'Add more example values (expected >= 7, found {})'.format(count))
yield EXAMPLE
def for_each_model(*filters):
'''
Run one test per Model, filtering out inappropriate Models for test.
'''
def filtered(test_fun):
@functools.wraps(test_fun)
def test_one_model(name):
module = MODULES[name]
assert_hasattr(module, 'Shared')
for EXAMPLE in iter_examples(module):
test_fun(module, EXAMPLE)
@functools.wraps(test_fun)
def test_all_models():
for name in MODULES:
module = MODULES[name]
if all(f(module) for f in filters):
yield test_one_model, name
return test_all_models
return filtered
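# Usage sketch: each filter is a predicate on the model module, e.g.
# @for_each_model(lambda module: module.Value in [bool, int]) below runs a
# test only for models with boolean or integer values.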
@for_each_model()
def test_value(module, EXAMPLE):
assert_hasattr(module, 'Value')
assert_is_instance(module.Value, type)
values = EXAMPLE['values']
for value in values:
assert_is_instance(value, module.Value)
@for_each_model()
def test_shared(module, EXAMPLE):
assert_hasattr(module, 'Shared')
assert_is_instance(module.Shared, type)
shared1 = module.Shared.from_dict(EXAMPLE['shared'])
shared2 = module.Shared.from_dict(EXAMPLE['shared'])
assert_close(shared1.dump(), EXAMPLE['shared'])
values = EXAMPLE['values']
seed_all(0)
for value in values:
shared1.add_value(value)
seed_all(0)
for value in values:
shared2.add_value(value)
assert_close(shared1.dump(), shared2.dump())
for value in values:
shared1.remove_value(value)
assert_close(shared1.dump(), EXAMPLE['shared'])
@for_each_model()
def test_group(module, EXAMPLE):
assert_hasattr(module, 'Group')
assert_is_instance(module.Group, type)
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
for value in values:
shared.add_value(value)
group1 = module.Group()
group1.init(shared)
for value in values:
group1.add_value(shared, value)
group2 = module.Group.from_values(shared, values)
assert_close(group1.dump(), group2.dump())
group = module.Group.from_values(shared, values)
dumped = group.dump()
group.init(shared)
group.load(dumped)
assert_close(group.dump(), dumped)
for value in values:
group2.remove_value(shared, value)
assert_not_equal(group1, group2)
group2.merge(shared, group1)
for value in values:
group1.score_value(shared, value)
for _ in xrange(10):
value = group1.sample_value(shared)
group1.score_value(shared, value)
module.sample_group(shared, 10)
group1.score_data(shared)
group2.score_data(shared)
@for_each_model(lambda module: hasattr(module.Shared, 'protobuf_load'))
def test_protobuf(module, EXAMPLE):
if not has_protobuf:
raise SkipTest('protobuf not available')
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
Message = getattr(distributions.io.schema_pb2, module.NAME)
message = Message.Shared()
shared.protobuf_dump(message)
shared2 = module.Shared()
shared2.protobuf_load(message)
assert_close(shared2.dump(), shared.dump())
message.Clear()
dumped = shared.dump()
module.Shared.to_protobuf(dumped, message)
assert_close(module.Shared.from_protobuf(message), dumped)
if hasattr(module.Group, 'protobuf_load'):
for value in values:
shared.add_value(value)
group = module.Group.from_values(shared, values)
message = Message.Group()
group.protobuf_dump(message)
group2 = module.Group()
group2.protobuf_load(message)
assert_close(group2.dump(), group.dump())
message.Clear()
dumped = group.dump()
module.Group.to_protobuf(dumped, message)
assert_close(module.Group.from_protobuf(message), dumped)
@for_each_model()
def test_add_remove(module, EXAMPLE):
# Test group_add_value, group_remove_value, score_data, score_value
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
values = []
group = module.Group.from_values(shared)
score = 0.0
assert_close(group.score_data(shared), score, err_msg='p(empty) != 1')
for _ in range(DATA_COUNT):
value = group.sample_value(shared)
values.append(value)
score += group.score_value(shared, value)
group.add_value(shared, value)
group_all = module.Group.from_dict(group.dump())
assert_close(
score,
group.score_data(shared),
err_msg='p(x1,...,xn) != p(x1) p(x2|x1) p(xn|...)')
numpy.random.shuffle(values)
for value in values:
group.remove_value(shared, value)
group_empty = module.Group.from_values(shared)
assert_close(
group.dump(),
group_empty.dump(),
err_msg='group + values - values != group')
numpy.random.shuffle(values)
for value in values:
group.add_value(shared, value)
assert_close(
group.dump(),
group_all.dump(),
err_msg='group - values + values != group')
@for_each_model()
def test_add_repeated(module, EXAMPLE):
# Test add_repeated value vs n * add
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
for value in EXAMPLE['values']:
group = module.Group.from_values(shared)
for _ in range(DATA_COUNT):
group.add_value(shared, value)
group_repeated = module.Group.from_values(shared)
group_repeated.add_repeated_value(shared, value, count=DATA_COUNT)
assert_close(
group.dump(),
group_repeated.dump(),
err_msg='n * add_value != add_repeated_value n')
@for_each_model()
def test_add_merge(module, EXAMPLE):
# Test group_add_value, group_merge
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values'][:]
for value in values:
shared.add_value(value)
numpy.random.shuffle(values)
group = module.Group.from_values(shared, values)
for i in xrange(len(values) + 1):
numpy.random.shuffle(values)
group1 = module.Group.from_values(shared, values[:i])
group2 = module.Group.from_values(shared, values[i:])
group1.merge(shared, group2)
assert_close(group.dump(), group1.dump())
@for_each_model()
def test_group_merge(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
group1 = module.Group.from_values(shared)
group2 = module.Group.from_values(shared)
expected = module.Group.from_values(shared)
actual = module.Group.from_values(shared)
for _ in xrange(100):
value = expected.sample_value(shared)
expected.add_value(shared, value)
group1.add_value(shared, value)
value = expected.sample_value(shared)
expected.add_value(shared, value)
group2.add_value(shared, value)
actual.load(group1.dump())
actual.merge(shared, group2)
assert_close(actual.dump(), expected.dump())
@for_each_model(lambda module: module.Value in [bool, int])
def test_group_allows_debt(module, EXAMPLE):
# Test that group.add_value can safely go into data debt
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
values = []
group1 = module.Group.from_values(shared, values)
for _ in range(DATA_COUNT):
value = group1.sample_value(shared)
values.append(value)
group1.add_value(shared, value)
group2 = module.Group.from_values(shared)
pos_values = [(v, +1) for v in values]<|fim▁hole|> signed_values = pos_values * 3 + neg_values * 2
numpy.random.shuffle(signed_values)
for value, sign in signed_values:
if sign > 0:
group2.add_value(shared, value)
else:
group2.remove_value(shared, value)
assert_close(group1.dump(), group2.dump())
@for_each_model()
def test_sample_seed(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
seed_all(0)
group1 = module.Group.from_values(shared)
values1 = [group1.sample_value(shared) for _ in xrange(DATA_COUNT)]
seed_all(0)
group2 = module.Group.from_values(shared)
values2 = [group2.sample_value(shared) for _ in xrange(DATA_COUNT)]
assert_close(values1, values2, err_msg='values')
@for_each_model()
def test_sample_value(module, EXAMPLE):
seed_all(0)
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
for values in [[], EXAMPLE['values']]:
group = module.Group.from_values(shared, values)
sample_count = SAMPLE_COUNT
if module.Value == numpy.ndarray:
sample_count *= 10
samples = [group.sample_value(shared) for _ in xrange(sample_count)]
if module.Value in [bool, int]:
probs_dict = {
value: math.exp(group.score_value(shared, value))
for value in set(samples)
}
gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
elif module.Value == float:
probs = numpy.exp([
group.score_value(shared, value)
for value in samples
])
gof = density_goodness_of_fit(samples, probs, plot=True)
elif module.Value == numpy.ndarray:
if module.__name__ == 'distributions.lp.models.niw':
raise SkipTest('FIXME known sampling bug')
probs = numpy.exp([
group.score_value(shared, value)
for value in samples
])
gof = vector_density_goodness_of_fit(samples, probs, plot=True)
else:
raise SkipTest('Not implemented for {}'.format(module.Value))
print '{} gof = {:0.3g}'.format(module.__name__, gof)
assert_greater(gof, MIN_GOODNESS_OF_FIT)
@for_each_model()
def test_sample_group(module, EXAMPLE):
seed_all(0)
SIZE = 2
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
for values in [[], EXAMPLE['values']]:
if module.Value in [bool, int]:
samples = []
probs_dict = {}
for _ in xrange(SAMPLE_COUNT):
values = module.sample_group(shared, SIZE)
sample = tuple(values)
samples.append(sample)
group = module.Group.from_values(shared, values)
probs_dict[sample] = math.exp(group.score_data(shared))
gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
else:
raise SkipTest('Not implemented for {}'.format(module.Value))
print '{} gof = {:0.3g}'.format(module.__name__, gof)
assert_greater(gof, MIN_GOODNESS_OF_FIT)
def _append_ss(group, aggregator):
ss = group.dump()
for key, val in ss.iteritems():
if isinstance(val, list):
for i, v in enumerate(val):
aggregator['{}_{}'.format(key, i)].append(v)
elif isinstance(val, dict):
for k, v in val.iteritems():
aggregator['{}_{}'.format(key, k)].append(v)
else:
aggregator[key].append(val)
def sample_marginal_conditional(module, shared, value_count):
values = module.sample_group(shared, value_count)
group = module.Group.from_values(shared, values)
return group
def sample_successive_conditional(module, shared, group, value_count):
sampler = module.Sampler()
sampler.init(shared, group)
values = [sampler.eval(shared) for _ in xrange(value_count)]
new_group = module.Group.from_values(shared, values)
return new_group
@for_each_model(model_is_fast)
def test_joint(module, EXAMPLE):
# \cite{geweke04getting}
seed_all(0)
SIZE = 10
SKIP = 100
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
marginal_conditional_samples = defaultdict(lambda: [])
successive_conditional_samples = defaultdict(lambda: [])
cond_group = sample_marginal_conditional(module, shared, SIZE)
for _ in xrange(SAMPLE_COUNT):
marg_group = sample_marginal_conditional(module, shared, SIZE)
_append_ss(marg_group, marginal_conditional_samples)
for __ in range(SKIP):
cond_group = sample_successive_conditional(
module,
shared,
cond_group,
SIZE)
_append_ss(cond_group, successive_conditional_samples)
for key in marginal_conditional_samples.keys():
gof = scipy.stats.ttest_ind(
marginal_conditional_samples[key],
successive_conditional_samples[key])[1]
if isinstance(gof, numpy.ndarray):
raise SkipTest('XXX: handle array case, gof = {}'.format(gof))
print '{}:{} gof = {:0.3g}'.format(module.__name__, key, gof)
if not numpy.isfinite(gof):
raise SkipTest('Test fails with gof = {}'.format(gof))
assert_greater(gof, MIN_GOODNESS_OF_FIT)
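# test_joint above is Geweke's "getting it right" check: marginal-conditional
# and successive-conditional sampling target the same joint distribution, so
# every sufficient-statistic stream should agree (here via a t-test).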
@for_each_model(lambda module: hasattr(module.Shared, 'scorer_create'))
def test_scorer(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
group = module.Group.from_values(shared)
scorer1 = shared.scorer_create()
scorer2 = shared.scorer_create(group)
for value in values:
score1 = shared.scorer_eval(scorer1, value)
score2 = shared.scorer_eval(scorer2, value)
score3 = group.score_value(shared, value)
assert_all_close([score1, score2, score3])
@for_each_model(lambda module: hasattr(module, 'Mixture'))
def test_mixture_runs(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
mixture = module.Mixture()
for value in values:
shared.add_value(value)
mixture.append(module.Group.from_values(shared, [value]))
mixture.init(shared)
groupids = []
for value in values:
scores = numpy.zeros(len(mixture), dtype=numpy.float32)
mixture.score_value(shared, value, scores)
probs = scores_to_probs(scores)
groupid = sample_discrete(probs)
mixture.add_value(shared, groupid, value)
groupids.append(groupid)
mixture.add_group(shared)
assert len(mixture) == len(values) + 1
scores = numpy.zeros(len(mixture), dtype=numpy.float32)
for value, groupid in zip(values, groupids):
mixture.remove_value(shared, groupid, value)
mixture.remove_group(shared, 0)
if module.__name__ == 'distributions.lp.models.dpd':
raise SkipTest('FIXME known segfault here')
mixture.remove_group(shared, len(mixture) - 1)
assert len(mixture) == len(values) - 1
for value in values:
scores = numpy.zeros(len(mixture), dtype=numpy.float32)
mixture.score_value(shared, value, scores)
probs = scores_to_probs(scores)
groupid = sample_discrete(probs)
mixture.add_value(shared, groupid, value)
@for_each_model(lambda module: hasattr(module, 'Mixture'))
def test_mixture_score(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
for value in values:
shared.add_value(value)
groups = [module.Group.from_values(shared, [value]) for value in values]
mixture = module.Mixture()
for group in groups:
mixture.append(group)
mixture.init(shared)
def check_score_value(value):
expected = [group.score_value(shared, value) for group in groups]
actual = numpy.zeros(len(mixture), dtype=numpy.float32)
noise = numpy.random.randn(len(actual))
actual += noise
mixture.score_value(shared, value, actual)
actual -= noise
assert_close(actual, expected, err_msg='score_value {}'.format(value))
another = [
mixture.score_value_group(shared, i, value)
for i in xrange(len(groups))
]
assert_close(
another,
expected,
err_msg='score_value_group {}'.format(value))
return actual
def check_score_data():
expected = sum(group.score_data(shared) for group in groups)
actual = mixture.score_data(shared)
assert_close(actual, expected, err_msg='score_data')
print 'init'
for value in values:
check_score_value(value)
check_score_data()
print 'adding'
groupids = []
for value in values:
scores = check_score_value(value)
probs = scores_to_probs(scores)
groupid = sample_discrete(probs)
groups[groupid].add_value(shared, value)
mixture.add_value(shared, groupid, value)
groupids.append(groupid)
check_score_data()
print 'removing'
for value, groupid in zip(values, groupids):
groups[groupid].remove_value(shared, value)
mixture.remove_value(shared, groupid, value)
scores = check_score_value(value)
check_score_data()<|fim▁end|> | neg_values = [(v, -1) for v in values] |
<|file_name|>baMenuItem.component.ts<|end_file_name|><|fim▁begin|>import {Component, Input, Output, EventEmitter} from '@angular/core';
@Component({
selector: 'ba-menu-item',
templateUrl: 'baMenuItem.html',
styleUrls: ['./baMenuItem.scss'],
})
export class BaMenuItem {
@Input() menuItem:any;
@Input() child: boolean = false;
@Output() itemHover = new EventEmitter<any>();
@Output() toggleSubMenu = new EventEmitter<any>();
public onHoverItem($event):void {
this.itemHover.emit($event);
}
public onToggleSubMenu($event, item):boolean {<|fim▁hole|> }
}<|fim▁end|> | $event.item = item;
this.toggleSubMenu.emit($event);
return false; |
<|file_name|>work_mode.rs<|end_file_name|><|fim▁begin|>use std::str::FromStr;
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum WorkMode {
Normal, // generate widgets etc.
Sys, // generate -sys with FFI
Doc, // generate documentation file
DisplayNotBound, // Show not bound types
}
impl WorkMode {
pub fn is_normal(self) -> bool {
match self {
WorkMode::Normal => true,
_ => false,
}
}
pub fn is_generate_rust_files(self) -> bool {
match self {
WorkMode::Normal => true,
WorkMode::Sys => true,
_ => false,
}
}
}
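// Usage sketch: parsing and querying stay consistent, e.g.
// "sys".parse::<WorkMode>() == Ok(WorkMode::Sys) and
// WorkMode::Sys.is_generate_rust_files() == true.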
impl Default for WorkMode {
fn default() -> WorkMode {
WorkMode::Normal
}
}
impl FromStr for WorkMode {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"normal" => Ok(WorkMode::Normal),
"sys" => Ok(WorkMode::Sys),
"doc" => Ok(WorkMode::Doc),<|fim▁hole|> }
}<|fim▁end|> | "not_bound" => Ok(WorkMode::DisplayNotBound),
_ => Err(format!("Wrong work mode '{}'", s)),
} |
<|file_name|>fileinfo.py<|end_file_name|><|fim▁begin|># This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Computes and caches various information about files.
"""
import itertools
import re
import os
import atexit
import ly.document
import lydocinfo
import ly.lex
import filecache
import util
import variables
_document_cache = filecache.FileCache()
_suffix_chars_re = re.compile(r'[^-\w]', re.UNICODE)
### XXX otherwise I get a segfault on shutdown when very large music trees
### are made (and every node references the document).
### (The segfault is preceded by a "corrupted double-linked list" message.)
atexit.register(_document_cache.clear)
class _CachedDocument(object):<|fim▁hole|> docinfo = None
music = None
def _cached(filename):
"""Return a _CachedDocument instance for the filename, else creates one."""
filename = os.path.realpath(filename)
try:
c = _document_cache[filename]
except KeyError:
with open(filename, 'rb') as f:
text = util.decode(f.read())
c = _document_cache[filename] = _CachedDocument()
c.variables = v = variables.variables(text)
c.document = ly.document.Document(text, v.get("mode"))
c.filename = c.document.filename = filename
return c
def document(filename):
"""Return a (cached) ly.document.Document for the filename."""
return _cached(filename).document
def docinfo(filename):
"""Return a (cached) LyDocInfo instance for the specified file."""
c = _cached(filename)
if c.docinfo is None:
c.docinfo = lydocinfo.DocInfo(c.document, c.variables)
return c.docinfo
def music(filename):
"""Return a (cached) music.Document instance for the specified file."""
c = _cached(filename)
if c.music is None:
import music
c.music = music.Document(c.document)
return c.music
def textmode(text, guess=True):
"""Returns the type of the given text ('lilypond, 'html', etc.).
Checks the mode variable and guesses otherwise if guess is True.
"""
mode = variables.variables(text).get("mode")
if mode in ly.lex.modes:
return mode
if guess:
return ly.lex.guessMode(text)
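# Example sketch: textmode('\\version "2.18.2" { c }') finds no mode variable,
# so it falls through to ly.lex.guessMode(), which would be expected to
# return 'lilypond' here.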
def includefiles(dinfo, include_path=()):
"""Returns a set of filenames that are included by the DocInfo's document.
The specified include path is used to find files. The own filename
is NOT added to the set. Included files are checked recursively,
relative to our file, relative to the including file, and if that
still yields no file, relative to the directories in the include_path.
If the document has no local filename, only the include_path is
searched for files.
"""
filename = dinfo.document.filename
basedir = os.path.dirname(filename) if filename else None
files = set()
def tryarg(directory, arg):
path = os.path.realpath(os.path.join(directory, arg))
if path not in files and os.path.isfile(path):
files.add(path)
args = docinfo(path).include_args()
find(args, os.path.dirname(path))
return True
def find(incl_args, directory):
for arg in incl_args:
# new, recursive, relative include
if not (directory and tryarg(directory, arg)):
# old include (relative to master file)
if not (basedir and tryarg(basedir, arg)):
# if path is given, also search there:
for p in include_path:
if tryarg(p, arg):
break
find(dinfo.include_args(), basedir)
return files
def basenames(dinfo, includefiles=(), filename=None, replace_suffix=True):
"""Returns the list of basenames a document is expected to create.
The list is created based on includefiles and the define output-suffix and
\bookOutputName and \bookOutputSuffix commands.
You should add '.ext' and/or '-[0-9]+.ext' to find created files.
If filename is given, it is regarded as the filename LilyPond is run on.
Otherwise, the filename of the info's document is read.
If replace_suffix is True (the default), special characters and spaces
in the suffix are replaced with underscores (in the same way as LilyPond
does it), using the replace_suffix_chars() function.
"""
basenames = []
basepath = os.path.splitext(filename or dinfo.document.filename)[0]
dirname, basename = os.path.split(basepath)
if basepath:
basenames.append(basepath)
def args():
yield dinfo.output_args()
for filename in includefiles:
yield docinfo(filename).output_args()
for type, arg in itertools.chain.from_iterable(args()):
if type == "suffix":
if replace_suffix:
# LilyPond (lily-library.scm:223) does this, too
arg = replace_suffix_chars(arg)
arg = basename + '-' + arg
path = os.path.normpath(os.path.join(dirname, arg))
if path not in basenames:
basenames.append(path)
return basenames
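# Example sketch: an output-suffix of "part one!" becomes "part_one_" below,
# so running LilyPond on "score.ly" yields the expected basename
# "score-part_one_".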
def replace_suffix_chars(s):
"""Replace spaces and most non-alphanumeric characters with underscores.
This is used to mimic the behaviour of LilyPond, which also does this,
for the output-suffix. (See scm/lily-library.scm:223.)
"""
return _suffix_chars_re.sub('_', s)<|fim▁end|> | """Contains a document and related items."""
filename = None
document = None
variables = None |
<|file_name|>CharCounterMain.java<|end_file_name|><|fim▁begin|>import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
public class CharCounterMain{
final static Charset enc = StandardCharsets.US_ASCII ;
public CharCounterMain(String ch, String filedir){
if(ch.length() != 1){
System.out.println("The first argument needs to be a char, found string of length "+ch.length());
System.exit(1);
}
char c = ch.charAt(0);
        if( c != ' ' && c != '.' && (c < 'a' || c > 'z') ){ // compare the char directly against the ascii range for a-z
System.out.println("Need a character in range a-z (lowercase only) or a whitespace or a dot, found "+c+"!");
System.exit(1);
}
Path p = Paths.get(filedir);
try {
BufferedReader bf = Files.newBufferedReader(p,enc);
String line;
            String line2 = "" ; // start empty, not null, so concatenation does not prepend "null"
while((line = bf.readLine()) != null){
line2 += line ;
}
<|fim▁hole|>
} catch (IOException e) {
e.printStackTrace();
}
System.out.println("Finished, exiting...");
}
public static void main(String[] args){
if(args.length != 2){
System.out.println("Usage : CharCounterMain <char-to-look-for> <text-file-dir>");
}else{
new CharCounterMain(args[0],args[1]);
}
}
}<|fim▁end|> | CharCounter cc = new CharCounter(c,line2);
int freq = cc.getFrequency();
System.out.println(String.format("Frequency of character %c was %d", c,freq));
|
<|file_name|>WindowProcessorExtensionHolder.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software<|fim▁hole|> * limitations under the License.
*/
package org.wso2.siddhi.core.util.extension.holder;
import org.wso2.siddhi.core.config.ExecutionPlanContext;
import org.wso2.siddhi.core.query.processor.stream.window.WindowProcessor;
public class WindowProcessorExtensionHolder extends AbstractExtensionHolder {
private static WindowProcessorExtensionHolder instance;
private WindowProcessorExtensionHolder(ExecutionPlanContext executionPlanContext) {
super(WindowProcessor.class, executionPlanContext);
}
public static WindowProcessorExtensionHolder getInstance(ExecutionPlanContext executionPlanContext) {
if (instance == null) {
instance = new WindowProcessorExtensionHolder(executionPlanContext);
}
return instance;
}
}<|fim▁end|> | * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from .. util import deprecated
if deprecated.allowed():<|fim▁hole|> from . channel_order import ChannelOrder<|fim▁end|> | |
<|file_name|>uploadfiles.client.controller.js<|end_file_name|><|fim▁begin|>(function () {<|fim▁hole|> .module('uploadfiles')
.controller('UploadfilesController', UploadfilesController);
UploadfilesController.$inject = ['$scope', '$state', '$window', 'Authentication', 'uploadfileResolve'];
function UploadfilesController ($scope, $state, $window, Authentication, uploadfile) {
var vm = this;
vm.authentication = Authentication;
vm.uploadfile = uploadfile;
vm.error = null;
vm.form = {};
vm.remove = remove;
vm.save = save;
// Remove existing Uploadfile
function remove() {
if ($window.confirm('Are you sure you want to delete?')) {
vm.uploadfile.$remove($state.go('uploadfiles.list'));
}
}
// Save Uploadfile
function save(isValid) {
if (!isValid) {
$scope.$broadcast('show-errors-check-validity', 'vm.form.uploadfileForm');
return false;
}
// TODO: move create/update logic to service
if (vm.uploadfile._id) {
vm.uploadfile.$update(successCallback, errorCallback);
} else {
vm.uploadfile.$save(successCallback, errorCallback);
}
function successCallback(res) {
$state.go('uploadfiles.view', {
uploadfileId: res._id
});
}
function errorCallback(res) {
vm.error = res.data.message;
}
}
}
}());<|fim▁end|> | 'use strict';
// Uploadfiles controller
angular |
<|file_name|>project.py<|end_file_name|><|fim▁begin|>"""
virtstrap.project
-----------------
This module contains all the abstractions for dealing with a Project.
Using this object simplifies creating commands that are used to manage
the project.
"""
import os
import sys
from virtstrap import constants
from virtstrap.config import VirtstrapConfig
from virtstrap.utils import call_subprocess
VIRTSTRAP_DIR = constants.VIRTSTRAP_DIR
class Project(object):
@classmethod
def load(cls, options):
"""Creates a project and loads it's configuration immediately"""
project = cls()
project.load_settings(options)
return project
def __init__(self):
self._options = None
self._config = None
def load_settings(self, options):
# Check if project directory is specified
project_dir = getattr(options, 'project_dir', None)
if not project_dir:
project_dir = self._find_project_dir()
project_dir = os.path.abspath(project_dir)
self._project_dir = project_dir
config_file = os.path.join(project_dir, options.config_file)
config = VirtstrapConfig.from_file(config_file,
profiles=options.profiles)
processor = ProjectNameProcessor(project_dir)
project_name = config.process_section('project_name', processor)
self._project_name = project_name
self._config = config
self._config_file = config_file
self._options = options
def _find_project_dir(self):
return find_project_dir()
@property
def name(self):
return self._project_name
@property
def config_file(self):
if not os.path.isfile(self._config_file):
return None
return self._config_file
def set_options(self, options):
self._options = options
def path(self, *paths):
"""Create a path relative to the project"""
return os.path.join(self._project_dir, *paths)
def env_path(self, *paths):
"""Create a path relative to the virtstrap-dir"""
return os.path.join(self._project_dir,
self._options.virtstrap_dir, *paths)
def bin_path(self, *paths):
"""Create a path relative to the virtstrap-dir's bin directory"""
bin_py = 'bin'
if sys.platform == 'win32':
bin_py = 'Scripts'
return self.env_path(bin_py, *paths)
def process_config_section(self, section, processor):
return self._config.process_section(section, processor)
def config(self, section):
"""Grabs processed section data"""
return self._config.processed(section)
def call_bin(self, command_name, args, **options):
command = [self.bin_path(command_name)]
command.extend(args)
return call_subprocess(command, **options)
class NoProjectFound(Exception):
pass
def find_project_dir(current_dir=None):
"""Finds the project directory for the current directory"""
current_dir = current_dir or os.path.abspath(os.curdir)
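    # Walk upwards from the current directory until a virtstrap dir (or symlink) is found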
if VIRTSTRAP_DIR in os.listdir(current_dir):
vs_dir = os.path.join(current_dir, VIRTSTRAP_DIR)
if os.path.islink(vs_dir) or os.path.isdir(vs_dir):
return current_dir
parent_dir = os.path.abspath(os.path.join(current_dir, os.pardir))
if parent_dir == current_dir:
raise NoProjectFound('No project found')
return find_project_dir(parent_dir)
class ProjectNameProcessor(object):
def __init__(self, project_dir):
self._project_dir = os.path.abspath(project_dir)
<|fim▁hole|><|fim▁end|> | def __call__(self, project_name):
return project_name or os.path.basename(self._project_dir) |
<|file_name|>mac_tool.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import struct
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.<|fim▁hole|> # single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest)
else:
if os.path.exists(dest):
os.unlink(dest)
shutil.copy(source, dest)
if extension in ('.plist', '.strings') and convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices']
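    # Note: compares the Xcode version strings lexicographically.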
if os.environ['XCODE_VERSION_ACTUAL'] > '0700':
args.extend(['--auto-activate-custom-fonts'])
if 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ:
args.extend([
'--target-device', 'iphone', '--target-device', 'ipad',
'--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'],
])
else:
args.extend([
'--target-device', 'mac',
'--minimum-deployment-target',
os.environ['MACOSX_DEPLOYMENT_TARGET'],
])
args.extend(['--output-format', 'human-readable-text', '--compile', dest,
source])
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
    except Exception:
fp.close()
return None
fp.close()
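    # Sniff the byte-order mark: UTF-16 (BE or LE) or UTF-8.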
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: (?:for architecture: \S* )?'
r'file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageIosFramework(self, framework):
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
    module_path = os.path.join(framework, 'Modules')
if not os.path.exists(module_path):
os.mkdir(module_path)
module_template = 'framework module %s {\n' \
' umbrella header "%s.h"\n' \
'\n' \
' export *\n' \
' module * { export * }\n' \
'}\n' % (binary, binary)
module_file = open(os.path.join(module_path, 'module.modulemap'), "w")
module_file.write(module_template)
module_file.close()
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileIosFrameworkHeaderMap(self, out, framework, *all_headers):
framework_name = os.path.basename(framework).split('.')[0]
all_headers = map(os.path.abspath, all_headers)
filelist = {}
for header in all_headers:
filename = os.path.basename(header)
filelist[filename] = header
filelist[os.path.join(framework_name, filename)] = header
WriteHmap(out, filelist)
def ExecCopyIosFrameworkHeaders(self, framework, *copy_headers):
    header_path = os.path.join(framework, 'Headers')
if not os.path.exists(header_path):
os.makedirs(header_path)
for header in copy_headers:
shutil.copy(header, os.path.join(header_path, os.path.basename(header)))
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
This invokes 'actool' to compile all the inputs .xcassets files. The
|keys| arguments is a json-encoded dictionary of extra arguments to
pass to 'actool' when the asset catalogs contains an application icon
or a launch image.
Note that 'actool' does not create the Assets.car file if the asset
catalogs does not contains imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
# Note: actool crashes if inputs path are relative, so use os.path.abspath
# to get absolute path name for inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. pick the provisioning profile that best match the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
2. copy Entitlements.plist from user or SDK next to the bundle,
3. code sign the bundle.
"""
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--entitlements',
entitlements_path, '--timestamp=none', os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionary: variables substitutions and values
to overrides when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
Checks all the installed provisioning profiles (or if the user specified
the PROVISIONING_PROFILE variable, only consult it) and select the most
specific that correspond to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each references to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
def NextGreaterPowerOf2(x):
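  # e.g. 5 -> 8; exact powers of two map to themselves (8 -> 8).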
return 2**(x-1).bit_length()
def WriteHmap(output_name, filelist):
"""Generates a header map based on |filelist|.
Per Mark Mentovai:
A header map is structured essentially as a hash table, keyed by names used
in #includes, and providing pathnames to the actual files.
The implementation below and the comment above comes from inspecting:
http://www.opensource.apple.com/source/distcc/distcc-2503/distcc_dist/include_server/headermap.py?txt
while also looking at the implementation in clang in:
https://llvm.org/svn/llvm-project/cfe/trunk/lib/Lex/HeaderMap.cpp
"""
magic = 1751998832
version = 1
_reserved = 0
count = len(filelist)
capacity = NextGreaterPowerOf2(count)
strings_offset = 24 + (12 * capacity)
max_value_length = len(max(filelist.items(), key=lambda (k,v):len(v))[1])
out = open(output_name, "wb")
out.write(struct.pack('<LHHLLLL', magic, version, _reserved, strings_offset,
count, capacity, max_value_length))
# Create empty hashmap buckets.
buckets = [None] * capacity
for file, path in filelist.items():
key = 0
for c in file:
key += ord(c.lower()) * 13
# Fill next empty bucket.
while buckets[key & capacity - 1] is not None:
key = key + 1
buckets[key & capacity - 1] = (file, path)
next_offset = 1
for bucket in buckets:
if bucket is None:
out.write(struct.pack('<LLL', 0, 0, 0))
else:
(file, path) = bucket
key_offset = next_offset
prefix_offset = key_offset + len(file) + 1
suffix_offset = prefix_offset + len(os.path.dirname(path) + os.sep) + 1
next_offset = suffix_offset + len(os.path.basename(path)) + 1
out.write(struct.pack('<LLL', key_offset, prefix_offset, suffix_offset))
# Pad byte since next offset starts at 1.
out.write(struct.pack('<x'))
for bucket in buckets:
if bucket is not None:
(file, path) = bucket
out.write(struct.pack('<%ds' % len(file), file))
out.write(struct.pack('<s', '\0'))
base = os.path.dirname(path) + os.sep
out.write(struct.pack('<%ds' % len(base), base))
out.write(struct.pack('<s', '\0'))
path = os.path.basename(path)
out.write(struct.pack('<%ds' % len(path), path))
out.write(struct.pack('<s', '\0'))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))<|fim▁end|> | # TODO(thakis): This copies file attributes like mtime, while the |
<|file_name|>frame.go<|end_file_name|><|fim▁begin|>package hashgraph
import (
"bytes"
"sort"
"github.com/mosaicnetworks/babble/src/crypto"
"github.com/mosaicnetworks/babble/src/peers"
"github.com/ugorji/go/codec"
)
// Frame represents a section of the hashgraph.
type Frame struct {
Round int // RoundReceived
Peers []*peers.Peer // the authoritative peer-set at Round
Roots map[string]*Root // Roots on top of which Frame Events can be inserted
Events []*FrameEvent // Events with RoundReceived = Round
PeerSets map[int][]*peers.Peer // full peer-set history ([round] => Peers)
Timestamp int64 // unix timestamp (median of round-received famous witnesses)
}
// SortedFrameEvents returns all the events in the Frame, including events in
// roots, sorted by Lamport timestamp.
func (f *Frame) SortedFrameEvents() []*FrameEvent {
sorted := SortedFrameEvents{}
for _, r := range f.Roots {
sorted = append(sorted, r.Events...)
}
sorted = append(sorted, f.Events...)
sort.Sort(sorted)
return sorted
}
// Marshal returns the JSON encoding of Frame.
func (f *Frame) Marshal() ([]byte, error) {
b := new(bytes.Buffer)
jh := new(codec.JsonHandle)
jh.Canonical = true
enc := codec.NewEncoder(b, jh)
if err := enc.Encode(f); err != nil {
return nil, err
}
return b.Bytes(), nil
}
// Unmarshal parses a JSON encoded Frame.
func (f *Frame) Unmarshal(data []byte) error {
b := bytes.NewBuffer(data)
jh := new(codec.JsonHandle)
jh.Canonical = true
dec := codec.NewDecoder(b, jh)
if err := dec.Decode(f); err != nil {
return err
}
return nil
}
// Hash returns the SHA256 hash of the marshalled Frame.
func (f *Frame) Hash() ([]byte, error) {<|fim▁hole|> if err != nil {
return nil, err
}
return crypto.SHA256(hashBytes), nil
}<|fim▁end|> | hashBytes, err := f.Marshal() |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>fn main() {
println!("Hello Rust!");<|fim▁hole|><|fim▁end|> | } |
<|file_name|>test_empty_dataset.py<|end_file_name|><|fim▁begin|>import unittest
import numpy as np
from chainermn.datasets import create_empty_dataset
import chainerx as chx
class TestEmptyDataset(unittest.TestCase):
def setUp(self):
pass
def check_create_empty_dataset(self, original_dataset):
empty_dataset = create_empty_dataset(original_dataset)
self.assertEqual(len(original_dataset), len(empty_dataset))
for i in range(len(original_dataset)):
self.assertEqual((), empty_dataset[i])
def test_empty_dataset_numpy(self):
self.check_empty_dataset(np)
def test_empty_dataset_chx(self):
self.check_empty_dataset(chx)
def check_empty_dataset(self, xp):
n = 10
self.check_create_empty_dataset([])
self.check_create_empty_dataset([0])<|fim▁hole|> self.check_create_empty_dataset(list(range(n)))
self.check_create_empty_dataset(list(range(n * 5 - 1)))
self.check_create_empty_dataset(xp.array([]))
self.check_create_empty_dataset(xp.array([0]))
self.check_create_empty_dataset(xp.arange(n))
self.check_create_empty_dataset(xp.arange(n * 5 - 1))<|fim▁end|> | |
<|file_name|>poplog.py<|end_file_name|><|fim▁begin|>import sys, datetime, os, time
from mcstatus import MinecraftServer
# GLOBALS
# List containing all the mcstatus server objects
serverList = []
# appendLog() appends one population sample for a server to its CSV log
# In: The hostname and population of a server, as well as the timestamp for its retrieval time
# Out: Nada.
def appendLog(host, time, pop):
    fileName = host.replace(".", "") + ".csv"
with open(fileName, "a") as logFile:
# If the file is empty add the column names
if not os.stat(fileName).st_size > 0:
logFile.write('"time","pop"')
# Write the data to the file
logFile.write("\n"+ '"'+ time+ '",'+ str(pop))
# getPopulations() Gets all the populations of the servers and calls appendLog() for each
# In: the list of servers
# Out: Nada
def getPopulations():
global serverList
print("Oy")
# Loop through all the servers
for serverObject in serverList:
status = serverObject.status()
pop = status.players.online
time = datetime.datetime.now().isoformat()
appendLog(serverObject.host, time, pop)
# Entry point.
def main():
global serverList
print("[mpl] Minecraft Population Logger")
print("[mpl] Version 0.1.0")
# No server URLs?
if len(sys.argv[1:]) < 1:
print("[mpl] Error: At least one URL must be specified.")
return 0
# Populate server list
for url in sys.argv[1:]:
serverObject = MinecraftServer.lookup(url)
# If the server was found
if serverObject != None:
print("[mpl]", url, "was found!")
serverList.append(serverObject)
else:
print("[mpl]", url, "not found! It will not be tracked.")
while 1:
getPopulations()
time.sleep(300)<|fim▁hole|><|fim▁end|> |
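# Example (hypothetical hosts): python poplog.py mc.example.org play.example.net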
# Run the main.
main() |
<|file_name|>numeric-method-autoexport.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-pretty-expanded
// This file is intended to test only that methods are automatically
// reachable for each numeric type, for each exported impl, with no imports
// necessary. Testing the methods of the impls is done within the source
// file for each numeric type.
#![feature(core)]
use std::ops::Add;
use std::num::ToPrimitive;
pub fn main() {
// ints
// num
assert_eq!(15_isize.add(6_isize), 21_isize);
assert_eq!(15_i8.add(6i8), 21_i8);<|fim▁hole|>// uints
// num
assert_eq!(15_usize.add(6_usize), 21_usize);
assert_eq!(15_u8.add(6u8), 21_u8);
assert_eq!(15_u16.add(6u16), 21_u16);
assert_eq!(15_u32.add(6u32), 21_u32);
assert_eq!(15_u64.add(6u64), 21_u64);
// floats
// num
assert_eq!(10_f32.to_i32().unwrap(), 10);
assert_eq!(10_f64.to_i32().unwrap(), 10);
}<|fim▁end|> | assert_eq!(15_i16.add(6i16), 21_i16);
assert_eq!(15_i32.add(6i32), 21_i32);
assert_eq!(15_i64.add(6i64), 21_i64);
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|>const Twitter = require('twitter')
const twitterOpts = require('./auth.json')
const client = new Twitter(twitterOpts)
const twttr = require('./twttr/')
twttr.getTrendingTopics(client).then((tt) => {
tt.forEach((topic, idx) => {
twttr.searchTopic(client, topic).then((tweets) => {
let statuses = twttr.transformTweets(tweets)
console.log(statuses)
// insights: word count, graphos etc.
})
})
})<|fim▁end|> | 'use strict'
|
<|file_name|>generate_api_rst.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Read doxygen xml files to find all members of the dolfin
# namespace and generate API doc files per subdirectory of
# dolfin
#
# Written by Tormod Landet, 2017
#
from __future__ import print_function
import sys, os
import parse_doxygen
DOXYGEN_XML_DIR = 'doxygen/xml'
API_GEN_DIR = 'generated_rst_files'
SWIG_DIR = '../dolfin/swig/'
SWIG_FILE = 'docstrings.i'
MOCK_PY = 'mock_cpp_modules.py'
def get_subdir(hpp_file_name):
"""
Return "subdir" for a path name like
/path/to/dolfin/subdir/a_header.h
"""
path_components = hpp_file_name.split(os.sep)
path_components_rev = path_components[::-1]
idx = path_components_rev.index('dolfin')
subdir = path_components_rev[idx - 1]
return subdir
def get_short_path(hpp_file_name):
"""
Return "dolfin/subdir/a_header.h" for a path name like
/path/to/dolfin/subdir/a_header.h
"""
path_components = hpp_file_name.split(os.sep)
if 'dolfin' in path_components:
# dolfin header files
path_components_rev = path_components[::-1]
idx = path_components_rev.index('dolfin')
short_path = path_components_rev[:idx + 1]
else:
# ufc header files
short_path = path_components[-1:]
return os.sep.join(short_path[::-1])
def write_rst(subdir, subdir_members, api_gen_dir):
"""
Write files for Sphinx C++ API documentation
"""
rst_name = os.path.join(api_gen_dir, 'api_gen_%s.rst' % subdir)
print('Generating', rst_name)
# Make output directory
if not os.path.isdir(api_gen_dir):
os.mkdir(api_gen_dir)
prev_short_name = ''
with open(rst_name, 'wt') as rst:
rst.write('.. automatically generated by generate_api_rst.py and parse_doxygen.py\n')
#rst.write('dolfin/%s\n%s' % (subdir, '=' * 80))
#rst.write('\nDocumentation for C++ code found in dolfin/%s/*.h\n\n' % subdir)
rst.write('\n.. contents::\n\n\n')
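        # Each entry: (doxygen kind, RST section heading, sphinx directive name)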
kinds = [('typedef', 'Type definitions', 'doxygentypedef'),
('enum', 'Enumerations', 'doxygenenum'),
('function', 'Functions', 'doxygenfunction'),
('struct', 'Structures', 'doxygenstruct'),
('variable', 'Variables', 'doxygenvariable'),
('class', 'Classes', 'doxygenclass')]
for kind, kind_name, directive in kinds:
if kind in subdir_members:
# Write header H2
rst.write('%s\n%s\n\n' % (kind_name, '-'*70))
for name, member in sorted(subdir_members[kind].items()):
short_name = member.short_name
fn = get_short_path(member.hpp_file_name)
# Write header H3
if short_name != prev_short_name:
rst.write('%s\n%s\n\n' % (short_name, '~'*60))
prev_short_name = short_name
# Info about filename
rst.write('C++ documentation for ``%s`` from ``%s``:\n\n' % (short_name, fn))
# Write documentation for this item
rst.write(member.to_rst())
rst.write('\n\n')
def write_swig(subdir, subdir_members, swig_dir, swig_file_name, swig_header=''):
"""
Write files for SWIG so that we get docstrings in Python
"""
swig_subdir = os.path.join(swig_dir, subdir)
if not os.path.isdir(swig_subdir):
os.mkdir(swig_subdir)
swig_iface_name = os.path.join(swig_subdir, swig_file_name)
print('Generating', swig_iface_name)
with open(swig_iface_name, 'wt') as out:
out.write(swig_header)
out.write('// SWIG docstrings generated by doxygen and generate_api_rst.py / parse_doxygen.py\n\n')
for kind in subdir_members:
for name, member in sorted(subdir_members[kind].items()):<|fim▁hole|>def write_mock_modules(namespace_members, mock_py_module):
"""
Write a mock module so that we can create documentation for
dolfin on ReadTheDocs where we cannot compile so that the
dolfin.cpp.* module are not available. We fake those, but
include the correct docstrings
"""
print('Generating', mock_py_module)
mydir = os.path.dirname(os.path.abspath(__file__))
swig_module_dir = os.path.join(mydir, '..', 'dolfin', 'swig', 'modules')
swig_module_dir = os.path.abspath(swig_module_dir)
if not os.path.isdir(swig_module_dir):
print('SWIG module directory is not present,', swig_module_dir)
print('No mock Python code will be generated')
return
with open(mock_py_module, 'wt') as out:
out.write('#!/usr/bin/env python\n')
out.write('#\n')
out.write('# This file is AUTO GENERATED!\n')
out.write('# This file is fake, full of mock stubs\n')
out.write('# This file is made by generate_api_rst.py\n')
out.write('#\n\n')
out.write('from __future__ import print_function\n')
out.write('from types import ModuleType\n')
out.write('import sys\n')
out.write('\n\nWARNING = "This is a mock object!"\n')
# Loop over SWIG modules and generate mock Python modules
for module_name in os.listdir(swig_module_dir):
module_i = os.path.join(swig_module_dir, module_name, 'module.i')
if not os.path.isfile(module_i):
continue
# Find out which headers are included in this SWIG module
included_headers = set()
for line in open(module_i):
if line.startswith('#include'):
header = line[8:].strip()[1:-1]
included_headers.add(header)
elif line.startswith('%import'):
header = line.split(')')[1].strip()[1:-1]
included_headers.add(header)
module_py_name = '_' + module_name
full_module_py_name = 'dolfin.cpp.' + module_py_name
out.write('\n\n' + '#'*80 + '\n')
out.write('%s = ModuleType("%s")\n' % (module_py_name, full_module_py_name))
out.write('sys.modules["%s"] = %s\n' % (full_module_py_name, module_py_name))
out.write('\n')
print(' Generating module', full_module_py_name)
for member in namespace_members:
# Check if this member is included in the given SWIG module
hpp_file_name = get_short_path(member.hpp_file_name)
if hpp_file_name not in included_headers:
continue
out.write(member.to_mock(modulename=module_py_name))
out.write('\n\n')
def parse_doxygen_xml_and_generate_rst_and_swig(xml_dir, api_gen_dir, swig_dir, swig_file_name,
swig_header='', mock_py_module=''):
# Read doxygen XML files and split namespace members into
# groups based on subdir and kind (class, function, enum etc)
    if os.path.isdir(xml_dir):
namespaces = parse_doxygen.read_doxygen_xml_files(xml_dir, ['dolfin', 'ufc'])
else:
raise OSError('Missing doxygen XML directory %r' % xml_dir)
# Group all documented members into subdir groups (io, la, mesh, fem etc)
sorted_members = list(namespaces['dolfin'].members.values())
sorted_members.sort(key=lambda m: m.name)
all_members = {}
for member in sorted_members:
subdir = get_subdir(member.hpp_file_name)
sd = all_members.setdefault(subdir, {})
kd = sd.setdefault(member.kind, {})
kd[member.name] = member
# Generate Sphinx RST files and SWIG interface files
for subdir, subdir_members in sorted(all_members.items()):
if subdir:
if api_gen_dir:
write_rst(subdir, subdir_members, api_gen_dir)
if swig_dir:
write_swig(subdir, subdir_members, swig_dir, swig_file_name, swig_header)
# Write UFC documenttation, no SWIG for UFC, only RST
if api_gen_dir:
ufc_members = {}
for member in namespaces['ufc'].members.values():
kd = ufc_members.setdefault(member.kind, {})
kd[member.name] = member
write_rst('ufc', ufc_members, api_gen_dir)
# Generate a mock Python module
if mock_py_module:
write_mock_modules(sorted_members, mock_py_module)
if __name__ == '__main__':
swig_dir = SWIG_DIR
allow_empty_xml = False
if '--no-swig' in sys.argv:
swig_dir = None
parse_doxygen_xml_and_generate_rst_and_swig(DOXYGEN_XML_DIR, API_GEN_DIR, swig_dir,
SWIG_FILE, '', MOCK_PY)<|fim▁end|> | out.write(member.to_swig())
out.write('\n')
|
<|file_name|>css_provider.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2015, The Rust-GNOME Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use std::fmt::{self, Display, Formatter};
use ffi::{self, GtkCssProvider};
use glib::translate::{ToGlibPtr, from_glib_full};
use glib::{self, GlibContainer};
#[repr(C)]
pub struct CssProvider {
pointer: *mut GtkCssProvider
}
impl ::StyleProviderTrait for CssProvider {}
impl CssProvider {<|fim▁hole|> unsafe { CssProvider { pointer: ffi::gtk_css_provider_new() } }
}
pub fn get_default() -> Self {
unsafe { CssProvider { pointer: ffi::gtk_css_provider_get_default() } }
}
pub fn get_named(name: &str, variant: &str) -> Self {
unsafe {
CssProvider { pointer: ffi::gtk_css_provider_get_named(name.to_glib_none().0,
variant.to_glib_none().0) }
}
}
pub fn load_from_path(path: &str) -> Result<CssProvider, glib::Error> {
unsafe {
let pointer = ffi::gtk_css_provider_new();
let mut error = ::std::ptr::null_mut();
ffi::gtk_css_provider_load_from_path(pointer, path.to_glib_none().0, &mut error);
if error.is_null() {
Ok(CssProvider { pointer: pointer })
} else {
Err(glib::Error::wrap(error))
}
}
}
}
impl Display for CssProvider {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let tmp: String = unsafe { from_glib_full(ffi::gtk_css_provider_to_string(self.pointer)) };
write!(f, "{}", tmp)
}
}
impl_GObjectFunctions!(CssProvider, GtkCssProvider);
impl_TraitObject!(CssProvider, GtkCssProvider);<|fim▁end|> | pub fn new() -> Self { |
<|file_name|>Node.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2012-2014 The Khronos Group Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and/or associated documentation files (the
* "Materials"), to deal in the Materials without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Materials, and to
* permit persons to whom the Materials are furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
#define LOG_TAG "Node_JNI"
#include "openvx_jni.h"
namespace android
{
//**************************************************************************
// LOCAL VARIABLES
//**************************************************************************
//**************************************************************************
// EXPORTED FUNCTIONS
//**************************************************************************
static void Initialize(JNIEnv *env, jobject obj, jlong g, jlong k)
{
vx_graph graph = (vx_graph)g;
vx_kernel kernel = (vx_kernel)k;
vx_node n = vxCreateGenericNode(graph, kernel);
SetHandle(env, obj, NodeClass, parentName, g);
SetHandle(env, obj, NodeClass, handleName, (jlong)n);
}
static void Finalize(JNIEnv *env, jobject obj)
{
vx_node n = (vx_node)GetHandle(env, obj, NodeClass, handleName);
vxReleaseNode(&n);
SetHandle(env, obj, NodeClass, handleName, 0);
}
static jobject getParameter(JNIEnv *env, jobject obj, jint i)
{
vx_node n = (vx_node)GetHandle(env, obj, NodeClass, handleName);
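        // Build the Java-side Parameter wrapper for this node and index via its (long, int) constructor.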
jclass c = env->FindClass(ParameterClass);
jmethodID id = env->GetMethodID(c, "<init>", "(JI)"OBJECT(Parameter));
jobject p = env->NewObject(c, id, (jlong)n, i);
return p;
}
static jint setParameter(JNIEnv *env, jobject obj, jint index, jlong ref)
{
vx_node n = (vx_node)GetHandle(env, obj, NodeClass, handleName);
return vxSetParameterByIndex(n, index, (vx_reference)ref);
}
static JNINativeMethod method_table[] = {
// { name, signature, function_pointer }
{ "create", "(JJ)V", (void *)Initialize },
{ "destroy", "()V", (void *)Finalize },
{ "getParameter", "(I)"OBJECT(Parameter), (void *)getParameter },
{ "_setParameter", "(IJ)I", (void *)setParameter },
};
int register_org_khronos_OpenVX_Node(JNIEnv *env)
{
PrintJNITable(LOG_TAG, NodeClass, method_table, NELEM(method_table));<|fim▁hole|><|fim▁end|> | return jniRegisterNativeMethods(env, NodeClass, method_table, NELEM(method_table));
}
}; |
<|file_name|>resources.rs<|end_file_name|><|fim▁begin|>use derivative::Derivative;
use eyre::Result;
use hash_utils::{file_sha256, str_sha256};
use path_slash::PathExt;
use schemars::JsonSchema;
use serde::Serialize;
use serde_with::skip_serializing_none;
use std::{
fmt::Display,
path::{Path, PathBuf},
};
use stencila_schema::{CodeChunkExecuteAuto, CodeExecutableExecuteStatus};
use crate::{Pairs, Relation};
/// A resource in a dependency graph (the nodes of the graph)
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, JsonSchema, Serialize)]
#[serde(tag = "type")]
pub enum Resource {
/// A symbol within code, within a document
Symbol(Symbol),
/// A node containing code, or associated with code, within a document
Code(Code),
/// A node within a document
Node(Node),
/// A file within the project
File(File),
/// A declared project `Source`
Source(Source),
/// A programming language module, usually part of an external package
Module(Module),
/// A URL to a remote resource
Url(Url),
}
/// The id of a resource
pub type ResourceId = String;
impl Resource {
/// Get the [`ResourceId`] for a resource
pub fn resource_id(&self) -> ResourceId {
match self {
Resource::Symbol(Symbol { path, name, .. }) => {
["symbol://", &path.to_slash_lossy(), "#", name].concat()
}
Resource::Code(Code { path, id, .. }) => {
["code://", &path.to_slash_lossy(), "#", id].concat()
}
Resource::Node(Node { path, id, .. }) => {
["node://", &path.to_slash_lossy(), "#", id].concat()
}
Resource::File(File { path, .. }) => ["file://", &path.to_slash_lossy()].concat(),
Resource::Source(Source { name, .. }) => ["source://", name].concat(),
Resource::Module(Module { language, name, .. }) => {
["module://", language, "#", name].concat()
}
Resource::Url(Url { url }) => url.clone(),
}
}
/// Generate a [`ResourceDigest`] for a resource.
///
/// If the resource variant does not support generation of a digest,
/// a default (empty) digest is returned.
pub fn digest(&self) -> ResourceDigest {
match self {
Resource::File(File { path }) => ResourceDigest::from_file(path),
_ => ResourceDigest::default(),
}
}
/// Get the [`ResourceInfo`] for a resource
pub fn resource_info(&self) -> ResourceInfo {
ResourceInfo::new(
self.clone(),
None,
None,
None,
Some(self.digest()),
None,
None,
)
}
/// Get the type of [`Node`] for resources that have it
pub fn node_type(&self) -> Option<&str> {
match self {
Resource::Code(Code { kind, .. }) | Resource::Node(Node { kind, .. }) => {
Some(kind.as_str())
}
_ => None,
}
}
/// Get the [`NodeId`] for resources that have it
pub fn node_id(&self) -> Option<&str> {
match self {
Resource::Code(Code { id, .. }) | Resource::Node(Node { id, .. }) => Some(id.as_str()),
_ => None,
}
}
}
/// A digest representing the state of a [`Resource`] and its dependencies.
///
/// The digest is separated into several parts. Although initially it may seem that the
/// parts are redundant ("can't they all be folded into a single digest?"), each
/// part provides useful information. For example, it is useful to store
/// the `content_digest`, in addition to `semantic_digest`, to be able
/// to indicate to the user that a change in the resource has been detected but
/// that it does not appear to change its semantics.
#[derive(Debug, Default, Clone)]
pub struct ResourceDigest {
/// A digest that captures the content of the resource (e.g the `text`
/// of a `CodeChunk`, or the bytes of a file).
pub content_digest: String,
/// A digest that captures the "semantic intent" of the resource
/// with respect to the dependency graph.
///
/// For example, for `Code` resources it is preferably derived from the AST
/// of the code and should only change when the semantics of the code change.
pub semantic_digest: String,
/// A digest of the `dependencies_digest`s of the dependencies of a resource.
///
/// If there are no dependencies then `dependencies_digest` is an empty string.
pub dependencies_digest: String,
/// The count of the number of code dependencies that are stale (i.e. are out of sync with the `KernelSpace`).
///
/// If there are no dependencies then `dependencies_stale` is zero. May include
/// duplicates for diamond shaped dependency graphs so this represents a maximum number.
pub dependencies_stale: u32,
/// The count of the number of code dependencies that had `execute_status == Failed`
///
/// If there are no dependencies then `dependencies_failed` is zero. May include
/// duplicates for diamond shaped dependency graphs so this represents a maximum number.
pub dependencies_failed: u32,
}
impl ResourceDigest {
/// Create a new `ResourceDigest` from its string representation
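    ///
    /// e.g. `"abc.def.ghi.2.1"` (hypothetical digest values) splits into the content,
    /// semantic and dependencies digests plus the stale and failed counts.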
pub fn from_string(string: &str) -> Self {
let parts: Vec<&str> = string.split('.').collect();
let content_digest = parts.get(0).map_or_else(String::new, |str| str.to_string());
let semantic_digest = parts.get(1).map_or_else(String::new, |str| str.to_string());
let dependencies_digest = parts.get(2).map_or_else(String::new, |str| str.to_string());
let dependencies_stale = parts
.get(3)
.map_or(0, |str| str.parse().unwrap_or_default());
let dependencies_failed = parts
.get(4)
.map_or(0, |str| str.parse().unwrap_or_default());
Self {
content_digest,
semantic_digest,
dependencies_digest,
dependencies_stale,
dependencies_failed,
}
}
/// Create a new `ResourceDigest` from strings for content and semantics.
///
/// Before generating the hash of strings remove carriage returns from strings to avoid
/// cross platform differences in generated digests.
pub fn from_strings(content_str: &str, semantic_str: Option<&str>) -> Self {
let content_digest = Self::base64_encode(&str_sha256(&Self::strip_chars(content_str)));
let semantic_digest = semantic_str.map_or_else(String::default, |str| {
Self::base64_encode(&str_sha256(&Self::strip_chars(str)))
});
Self {
content_digest,
semantic_digest,
..Default::default()
}
}
/// Create a new `ResourceDigest` from a file
///
/// If there is an error when hashing the file, a default (empty) digest is returned.
pub fn from_file(path: &Path) -> Self {
match file_sha256(path) {
Ok(bytes) => Self::from_bytes(&bytes, None),
Err(..) => Self::default(),
}
}
/// Create a new `ResourceDigest` from bytes for content and semantics
///
/// To minimize the size of the digest while maintaining uniqueness, the bytes are usually,
/// but not necessarily, the output of a hashing function.
pub fn from_bytes(content_bytes: &[u8], semantic_bytes: Option<&[u8]>) -> Self {
let content_digest = Self::base64_encode(content_bytes);
let semantic_digest = semantic_bytes.map_or_else(String::default, Self::base64_encode);
Self {
content_digest,
semantic_digest,
..Default::default()
}
}
/// Strip carriage returns (and possibly other problematic characters) from strings
pub fn strip_chars(bytes: &str) -> String {
bytes.replace("\r", "")
}
/// Encode bytes as Base64
///
/// Uses a URL safe (https://tools.ietf.org/html/rfc3548#section-4) character set
/// and does not include padding (because it is unnecessary in this use case).
pub fn base64_encode(bytes: &[u8]) -> String {
base64::encode_config(bytes, base64::URL_SAFE_NO_PAD)
}
}
// String representation of `ResourceDigest`
impl Display for ResourceDigest {
fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
formatter,
"{}.{}.{}.{}.{}",
self.content_digest,
self.semantic_digest,
self.dependencies_digest,
self.dependencies_stale,
self.dependencies_failed
)
}
}
// Use `Display` for serialization
impl Serialize for ResourceDigest {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.collect_str(&self.to_string())
}
}
#[skip_serializing_none]
#[derive(Debug, Clone, Serialize)]
pub struct ResourceInfo {
/// The resource (the "subject") that this information is for
pub resource: Resource,
/// The [`Relation`]-[`Resource`] pairs between the resource (the "subject") and
/// other resources (the "objects").
///
/// This is the primary data used to build the dependency graph between resources.
pub relations: Option<Pairs>,
/// The dependencies of the resource
///
/// Derived during graph `update()`.
/// Used when generating an execution plan to determine which of
/// a resource's dependencies need to be executed as well.
pub dependencies: Option<Vec<Resource>>,
/// The direct dependents of the resource
///
/// Derived during graph `update()`.
/// However, since that is done in topological order, we are unable to get all dependents.
/// Doing so would require `update()` to be more time consuming, so at this stage we're avoiding that.
pub dependents: Option<Vec<Resource>>,
/// The depth of the resource in the dependency graph.
///
/// Derived during graph `update()` from the depths of the
/// resource's `dependencies`.
/// A resource that has no dependencies has a depth of zero.
/// Otherwise, the depth is the maximum depth of dependencies plus one.
pub depth: Option<usize>,
/// Under which circumstances the resource should be automatically executed
///
/// In the below descriptions:
///
/// - "run" means that the user made an explicit request to execute the specific resource
/// (e.g. presses the run button on a `CodeChunk`), or the containing resource (e.g. presses
/// the run button on the parent `Article`).
///
/// - "autorun" means that the resource is automatically executed, without an explicit
    /// user request to do so (but in some cases in response to one).
///
/// ## `Never`
///
/// Never automatically execute the resource.
/// Only execute when the user explicitly runs the resource (or its containing resource).
///
/// e.g. a user may tag a `CodeBlock` as `@autorun never` if it is long running
/// and they want to check the outputs of previous code chunks before proceeding
///
    /// When generating execution `Plan`s using:
///
/// - the `PlanOrdering::Topological` option: the resource, and any of its downstream
/// dependents should be excluded from the plan.
///
/// - the `PlanOrdering::Appearance` option: the resource, and any following resources
/// should be excluded from the plan.
///
/// ## `Needed`
///
/// Execute the resource if it is an upstream dependency of a resource that has been run.
/// This is the default.
///
/// e.g. `CodeExpression` #1 depends upon a variable assigned in `CodeChunk` #2.
    /// If #1 is run, and #2 is stale, then #2 will be autorun before #1.
///
/// This only affects execution `Plan`s generated with the `PlanOrdering::Topological` option.
///
/// ## `Always`
///
/// Always execute the resource
///
/// e.g. a user may tag a `CodeBlock` as `@autorun always` if it assigns a random variable
    /// (i.e. is non-deterministic) and every time one of its downstream dependents is run, they
/// want it to be updated.
///
pub execute_auto: Option<CodeChunkExecuteAuto>,
/// Whether the resource is marked as pure or impure.
///
/// Pure resources do not modify other resources (i.e. they have no side effects).
/// This can be determined from whether the resource has any `Declare`, `Assign`, `Alter` or `Write`
/// in its `relations`. Additionally, the user may mark the resource as pure or impure
/// either using `@pure` or `@impure` tags in code comments or via user interfaces.
    /// This property stores that explicit mark. If it is `None` then the resource's "purity"
/// will be inferred from its `relations`.
pub execute_pure: Option<bool>,
/// The [`ResourceDigest`] of the resource when it was last compiled
pub compile_digest: Option<ResourceDigest>,
/// The [`ResourceDigest`] of the resource when it was last executed
pub execute_digest: Option<ResourceDigest>,
/// Whether the last execution of the resource succeeded
pub execute_status: Option<CodeExecutableExecuteStatus>,
}
impl ResourceInfo {
/// Create a default `ResourceInfo` object with only a reference to a `Resource`
pub fn default(resource: Resource) -> Self {
Self {
resource,
relations: None,
dependencies: None,
dependents: None,
depth: None,
execute_auto: None,
execute_pure: None,
compile_digest: None,
execute_digest: None,
execute_status: None,
}
}
/// Create a new `ResourceInfo` object
pub fn new(
resource: Resource,
relations: Option<Pairs>,
execute_auto: Option<CodeChunkExecuteAuto>,
execute_pure: Option<bool>,
compile_digest: Option<ResourceDigest>,
execute_digest: Option<ResourceDigest>,
execute_status: Option<CodeExecutableExecuteStatus>,
) -> Self {
Self {
resource,
relations,
dependencies: None,
dependents: None,
depth: None,
execute_auto,
execute_pure,
compile_digest,
execute_digest,
execute_status,
}
}
/// Is the resource pure (i.e. has no side effects)?
///
/// If the resource has not been explicitly tagged as pure or impure then
/// returns `true` if there are no side-effect causing relations.
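///
/// For example, a resource whose only relation is a `Use` of a symbol has no
/// side effects and is treated as pure, whereas one with a `Declare`, `Assign`,
/// `Alter`, `Import` or `Write` relation is not. With no recorded `relations`
/// at all, purity cannot be established and `false` is returned.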
pub fn is_pure(&self) -> bool {
self.execute_pure.unwrap_or_else(|| match &self.relations {
Some(relations) => !relations.iter().any(|(relation, ..)| {
matches!(
relation,
Relation::Declare(..)
| Relation::Assign(..)
| Relation::Alter(..)
| Relation::Import(..)
| Relation::Write(..)
)
}),
None => false,
})
}
/// Get a list of symbols used by the resource
pub fn symbols_used(&self) -> Vec<Symbol> {
match &self.relations {
Some(relations) => relations
.iter()
.filter_map(|pair| match pair {
(Relation::Use(..), Resource::Symbol(symbol)) => Some(symbol),
_ => None,
})
.cloned()
.collect(),
None => Vec::new(),
}
}
/// Get a list of symbols modified by the resource
pub fn symbols_modified(&self) -> Vec<Symbol> {
match &self.relations {
Some(relations) => relations
.iter()
.filter_map(|pair| match pair {
(Relation::Declare(..), Resource::Symbol(symbol))
| (Relation::Assign(..), Resource::Symbol(symbol))
| (Relation::Alter(..), Resource::Symbol(symbol)) => Some(symbol),
_ => None,
})
.cloned()
.collect(),
None => Vec::new(),<|fim▁hole|> }
/// Is the resource stale?
///
/// Note that, when comparing the `execute_digest` and `compile_digest` for this determination,
/// the `content_digest` part is ignored. This avoids re-execution in situations such as when
/// the user removes a `@autorun always` comment (they probably don't want it to be run again
/// automatically next time). We currently include `dependencies_stale` in the comparison,
/// though that may be unnecessary or inappropriate as well.
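///
/// Illustration: if only a comment in a code chunk changes, the `semantic_digest`
/// captured at compile time still equals the one captured at the last execution,
/// so the chunk is not considered stale; editing the code itself changes the
/// `semantic_digest` and marks it stale. If either digest is missing, the
/// resource is conservatively treated as stale.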
pub fn is_stale(&self) -> bool {
if let (Some(compile_digest), Some(execute_digest)) =
(&self.compile_digest, &self.execute_digest)
{
compile_digest.semantic_digest != execute_digest.semantic_digest
|| compile_digest.dependencies_digest != execute_digest.dependencies_digest
|| compile_digest.dependencies_stale != execute_digest.dependencies_stale
} else {
true
}
}
/// Did execution fail the last time the resource was executed?
///
/// Returns `false` if the resource has not been executed or was executed
/// and succeeded.
pub fn is_fail(&self) -> bool {
matches!(
self.execute_status,
Some(CodeExecutableExecuteStatus::Failed)
)
}
/// The resource was executed, so update the `execute_digest` to the `compile_digest`
/// and set the `execute_status` property.
pub fn did_execute(&mut self, execute_status: Option<CodeExecutableExecuteStatus>) {
self.execute_digest = self.compile_digest.clone();
self.execute_status = execute_status;
}
}
#[derive(Debug, Clone, Derivative, JsonSchema, Serialize)]
#[derivative(PartialEq, Eq, PartialOrd, Ord, Hash)]
#[schemars(deny_unknown_fields)]
pub struct Symbol {
/// The path of the file that the symbol is defined in
#[serde(serialize_with = "serialize_path")]
pub path: PathBuf,
/// The name/identifier of the symbol
pub name: String,
/// The type of the object that the symbol refers to (e.g. `Number`, `Function`)
///
/// Should be used as a hint only, and as such is excluded from
/// equality and hash functions.
#[derivative(PartialEq = "ignore")]
#[derivative(PartialOrd = "ignore")]
#[derivative(Hash = "ignore")]
pub kind: String,
}
/// Create a new `Symbol` resource
pub fn symbol(path: &Path, name: &str, kind: &str) -> Resource {
Resource::Symbol(Symbol {
path: path.to_path_buf(),
name: name.into(),
kind: kind.into(),
})
}
#[derive(Debug, Clone, Derivative, JsonSchema, Serialize)]
#[derivative(PartialEq, Eq, PartialOrd, Ord, Hash)]
#[schemars(deny_unknown_fields)]
pub struct Node {
/// The path of the file that the node is defined in
#[serde(serialize_with = "serialize_path")]
pub path: PathBuf,
/// The id of the node within the document
pub id: String,
/// The type of node e.g. `Link`, `ImageObject`
///
/// Should be used as a hint only, and as such is excluded from
/// equality and hash functions.
#[derivative(PartialEq = "ignore")]
#[derivative(PartialOrd = "ignore")]
#[derivative(Hash = "ignore")]
pub kind: String,
}
/// Create a new `Node` resource
pub fn node(path: &Path, id: &str, kind: &str) -> Resource {
Resource::Node(Node {
path: path.to_path_buf(),
id: id.into(),
kind: kind.into(),
})
}
#[skip_serializing_none]
#[derive(Debug, Clone, Derivative, JsonSchema, Serialize)]
#[derivative(PartialEq, Eq, PartialOrd, Ord, Hash)]
#[schemars(deny_unknown_fields)]
pub struct Code {
/// The path of the file that the node is defined in
#[serde(serialize_with = "serialize_path")]
pub path: PathBuf,
/// The id of the node within the document
pub id: String,
/// The type of node e.g. `Parameter`, `CodeChunk`
#[derivative(PartialEq = "ignore")]
#[derivative(PartialOrd = "ignore")]
#[derivative(Hash = "ignore")]
pub kind: String,
/// The programming language associated with the node (if any)
#[derivative(PartialEq = "ignore")]
#[derivative(PartialOrd = "ignore")]
#[derivative(Hash = "ignore")]
pub language: Option<String>,
}
/// Create a new `Code` resource
pub fn code(path: &Path, id: &str, kind: &str, language: Option<String>) -> Resource {
Resource::Code(Code {
path: path.to_path_buf(),
id: id.into(),
kind: kind.into(),
language,
})
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, JsonSchema, Serialize)]
#[schemars(deny_unknown_fields)]
pub struct File {
/// The path of the file
#[serde(serialize_with = "serialize_path")]
pub path: PathBuf,
}
/// Create a new `File` resource
pub fn file(path: &Path) -> Resource {
Resource::File(File {
path: path.to_path_buf(),
})
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, JsonSchema, Serialize)]
#[schemars(deny_unknown_fields)]
pub struct Source {
/// The name of the project source
pub name: String,
}
/// Create a new `Source` resource
pub fn source(name: &str) -> Resource {
Resource::Source(Source { name: name.into() })
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, JsonSchema, Serialize)]
#[schemars(deny_unknown_fields)]
pub struct Module {
/// The programming language of the module
pub language: String,
/// The name of the module
pub name: String,
}
/// Create a new `Module` resource
pub fn module(language: &str, name: &str) -> Resource {
Resource::Module(Module {
language: language.into(),
name: name.into(),
})
}
#[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, JsonSchema, Serialize)]
#[schemars(deny_unknown_fields)]
pub struct Url {
/// The URL of the external resource
pub url: String,
}
/// Create a new `Url` resource
pub fn url(url: &str) -> Resource {
Resource::Url(Url { url: url.into() })
}
/// Serialize the `path` fields of resources so that they use Unix forward slash
/// separators on all platforms.
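/// e.g. a Windows path such as `C:\docs\report.md` is serialized as `"C:/docs/report.md"`.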
fn serialize_path<S>(path: &Path, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
path.to_slash_lossy().serialize(serializer)
}<|fim▁end|> | } |
<|file_name|>chevron-circle-right.d.ts<|end_file_name|><|fim▁begin|>import * as React from 'react';
import { IconBaseProps } from 'react-icon-base';<|fim▁hole|><|fim▁end|> | export default class FaChevronCircleRight extends React.Component<IconBaseProps, any> { } |
<|file_name|>_private_link_resources_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations:
"""PrivateLinkResourcesOperations async operations.<|fim▁hole|>
:ivar models: Alias to model classes used in this operation group.
:type models: ~device_update.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_account(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PrivateLinkResourceListResult"]:
"""List all private link resources in a device update account.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: Account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkResourceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~device_update.models.PrivateLinkResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_account.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeviceUpdate/accounts/{accountName}/privateLinkResources'} # type: ignore
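# Illustrative call (construction of the authenticated client, and the
# attribute name `private_link_resources`, are assumptions for this sketch):
#
#     async for item in client.private_link_resources.list_by_account("my-rg", "my-account"):
#         print(item)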
async def get(
self,
resource_group_name: str,
account_name: str,
group_id: str,
**kwargs: Any
) -> "_models.GroupInformation":
"""Get the specified private link resource associated with the device update account.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: Account name.
:type account_name: str
:param group_id: The group ID of the private link resource.
:type group_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GroupInformation, or the result of cls(response)
:rtype: ~device_update.models.GroupInformation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GroupInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
'groupId': self._serialize.url("group_id", group_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('GroupInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeviceUpdate/accounts/{accountName}/privateLinkResources/{groupId}'} # type: ignore<|fim▁end|> |
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute. |
<|file_name|>lru-cache.js<|end_file_name|><|fim▁begin|>/**
* Copyright 2018 The AMP HTML Authors. All Rights Reserved.<|fim▁hole|> *
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {dev} from '../log';
/** @const {string} */
const TAG = 'lru-cache';
/**
* @template T
*/
export class LruCache {
/**
* @param {number} capacity
*/
constructor(capacity) {
/** @private @const {number} */
this.capacity_ = capacity;
/** @private {number} */
this.size_ = 0;
/**
* An incrementing counter to define the last access.
* @private {number}
*/
this.access_ = 0;
/** @private {!Object<(number|string), {payload: T, access: number}>} */
this.cache_ = Object.create(null);
}
/**
* Returns whether key is cached.
*
* @param {number|string} key
* @return {boolean}
*/
has(key) {
return !!this.cache_[key];
}
/**
* @param {number|string} key
* @return {T} The cached payload.
*/
get(key) {
const cacheable = this.cache_[key];
if (cacheable) {
cacheable.access = ++this.access_;
return cacheable.payload;
}
return undefined;
}
/**
* @param {number|string} key
* @param {T} payload The payload to cache.
*/
put(key, payload) {
if (!this.has(key)) {
this.size_++;
}
this.cache_[key] = {payload, access: this.access_};
this.evict_();
}
/**
* Evicts the oldest cache entry, if we've exceeded capacity.
*/
evict_() {
if (this.size_ <= this.capacity_) {
return;
}
dev().warn(TAG, 'Trimming LRU cache');
const cache = this.cache_;
let oldest = this.access_ + 1;
let oldestKey;
for (const key in cache) {
const {access} = cache[key];
if (access < oldest) {
oldest = access;
oldestKey = key;
}
}
if (oldestKey !== undefined) {
delete cache[oldestKey];
this.size_--;
}
}
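// Usage sketch (illustrative only):
//
//   const cache = new LruCache(2);
//   cache.put('a', 1);
//   cache.put('b', 2);
//   cache.get('a');    // refreshes 'a' so it is most recently used
//   cache.put('c', 3); // exceeds capacity: evicts 'b', the least recently used
//   cache.has('b');    // false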
}<|fim▁end|> | *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at |
<|file_name|>NonlinearRZ.C<|end_file_name|><|fim▁begin|>/****************************************************************/
/* MOOSE - Multiphysics Object Oriented Simulation Environment */
/* */
/* All contents are licensed under LGPL V2.1 */
/* See LICENSE for full restrictions */
/****************************************************************/
#include "NonlinearRZ.h"
#include "SolidModel.h"
#include "Problem.h"
#include "SymmIsotropicElasticityTensor.h"
namespace SolidMechanics
{
NonlinearRZ::NonlinearRZ( SolidModel & solid_model,
const std::string & name,
InputParameters parameters )
:Nonlinear( solid_model, name, parameters ),
_grad_disp_r(coupledGradient("disp_r")),
_grad_disp_z(coupledGradient("disp_z")),
_grad_disp_r_old(coupledGradientOld("disp_r")),
_grad_disp_z_old(coupledGradientOld("disp_z")),
_disp_r(coupledValue("disp_r")),
_disp_r_old(coupledValueOld("disp_r"))
{
}
////////////////////////////////////////////////////////////////////////
NonlinearRZ::~NonlinearRZ()
{
}
////////////////////////////////////////////////////////////////////////
void
NonlinearRZ::computeIncrementalDeformationGradient( std::vector<ColumnMajorMatrix> & Fhat )
{
// A = grad(u(k+1) - u(k))
// Fbar = 1 + grad(u(k))
// Fhat = 1 + A*(Fbar^-1)
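// Derivation sketch: with F(k+1) = 1 + grad(u(k+1)) and the multiplicative
// split F(k+1) = Fhat*Fbar, we have Fhat = F(k+1)*(Fbar^-1)
// = (Fbar + A)*(Fbar^-1) = 1 + A*(Fbar^-1).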
ColumnMajorMatrix A;
ColumnMajorMatrix Fbar;
ColumnMajorMatrix Fbar_inverse;
ColumnMajorMatrix Fhat_average;<|fim▁hole|> for ( unsigned qp= 0; qp < _solid_model.qrule()->n_points(); ++qp )
{
fillMatrix( qp, _grad_disp_r, _grad_disp_z, _disp_r, A );
fillMatrix( qp, _grad_disp_r_old, _grad_disp_z_old, _disp_r_old, Fbar);
A -= Fbar;
Fbar.addDiag( 1 );
_Fbar[qp] = Fbar;
// Get Fbar^(-1)
// Computing the inverse is generally a bad idea.
// It's better to compute LU factors. For now at least, we'll take
// a direct route.
invertMatrix( Fbar, Fbar_inverse );
Fhat[qp] = A * Fbar_inverse;
Fhat[qp].addDiag( 1 );
// Now include the contribution for the integration of Fhat over the element
Fhat_average += Fhat[qp] * _solid_model.JxW(qp);
volume += _solid_model.JxW(qp); // Accumulate original configuration volume
}
Fhat_average /= volume;
const Real det_Fhat_average( detMatrix( Fhat_average ) );
const Real third( 1./3. );
// Finalize volumetric locking correction
for ( unsigned qp=0; qp < _solid_model.qrule()->n_points(); ++qp )
{
const Real det_Fhat( detMatrix( Fhat[qp] ) );
const Real factor( std::pow( det_Fhat_average/det_Fhat, third ) );
Fhat[qp] *= factor;
}
// Moose::out << "Fhat(0,0)" << Fhat[0](0,0) << std::endl;
}
////////////////////////////////////////////////////////////////////////
void
NonlinearRZ::computeDeformationGradient( unsigned int qp, ColumnMajorMatrix & F)
{
mooseAssert(F.n() == 3 && F.m() == 3, "computeDefGrad requires 3x3 matrix");
F(0,0) = _grad_disp_r[qp](0) + 1;
F(0,1) = _grad_disp_r[qp](1);
F(0,2) = 0;
F(1,0) = _grad_disp_z[qp](0);
F(1,1) = _grad_disp_z[qp](1) + 1;
F(1,2) = 0;
F(2,0) = 0;
F(2,1) = 0;
F(2,2) = (_solid_model.q_point(qp)(0) != 0.0 ? _disp_r[qp]/_solid_model.q_point(qp)(0) : 0.0) + 1;
}
////////////////////////////////////////////////////////////////////////
void
NonlinearRZ::fillMatrix( unsigned int qp,
const VariableGradient & grad_r,
const VariableGradient & grad_z,
const VariableValue & u,
ColumnMajorMatrix & A) const
{
mooseAssert(A.n() == 3 && A.m() == 3, "computeDefGrad requires 3x3 matrix");
A(0,0) = grad_r[qp](0);
A(0,1) = grad_r[qp](1);
A(0,2) = 0;
A(1,0) = grad_z[qp](0);
A(1,1) = grad_z[qp](1);
A(1,2) = 0;
A(2,0) = 0;
A(2,1) = 0;
A(2,2) = (_solid_model.q_point(qp)(0) != 0.0 ? u[qp]/_solid_model.q_point(qp)(0) : 0.0);
}
//////////////////////////////////////////////////////////////////////////
Real
NonlinearRZ::volumeRatioOld(unsigned int qp) const
{
ColumnMajorMatrix Fnm1T;
fillMatrix( qp, _grad_disp_r_old, _grad_disp_z_old, _disp_r_old, Fnm1T);
Fnm1T.addDiag( 1 );
return detMatrix(Fnm1T);
}
//////////////////////////////////////////////////////////////////////////
}<|fim▁end|> | Real volume(0);
_Fbar.resize(_solid_model.qrule()->n_points());
|
<|file_name|>environmental_probe.py<|end_file_name|><|fim▁begin|># ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from pychron.hardware.core.core_device import CoreDevice
class TempHumMicroServer(CoreDevice):
"""
http://www.omega.com/Manuals/manualpdf/M3861.pdf
iServer MicroServer<|fim▁hole|>
tested with iTHX-W
"""
scan_func = 'read_temperature'
def read_temperature(self, **kw):
v = self.ask('*SRTF', timeout=1.0, **kw)
return self._parse_response(v)
def read_humidity(self, **kw):
v = self.ask('*SRH', timeout=1.0, **kw)
return self._parse_response(v)
def _parse_response(self, v):
try:
return float(v)
except (AttributeError, ValueError, TypeError):
return self.get_random_value()
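# Usage sketch (illustrative only; device construction and configuration follow
# the usual pychron CoreDevice conventions, which are assumed here):
#
#     probe = TempHumMicroServer(name='ithx')
#     temp = probe.read_temperature()  # sends '*SRTF', expects a float back
#     hum = probe.read_humidity()      # sends '*SRH', expects a float back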
# ============= EOF =============================================<|fim▁end|> | |
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for bluebird 3.5
// Project: https://github.com/petkaantonov/bluebird
// Definitions by: Leonard Hecker <https://github.com/lhecker>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.3
/*!
* The code following this comment originates from:
* https://github.com/types/npm-bluebird
*
* Note for browser users: use bluebird-global typings instead of this one
* if you want to use Bluebird via the global Promise symbol.
*
* Licensed under:
* The MIT License (MIT)
*
* Copyright (c) 2016 unional
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
type CatchFilter<E> = (new (...args: any[]) => E) | ((error: E) => boolean) | (object & E);
declare class Bluebird<R> implements PromiseLike<R>, Bluebird.Inspection<R> {
/**
* Create a new promise. The passed in function will receive functions `resolve` and `reject` as its arguments which can be called to seal the fate of the created promise.
* If promise cancellation is enabled, passed in function will receive one more function argument `onCancel` that allows to register an optional cancellation callback.
*/
constructor(callback: (resolve: (thenableOrResult?: R | PromiseLike<R>) => void, reject: (error?: any) => void, onCancel?: (callback: () => void) => void) => void);
/**
* Promises/A+ `.then()`. Returns a new promise chained from this promise.
*
* The new promise will be rejected or resolved depending on the passed `fulfilledHandler`, `rejectedHandler` and the state of this promise.
*/
// Based on PromiseLike.then, but returns a Bluebird instance.
then<U>(onFulfill?: (value: R) => U | PromiseLike<U>, onReject?: (error: any) => U | PromiseLike<U>): Bluebird<U>; // For simpler signature help.
then<TResult1 = R, TResult2 = never>(
onfulfilled?: ((value: R) => TResult1 | PromiseLike<TResult1>) | null,
onrejected?: ((reason: any) => TResult2 | PromiseLike<TResult2>) | null
): Bluebird<TResult1 | TResult2>;
/**
* This is a catch-all exception handler, shortcut for calling `.then(null, handler)` on this promise.
*
* Any exception happening in a `.then`-chain will propagate to nearest `.catch` handler.
*
* Alias `.caught();` for compatibility with earlier ECMAScript version.
*/
catch(onReject: (error: any) => R | PromiseLike<R>): Bluebird<R>;
catch<U>(onReject: ((error: any) => U | PromiseLike<U>) | undefined | null): Bluebird<U | R>;
/**
* This extends `.catch` to work more like catch-clauses in languages like Java or C#.
*
* Instead of manually checking `instanceof` or `.name === "SomeError"`,
* you may specify a number of error constructors which are eligible for this catch handler.
* The catch handler that is first met that has eligible constructors specified, is the one that will be called.
*
* This method also supports predicate-based filters.
* If you pass a predicate function instead of an error constructor, the predicate will receive the error as an argument.
* The return result of the predicate will be used determine whether the error handler should be called.
*
* Alias `.caught();` for compatibility with earlier ECMAScript version.
*/
catch<E1, E2, E3, E4, E5>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
filter3: CatchFilter<E3>,
filter4: CatchFilter<E4>,
filter5: CatchFilter<E5>,
onReject: (error: E1 | E2 | E3 | E4 | E5) => R | PromiseLike<R>,
): Bluebird<R>;
catch<U, E1, E2, E3, E4, E5>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
filter3: CatchFilter<E3>,
filter4: CatchFilter<E4>,
filter5: CatchFilter<E5>,
onReject: (error: E1 | E2 | E3 | E4 | E5) => U | PromiseLike<U>,
): Bluebird<U | R>;
catch<E1, E2, E3, E4>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
filter3: CatchFilter<E3>,
filter4: CatchFilter<E4>,
onReject: (error: E1 | E2 | E3 | E4) => R | PromiseLike<R>,
): Bluebird<R>;
catch<U, E1, E2, E3, E4>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
filter3: CatchFilter<E3>,
filter4: CatchFilter<E4>,
onReject: (error: E1 | E2 | E3 | E4) => U | PromiseLike<U>,
): Bluebird<U | R>;
catch<E1, E2, E3>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
filter3: CatchFilter<E3>,
onReject: (error: E1 | E2 | E3) => R | PromiseLike<R>,
): Bluebird<R>;
catch<U, E1, E2, E3>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
filter3: CatchFilter<E3>,
onReject: (error: E1 | E2 | E3) => U | PromiseLike<U>,
): Bluebird<U | R>;
catch<E1, E2>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
onReject: (error: E1 | E2) => R | PromiseLike<R>,
): Bluebird<R>;
catch<U, E1, E2>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
onReject: (error: E1 | E2) => U | PromiseLike<U>,
): Bluebird<U | R>;
catch<E1>(
filter1: CatchFilter<E1>,
onReject: (error: E1) => R | PromiseLike<R>,
): Bluebird<R>;
catch<U, E1>(
filter1: CatchFilter<E1>,
onReject: (error: E1) => U | PromiseLike<U>,
): Bluebird<U | R>;
/**
* This is a catch-all exception handler, shortcut for calling `.then(null, handler)` on this promise.
*
* Any exception happening in a `.then`-chain will propagate to the nearest `.catch` handler.
*
* Alias `.caught();` for compatibility with earlier ECMAScript version.
*/
caught: Bluebird<R>["catch"];
/**
* Like `.catch` but instead of catching all types of exceptions, it only catches those that don't originate from thrown errors but rather from explicit rejections.
*/
error<U>(onReject: (reason: any) => U | PromiseLike<U>): Bluebird<U>;
/**
* Pass a handler that will be called regardless of this promise's fate. Returns a new promise chained from this promise.
*
* There are special semantics for `.finally()` in that the final value cannot be modified from the handler.
*
* Alias `.lastly();` for compatibility with earlier ECMAScript version.
*/
finally<U>(handler: () => U | PromiseLike<U>): Bluebird<R>;
lastly<U>(handler: () => U | PromiseLike<U>): Bluebird<R>;
/**
* Create a promise that follows this promise, but is bound to the given `thisArg` value. A bound promise will call its handlers with the bound value set to `this`.
*
* Additionally promises derived from a bound promise will also be bound promises with the same `thisArg` binding as the original promise.
*/
bind(thisArg: any): Bluebird<R>;
/**
* Like `.then()`, but any unhandled rejection that ends up here will be thrown as an error.
*/
done<U>(onFulfilled?: (value: R) => U | PromiseLike<U>, onRejected?: (error: any) => U | PromiseLike<U>): void;
/**
* Like `.finally()`, but not called for rejections.
*/
tap<U>(onFulFill: (value: R) => PromiseLike<U> | U): Bluebird<R>;
/**
* Like `.catch()` but rethrows the error
*/
tapCatch<U>(onReject: (error?: any) => U | PromiseLike<U>): Bluebird<R>;
tapCatch<U, E1, E2, E3, E4, E5>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
filter3: CatchFilter<E3>,
filter4: CatchFilter<E4>,
filter5: CatchFilter<E5>,
onReject: (error: E1 | E2 | E3 | E4 | E5) => U | PromiseLike<U>,
): Bluebird<R>;
tapCatch<U, E1, E2, E3, E4>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
filter3: CatchFilter<E3>,
filter4: CatchFilter<E4>,
onReject: (error: E1 | E2 | E3 | E4) => U | PromiseLike<U>,
): Bluebird<R>;
tapCatch<U, E1, E2, E3>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
filter3: CatchFilter<E3>,
onReject: (error: E1 | E2 | E3) => U | PromiseLike<U>,
): Bluebird<R>;
tapCatch<U, E1, E2>(
filter1: CatchFilter<E1>,
filter2: CatchFilter<E2>,
onReject: (error: E1 | E2) => U | PromiseLike<U>,
): Bluebird<R>;
tapCatch<U, E1>(
filter1: CatchFilter<E1>,
onReject: (error: E1) => U | PromiseLike<U>,
): Bluebird<R>;
/**
* Same as calling `Promise.delay(ms, this)`.
*/
delay(ms: number): Bluebird<R>;
/**
* Returns a promise that will be fulfilled with this promise's fulfillment value or rejection reason.
* However, if this promise is not fulfilled or rejected within ms milliseconds, the returned promise
* is rejected with a TimeoutError or the error as the reason.
*
* You may specify a custom error message with the `message` parameter.
*/
timeout(ms: number, message?: string | Error): Bluebird<R>;
/**
* Register a node-style callback on this promise.
*
* When this promise is either fulfilled or rejected,
* the node callback will be called back with the node.js convention where error reason is the first argument and success value is the second argument.
* The error argument will be `null` in case of success.
* If the `callback` argument is not a function, this method does not do anything.
*/
nodeify(callback: (err: any, value?: R) => void, options?: Bluebird.SpreadOption): this;
nodeify(...sink: any[]): this;
asCallback(callback: (err: any, value?: R) => void, options?: Bluebird.SpreadOption): this;
asCallback(...sink: any[]): this;
/**
* See if this `promise` has been fulfilled.
*/
isFulfilled(): boolean;
/**
* See if this `promise` has been rejected.
*/
isRejected(): boolean;
/**
* See if this `promise` is still pending.
*/
isPending(): boolean;
/**
* See if this `promise` has been cancelled.
*/
isCancelled(): boolean;
/**
* See if this `promise` is resolved -> either fulfilled or rejected.
*/
isResolved(): boolean;
/**
* Get the fulfillment value of the underlying promise. Throws if the promise isn't fulfilled yet.
*
* throws `TypeError`
*/
value(): R;
/**
* Get the rejection reason for the underlying promise. Throws if the promise isn't rejected yet.
*
* throws `TypeError`
*/
reason(): any;
/**
* Synchronously inspect the state of this `promise`. The `PromiseInspection` will represent the state of
* the promise as snapshotted at the time of calling `.reflect()`.
*/
reflect(): Bluebird<Bluebird.Inspection<R>>;
reflect(): Bluebird<Bluebird.Inspection<any>>;
/**
* This is a convenience method for doing:
*
* <code>
* promise.then(function(obj){
* return obj[propertyName].call(obj, arg...);
* });
* </code>
*/
call(propertyName: keyof R, ...args: any[]): Bluebird<any>;
/**
* This is a convenience method for doing:
*
* <code>
* promise.then(function(obj){
* return obj[propertyName];
* });
* </code>
*/
get<U extends keyof R>(key: U): Bluebird<R[U]>;
/**
* Convenience method for:
*
* <code>
* .then(function() {
* return value;
* });
* </code>
*
* in the case where `value` doesn't change its value. That means `value` is bound at the time of calling `.return()`
*
* Alias `.thenReturn();` for compatibility with earlier ECMAScript version.
*/
return(): Bluebird<void>;
return<U>(value: U): Bluebird<U>;
thenReturn(): Bluebird<void>;
thenReturn<U>(value: U): Bluebird<U>;
/**
* Convenience method for:
*
* <code>
* .then(function() {
* throw reason;
* });
* </code>
* Same limitations apply as with `.return()`.
*
* Alias `.thenThrow();` for compatibility with earlier ECMAScript version.
*/
throw(reason: Error): Bluebird<never>;
thenThrow(reason: Error): Bluebird<never>;
/**
* Convenience method for:
*
* <code>
* .catch(function() {
* return value;
* });
* </code>
*
* in the case where `value` doesn't change its value. That means `value` is bound at the time of calling `.catchReturn()`
*/
catchReturn<U>(value: U): Bluebird<R | U>;
// No need to be specific about Error types in these overrides, since there's no handler function
catchReturn<U>(
filter1: CatchFilter<Error>,
filter2: CatchFilter<Error>,
filter3: CatchFilter<Error>,
filter4: CatchFilter<Error>,
filter5: CatchFilter<Error>,
value: U,
): Bluebird<R | U>;
catchReturn<U>(
filter1: CatchFilter<Error>,
filter2: CatchFilter<Error>,
filter3: CatchFilter<Error>,
filter4: CatchFilter<Error>,
value: U,
): Bluebird<R | U>;
catchReturn<U>(
filter1: CatchFilter<Error>,
filter2: CatchFilter<Error>,
filter3: CatchFilter<Error>,
value: U,
): Bluebird<R | U>;
catchReturn<U>(
filter1: CatchFilter<Error>,
filter2: CatchFilter<Error>,
value: U,
): Bluebird<R | U>;
catchReturn<U>(
filter1: CatchFilter<Error>,
value: U,
): Bluebird<R | U>;
/**
* Convenience method for:
*
* <code>
* .catch(function() {
* throw reason;
* });
* </code>
* Same limitations apply as with `.catchReturn()`.
*/
catchThrow(reason: Error): Bluebird<R>;
// No need to be specific about Error types in these overrides, since there's no handler function
catchThrow(
filter1: CatchFilter<Error>,
filter2: CatchFilter<Error>,
filter3: CatchFilter<Error>,
filter4: CatchFilter<Error>,
filter5: CatchFilter<Error>,
reason: Error,
): Bluebird<R>;
catchThrow(
filter1: CatchFilter<Error>,
filter2: CatchFilter<Error>,
filter3: CatchFilter<Error>,
filter4: CatchFilter<Error>,
reason: Error,
): Bluebird<R>;
catchThrow(
filter1: CatchFilter<Error>,
filter2: CatchFilter<Error>,
filter3: CatchFilter<Error>,
reason: Error,
): Bluebird<R>;
catchThrow(
filter1: CatchFilter<Error>,
filter2: CatchFilter<Error>,
reason: Error,
): Bluebird<R>;
catchThrow(
filter1: CatchFilter<Error>,
reason: Error,
): Bluebird<R>;
/**
* Convert to String.
*/
toString(): string;
/**
* This is implicitly called by `JSON.stringify` when serializing the object. Returns a serialized representation of the `Promise`.
*/
toJSON(): object;
/**
* Like calling `.then`, but the fulfillment value or rejection reason is assumed to be an array, which is flattened to the formal parameters of the handlers.
*/
spread<U, W>(fulfilledHandler: (...values: W[]) => U | PromiseLike<U>): Bluebird<U>;
spread<U>(fulfilledHandler: (...args: any[]) => U | PromiseLike<U>): Bluebird<U>;
/**
* Same as calling `Promise.all(thisPromise)`. With the exception that if this promise is bound to a value, the returned promise is bound to that value too.
*/
// TODO type inference from array-resolving promise?
all<U>(): Bluebird<U[]>;
/**
* Same as calling `Promise.props(thisPromise)`. With the exception that if this promise is bound to a value, the returned promise is bound to that value too.
*/
props<K, V>(this: PromiseLike<Map<K, PromiseLike<V> | V>>): Bluebird<Map<K, V>>;
props<T>(this: PromiseLike<Bluebird.ResolvableProps<T>>): Bluebird<T>;
/**
* Same as calling `Promise.any(thisPromise)`. With the exception that if this promise is bound to a value, the returned promise is bound to that value too.
*/
// TODO type inference from array-resolving promise?
any<U>(): Bluebird<U>;
/**
* Same as calling `Promise.some(thisPromise)`. With the exception that if this promise is bound to a value, the returned promise is bound to that value too.
*/
// TODO type inference from array-resolving promise?
some<U>(count: number): Bluebird<U[]>;
/**
* Same as calling `Promise.race(thisPromise, count)`. With the exception that if this promise is bound to a value, the returned promise is bound to that value too.
*/
// TODO type inference from array-resolving promise?
race<U>(): Bluebird<U>;
/**
* Same as calling `Bluebird.map(thisPromise, mapper)`. With the exception that if this promise is bound to a value, the returned promise is bound to that value too.
*/
// TODO type inference from array-resolving promise?
map<Q, U>(mapper: (item: Q, index: number, arrayLength: number) => U | PromiseLike<U>, options?: Bluebird.ConcurrencyOption): Bluebird<U[]>;<|fim▁hole|>
/**
* Same as calling `Promise.reduce(thisPromise, Function reducer, initialValue)`. With the exception that if this promise is bound to a value, the returned promise is bound to that value too.
*/
// TODO type inference from array-resolving promise?
reduce<Q, U>(reducer: (memo: U, item: Q, index: number, arrayLength: number) => U | PromiseLike<U>, initialValue?: U): Bluebird<U>;
/**
* Same as calling ``Promise.filter(thisPromise, filterer)``. With the exception that if this promise is bound to a value, the returned promise is bound to that value too.
*/
// TODO type inference from array-resolving promise?
filter<U>(filterer: (item: U, index: number, arrayLength: number) => boolean | PromiseLike<boolean>, options?: Bluebird.ConcurrencyOption): Bluebird<U[]>;
/**
* Same as calling ``Bluebird.each(thisPromise, iterator)``. With the exception that if this promise is bound to a value, the returned promise is bound to that value too.
*/
each<R, U>(iterator: (item: R, index: number, arrayLength: number) => U | PromiseLike<U>): Bluebird<R[]>;
/**
* Same as calling ``Bluebird.mapSeries(thisPromise, iterator)``. With the exception that if this promise is bound to a value, the returned promise is bound to that value too.
*/
mapSeries<R, U>(iterator: (item: R, index: number, arrayLength: number) => U | PromiseLike<U>): Bluebird<U[]>;
/**
* Cancel this `promise`. Will not do anything if this promise is already settled or if the cancellation feature has not been enabled
*/
cancel(): void;
/**
* Basically sugar for doing: somePromise.catch(function(){});
*
* Which is needed in case error handlers are attached asynchronously to the promise later, which would otherwise result in premature unhandled rejection reporting.
*/
suppressUnhandledRejections(): void;
/**
* Start the chain of promises with `Promise.try`. Any synchronous exceptions will be turned into rejections on the returned promise.
*
* Note about second argument: if it's specifically a true array, its values become respective arguments for the function call.
* Otherwise it is passed as is as the first argument for the function call.
*
* Alias for `attempt();` for compatibility with earlier ECMAScript version.
*/
static try<R>(fn: () => R | PromiseLike<R>): Bluebird<R>;
static attempt<R>(fn: () => R | PromiseLike<R>): Bluebird<R>;
/**
* Returns a new function that wraps the given function `fn`.
* The new function will always return a promise that is fulfilled with the original functions return values or rejected with thrown exceptions from the original function.
* This method is convenient when a function can sometimes return synchronously or throw synchronously.
*/
static method<R, A1>(fn: (arg1: A1) => R | PromiseLike<R>): (arg1: A1) => Bluebird<R>;
static method<R, A1, A2>(fn: (arg1: A1, arg2: A2) => R | PromiseLike<R>): (arg1: A1, arg2: A2) => Bluebird<R>;
static method<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3) => R | PromiseLike<R>): (arg1: A1, arg2: A2, arg3: A3) => Bluebird<R>;
static method<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => R | PromiseLike<R>): (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Bluebird<R>;
static method<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => R | PromiseLike<R>): (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Bluebird<R>;
static method<R>(fn: (...args: any[]) => R | PromiseLike<R>): (...args: any[]) => Bluebird<R>;
/**
* Create a promise that is resolved with the given `value`. If `value` is a thenable or promise, the returned promise will assume its state.
*/
static resolve(): Bluebird<void>;
static resolve<R>(value: R | PromiseLike<R>): Bluebird<R>;
/**
* Create a promise that is rejected with the given `reason`.
*/
static reject(reason: any): Bluebird<never>;
/**
* Create a promise with undecided fate and return a `PromiseResolver` to control it. See Promise resolution (#promise-resolution).
*/
static defer<R>(): Bluebird.Resolver<R>;
/**
* Cast the given `value` to a trusted promise.
*
* If `value` is already a trusted `Promise`, it is returned as is. If `value` is not a thenable, a fulfilled Promise is returned with `value` as its fulfillment value.
* If `value` is a thenable (a Promise-like object, like those returned by jQuery's `$.ajax`), returns a trusted Promise that assimilates the state of the thenable.
*/
static cast<R>(value: R | PromiseLike<R>): Bluebird<R>;
/**
* Sugar for `Promise.resolve(undefined).bind(thisArg);`. See `.bind()`.
*/
static bind(thisArg: any): Bluebird<void>;
/**
* See if `value` is a trusted Promise.
*/
static is(value: any): boolean;
/**
* Call this right after the library is loaded to enabled long stack traces.
*
* Long stack traces cannot be disabled after being enabled, and cannot be enabled after promises have already been created.
* Long stack traces imply a substantial performance penalty, around 4-5x for throughput and 0.5x for latency.
*/
static longStackTraces(): void;
/**
* Returns a promise that will be resolved with value (or undefined) after given ms milliseconds.
* If value is a promise, the delay will start counting down when it is fulfilled and the returned
* promise will be fulfilled with the fulfillment value of the value promise.
*/
static delay<R>(ms: number, value: R | PromiseLike<R>): Bluebird<R>;
static delay(ms: number): Bluebird<void>;
/**
* Returns a function that will wrap the given `nodeFunction`.
*
* Instead of taking a callback, the returned function will return a promise whose fate is decided by the callback behavior of the given node function.
* The node function should conform to node.js convention of accepting a callback as last argument and
* calling that callback with error as the first argument and success value on the second argument.
*
* If the `nodeFunction` calls its callback with multiple success values, the fulfillment value will be an array of them.
*
* If you pass a `receiver`, the `nodeFunction` will be called as a method on the `receiver`.
*/
static promisify<T>(
func: (callback: (err: any, result?: T) => void) => void,
options?: Bluebird.PromisifyOptions
): () => Bluebird<T>;
static promisify<T, A1>(
func: (arg1: A1, callback: (err: any, result?: T) => void) => void,
options?: Bluebird.PromisifyOptions
): (arg1: A1) => Bluebird<T>;
static promisify<T, A1, A2>(
func: (arg1: A1, arg2: A2, callback: (err: any, result?: T) => void) => void,
options?: Bluebird.PromisifyOptions
): (arg1: A1, arg2: A2) => Bluebird<T>;
static promisify<T, A1, A2, A3>(
func: (arg1: A1, arg2: A2, arg3: A3, callback: (err: any, result?: T) => void) => void,
options?: Bluebird.PromisifyOptions
): (arg1: A1, arg2: A2, arg3: A3) => Bluebird<T>;
static promisify<T, A1, A2, A3, A4>(
func: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: (err: any, result?: T) => void) => void,
options?: Bluebird.PromisifyOptions
): (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Bluebird<T>;
static promisify<T, A1, A2, A3, A4, A5>(
func: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: (err: any, result?: T) => void) => void,
options?: Bluebird.PromisifyOptions
): (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Bluebird<T>;
static promisify(nodeFunction: (...args: any[]) => void, options?: Bluebird.PromisifyOptions): (...args: any[]) => Bluebird<any>;
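// Illustrative use (Node's `fs` module is assumed to be available):
//
//   import * as fs from "fs";
//   const readFileAsync = Bluebird.promisify(fs.readFile);
//   readFileAsync("package.json").then((buf) => console.log(buf.toString()));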
/**
* Promisifies the entire object by going through the object's properties and creating an async equivalent of each function on the object and its prototype chain.
*
* The promisified method name will be the original method name postfixed with `Async`. Returns the input object.
*
* Note that the original methods on the object are not overwritten but new methods are created with the `Async`-postfix. For example,
* if you `promisifyAll()` the node.js `fs` object use `fs.statAsync()` to call the promisified `stat` method.
*/
// TODO how to model promisifyAll?
static promisifyAll<T extends object>(target: T, options?: Bluebird.PromisifyAllOptions<T>): T;
/**
* Returns a promise that is resolved by a node style callback function.
*/
static fromNode(resolver: (callback: (err: any, result?: any) => void) => void, options?: Bluebird.FromNodeOptions): Bluebird<any>;
static fromNode<T>(resolver: (callback: (err: any, result?: T) => void) => void, options?: Bluebird.FromNodeOptions): Bluebird<T>;
static fromCallback(resolver: (callback: (err: any, result?: any) => void) => void, options?: Bluebird.FromNodeOptions): Bluebird<any>;
static fromCallback<T>(resolver: (callback: (err: any, result?: T) => void) => void, options?: Bluebird.FromNodeOptions): Bluebird<T>;
/**
* Returns a function that can use `yield` to run asynchronous code synchronously.
*
* This feature requires the support of generators which are drafted in the next version of the language.
* Node version greater than `0.11.2` is required and needs to be executed with the `--harmony-generators` (or `--harmony`) command-line switch.
*/
// TODO: After https://github.com/Microsoft/TypeScript/issues/2983 is implemented, we can use
// the return type propagation of generators to automatically infer the return type T.
static coroutine<T>(
generatorFunction: () => IterableIterator<any>,
options?: Bluebird.CoroutineOptions
): () => Bluebird<T>;
static coroutine<T, A1>(
generatorFunction: (a1: A1) => IterableIterator<any>,
options?: Bluebird.CoroutineOptions
): (a1: A1) => Bluebird<T>;
static coroutine<T, A1, A2>(
generatorFunction: (a1: A1, a2: A2) => IterableIterator<any>,
options?: Bluebird.CoroutineOptions
): (a1: A1, a2: A2) => Bluebird<T>;
static coroutine<T, A1, A2, A3>(
generatorFunction: (a1: A1, a2: A2, a3: A3) => IterableIterator<any>,
options?: Bluebird.CoroutineOptions
): (a1: A1, a2: A2, a3: A3) => Bluebird<T>;
static coroutine<T, A1, A2, A3, A4>(
generatorFunction: (a1: A1, a2: A2, a3: A3, a4: A4) => IterableIterator<any>,
options?: Bluebird.CoroutineOptions
): (a1: A1, a2: A2, a3: A3, a4: A4) => Bluebird<T>;
static coroutine<T, A1, A2, A3, A4, A5>(
generatorFunction: (a1: A1, a2: A2, a3: A3, a4: A4, a5: A5) => IterableIterator<any>,
options?: Bluebird.CoroutineOptions
): (a1: A1, a2: A2, a3: A3, a4: A4, a5: A5) => Bluebird<T>;
static coroutine<T, A1, A2, A3, A4, A5, A6>(
generatorFunction: (a1: A1, a2: A2, a3: A3, a4: A4, a5: A5, a6: A6) => IterableIterator<any>,
options?: Bluebird.CoroutineOptions
): (a1: A1, a2: A2, a3: A3, a4: A4, a5: A5, a6: A6) => Bluebird<T>;
static coroutine<T, A1, A2, A3, A4, A5, A6, A7>(
generatorFunction: (a1: A1, a2: A2, a3: A3, a4: A4, a5: A5, a6: A6, a7: A7) => IterableIterator<any>,
options?: Bluebird.CoroutineOptions
): (a1: A1, a2: A2, a3: A3, a4: A4, a5: A5, a6: A6, a7: A7) => Bluebird<T>;
static coroutine<T, A1, A2, A3, A4, A5, A6, A7, A8>(
generatorFunction: (a1: A1, a2: A2, a3: A3, a4: A4, a5: A5, a6: A6, a7: A7, a8: A8) => IterableIterator<any>,
options?: Bluebird.CoroutineOptions
): (a1: A1, a2: A2, a3: A3, a4: A4, a5: A5, a6: A6, a7: A7, a8: A8) => Bluebird<T>;
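// Illustrative use (a generator that yields promises):
//
//   const addAsync = Bluebird.coroutine<number>(function* () {
//     const a = yield Bluebird.resolve(1);
//     const b = yield Bluebird.resolve(2);
//     return a + b;
//   });
//   addAsync().then((sum) => console.log(sum)); // 3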
/**
* Add `handler` as the handler to call when there is a possibly unhandled rejection. The default handler logs the error stack to stderr or `console.error` in browsers.
*
* Passing no value or a non-function will have the effect of removing any kind of handling for possibly unhandled rejections.
*/
static onPossiblyUnhandledRejection(handler: (reason: any) => any): void;
/**
* Add handler as the handler to call when there is a possibly unhandled rejection.
* The default handler logs the error stack to stderr or console.error in browsers.
*
* Passing no value or a non-function will have the effect of removing any kind of handling for possibly unhandled rejections.
*
* Note: this hook is specific to the bluebird instance its called on, application developers should use global rejection events.
*/
static onPossiblyUnhandledRejection(handler?: (error: Error, promise: Bluebird<any>) => void): void;
/**
* Given an array, or a promise of an array, which contains promises (or a mix of promises and values) return a promise that is fulfilled when all the items in the array are fulfilled.
* The promise's fulfillment value is an array with fulfillment values at respective positions to the original array.
* If any promise in the array rejects, the returned promise is rejected with the rejection reason.
*/
// TODO enable more overloads
// array with promises of different types
static all<T1, T2, T3, T4, T5>(values: [PromiseLike<T1> | T1, PromiseLike<T2> | T2, PromiseLike<T3> | T3, PromiseLike<T4> | T4, PromiseLike<T5> | T5]): Bluebird<[T1, T2, T3, T4, T5]>;
static all<T1, T2, T3, T4>(values: [PromiseLike<T1> | T1, PromiseLike<T2> | T2, PromiseLike<T3> | T3, PromiseLike<T4> | T4]): Bluebird<[T1, T2, T3, T4]>;
static all<T1, T2, T3>(values: [PromiseLike<T1> | T1, PromiseLike<T2> | T2, PromiseLike<T3> | T3]): Bluebird<[T1, T2, T3]>;
static all<T1, T2>(values: [PromiseLike<T1> | T1, PromiseLike<T2> | T2]): Bluebird<[T1, T2]>;
static all<T1>(values: [PromiseLike<T1> | T1]): Bluebird<[T1]>;
// array with values
static all<R>(values: PromiseLike<Iterable<PromiseLike<R> | R>> | Iterable<PromiseLike<R> | R>): Bluebird<R[]>;
/**
* Like ``Promise.all`` but for object properties instead of array items. Returns a promise that is fulfilled when all the properties of the object are fulfilled.
*
* The promise's fulfillment value is an object with fulfillment values at respective keys to the original object.
* If any promise in the object rejects, the returned promise is rejected with the rejection reason.
*
* If `object` is a trusted `Promise`, then it will be treated as a promise for object rather than for its properties.
* All other objects are treated for their properties as is returned by `Object.keys` - the object's own enumerable properties.
*
* *The original object is not modified.*
*/
// map
static props<K, V>(map: PromiseLike<Map<K, PromiseLike<V> | V>> | Map<K, PromiseLike<V> | V>): Bluebird<Map<K, V>>;
// trusted promise for object
static props<T>(object: PromiseLike<Bluebird.ResolvableProps<T>>): Bluebird<T>; // tslint:disable-line:unified-signatures
// object
static props<T>(object: Bluebird.ResolvableProps<T>): Bluebird<T>; // tslint:disable-line:unified-signatures
/**
* Like `Promise.some()`, with 1 as `count`. However, if the promise fulfills, the fulfillment value is not an array of 1 but the value directly.
*/
static any<R>(values: PromiseLike<Iterable<PromiseLike<R> | R>> | Iterable<PromiseLike<R> | R>): Bluebird<R>;
/**
* Given an array, or a promise of an array, which contains promises (or a mix of promises and values) return a promise that is fulfilled or rejected as soon as a promise in the array is fulfilled or rejected with the respective rejection reason or fulfillment value.
*
* **Note** If you pass empty array or a sparse array with no values, or a promise/thenable for such, it will be forever pending.
*/
static race<R>(values: PromiseLike<Iterable<PromiseLike<R> | R>> | Iterable<PromiseLike<R> | R>): Bluebird<R>;
/**
* Initiate a competitive race between multiple promises or values (values will become immediately fulfilled promises).
* When `count` amount of promises have been fulfilled, the returned promise is fulfilled with an array that contains the fulfillment values of the winners in order of resolution.
*
* If too many promises are rejected so that the promise can never become fulfilled, it will be immediately rejected with an array of rejection reasons in the order they were thrown in.
*
* *The original array is not modified.*
*/
static some<R>(values: PromiseLike<Iterable<PromiseLike<R> | R>> | Iterable<PromiseLike<R> | R>, count: number): Bluebird<R[]>;
/**
* Promise.join(
* Promise<any>|any values...,
* function handler
* ) -> Promise
* For coordinating multiple concurrent discrete promises.
*
* Note: In 1.x and 0.x Promise.join used to be a Promise.all that took the values in as arguments instead in an array.
* This behavior has been deprecated but is still supported partially - when the last argument is an immediate function value the new semantics will apply
*/
static join<R, A1>(
arg1: A1 | PromiseLike<A1>,
handler: (arg1: A1) => R | PromiseLike<R>
): Bluebird<R>;
static join<R, A1, A2>(
arg1: A1 | PromiseLike<A1>,
arg2: A2 | PromiseLike<A2>,
handler: (arg1: A1, arg2: A2) => R | PromiseLike<R>
): Bluebird<R>;
static join<R, A1, A2, A3>(
arg1: A1 | PromiseLike<A1>,
arg2: A2 | PromiseLike<A2>,
arg3: A3 | PromiseLike<A3>,
handler: (arg1: A1, arg2: A2, arg3: A3) => R | PromiseLike<R>
): Bluebird<R>;
static join<R, A1, A2, A3, A4>(
arg1: A1 | PromiseLike<A1>,
arg2: A2 | PromiseLike<A2>,
arg3: A3 | PromiseLike<A3>,
arg4: A4 | PromiseLike<A4>,
handler: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => R | PromiseLike<R>
): Bluebird<R>;
static join<R, A1, A2, A3, A4, A5>(
arg1: A1 | PromiseLike<A1>,
arg2: A2 | PromiseLike<A2>,
arg3: A3 | PromiseLike<A3>,
arg4: A4 | PromiseLike<A4>,
arg5: A5 | PromiseLike<A5>,
handler: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => R | PromiseLike<R>
): Bluebird<R>;
// variadic array
/** @deprecated use .all instead */
static join<R>(...values: Array<R | PromiseLike<R>>): Bluebird<R[]>;
/**
* Map an array, or a promise of an array, which contains promises (or a mix of promises and values) with the given `mapper` function with the signature `(item, index, arrayLength)` where `item` is the resolved value of a respective promise in the input array.
* If any promise in the input array is rejected the returned promise is rejected as well.
*
* If the `mapper` function returns promises or thenables, the returned promise will wait for all the mapped results to be resolved as well.
*
* *The original array is not modified.*
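*
* A usage sketch (illustrative, not from the upstream docs; plain values are
* allowed wherever promises are):
*
*     Bluebird.map([1, 2, 3], (n) => n * 2, { concurrency: 2 })
*         .then((doubled) => console.log(doubled)); // [2, 4, 6]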
*/
static map<R, U>(
values: PromiseLike<Iterable<PromiseLike<R> | R>> | Iterable<PromiseLike<R> | R>,
mapper: (item: R, index: number, arrayLength: number) => U | PromiseLike<U>,
options?: Bluebird.ConcurrencyOption
): Bluebird<U[]>;
/**
* Reduce an array, or a promise of an array, which contains promises (or a mix of promises and values) with the given `reducer` function with the signature `(total, current, index, arrayLength)` where `current` is the resolved value of a respective promise in the input array.
* If any promise in the input array is rejected the returned promise is rejected as well.
*
* If the reducer function returns a promise or a thenable, the result for the promise is awaited for before continuing with next iteration.
*
* *The original array is not modified. If no `initialValue` is given and the array doesn't contain at least 2 items, the callback will not be called and `undefined` is returned.
* If `initialValue` is given and the array doesn't have at least 1 item, `initialValue` is returned.*
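*
* A usage sketch (illustrative):
*
*     Bluebird.reduce([1, 2, 3], (total, n) => total + n, 0)
*         .then((sum) => console.log(sum)); // 6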
*/
static reduce<R, U>(
values: PromiseLike<Iterable<PromiseLike<R> | R>> | Iterable<PromiseLike<R> | R>,
reducer: (total: U, current: R, index: number, arrayLength: number) => U | PromiseLike<U>,
initialValue?: U
): Bluebird<U>;
/**
* Filter an array, or a promise of an array, which contains promises (or a mix of promises and values) with the given `filterer` function with the signature `(item, index, arrayLength)` where `item` is the resolved value of a respective promise in the input array.
* If any promise in the input array is rejected the returned promise is rejected as well.
*
* The return values from the filtered functions are coerced to booleans, with the exception of promises and thenables which are awaited for their eventual result.
*
* *The original array is not modified.*
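*
* A usage sketch (illustrative):
*
*     Bluebird.filter([1, 2, 3, 4], (n) => n % 2 === 0)
*         .then((evens) => console.log(evens)); // [2, 4]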
*/
static filter<R>(
values: PromiseLike<Iterable<PromiseLike<R> | R>> | Iterable<PromiseLike<R> | R>,
filterer: (item: R, index: number, arrayLength: number) => boolean | PromiseLike<boolean>,
option?: Bluebird.ConcurrencyOption
): Bluebird<R[]>;
/**
* Iterate over an array, or a promise of an array, which contains promises (or a mix of promises and values) with the given iterator function with the signature `(item, index, arrayLength)` where `item` is the resolved value of a respective promise in the input array.
* Iteration happens serially. If any promise in the input array is rejected the returned promise is rejected as well.
*
* Resolves to the original array unmodified, this method is meant to be used for side effects.
* If the iterator function returns a promise or a thenable, the result for the promise is awaited for before continuing with next iteration.
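*
* A usage sketch (illustrative; resolves to the original array):
*
*     Bluebird.each([1, 2, 3], (n) => console.log(n))
*         .then((original) => console.log(original)); // [1, 2, 3]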
*/
static each<R, U>(
values: PromiseLike<Iterable<PromiseLike<R> | R>> | Iterable<PromiseLike<R> | R>,
iterator: (item: R, index: number, arrayLength: number) => U | PromiseLike<U>
): Bluebird<R[]>;
/**
* Given an Iterable (arrays are Iterable), or a promise of an Iterable, which produces promises (or a mix of promises and values), collect all the values in the Iterable into an array and iterate over the array serially, in-order.
*
* Returns a promise for an array that contains the values returned by the iterator function in their respective positions.
* The iterator won't be called for an item until its previous item, and the promise returned by the iterator for that item are fulfilled.
* This results in a mapSeries kind of utility but it can also be used simply as a side effect iterator similar to Array#forEach.
*
* If any promise in the input array is rejected or any promise returned by the iterator function is rejected, the result will be rejected as well.
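*
* A usage sketch (illustrative; items are mapped one at a time, in order):
*
*     Bluebird.mapSeries([1, 2, 3], (n) => n * 10)
*         .then((out) => console.log(out)); // [10, 20, 30]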
*/
static mapSeries<R, U>(
values: PromiseLike<Iterable<PromiseLike<R> | R>> | Iterable<PromiseLike<R> | R>,
iterator: (item: R, index: number, arrayLength: number) => U | PromiseLike<U>
): Bluebird<U[]>;
/**
* A meta method used to specify the disposer method that cleans up a resource when using `Promise.using`.
*
* Returns a Disposer object which encapsulates both the resource as well as the method to clean it up.
* The user can pass this object to `Promise.using` to get access to the resource when it becomes available,
* as well as to ensure it's automatically cleaned up.
*
* The second argument passed to a disposer is the result promise of the using block, which you can
* inspect synchronously.
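*
* A usage sketch (illustrative; `connectAsync` and `conn.close`/`conn.query` are
* hypothetical resource APIs, not part of this library):
*
*     const db = connectAsync().disposer((conn) => conn.close());
*     Bluebird.using(db, (conn) => conn.query("...")); // conn is always closed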
*/
disposer(disposeFn: (arg: R, promise: Bluebird<R>) => void | PromiseLike<void>): Bluebird.Disposer<R>;
/**
* In conjunction with `.disposer`, using will make sure that no matter what, the specified disposer
* will be called when the promise returned by the callback passed to using has settled. The disposer is
* necessary because there is no standard interface in node for disposing resources.
*/
static using<R, T>(
disposer: Bluebird.Disposer<R>,
executor: (transaction: R) => PromiseLike<T>
): Bluebird<T>;
static using<R1, R2, T>(
disposer: Bluebird.Disposer<R1>,
disposer2: Bluebird.Disposer<R2>,
executor: (transaction1: R1, transaction2: R2
) => PromiseLike<T>): Bluebird<T>;
static using<R1, R2, R3, T>(
disposer: Bluebird.Disposer<R1>,
disposer2: Bluebird.Disposer<R2>,
disposer3: Bluebird.Disposer<R3>,
executor: (transaction1: R1, transaction2: R2, transaction3: R3) => PromiseLike<T>
): Bluebird<T>;
/**
* Configure long stack traces, warnings, monitoring and cancellation.
* Note that even though false is the default here, a development environment might be detected which automatically
* enables long stack traces and warnings.
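*
* A usage sketch (illustrative; call once at startup, before creating promises):
*
*     Bluebird.config({ cancellation: true, longStackTraces: true });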
*/
static config(options: {
/** Enable warnings */
warnings?: boolean | {
/** Enables all warnings except forgotten return statements. */
wForgottenReturn: boolean;
};
/** Enable long stack traces */
longStackTraces?: boolean;
/** Enable cancellation */
cancellation?: boolean;
/** Enable monitoring */
monitoring?: boolean;
}): void;
/**
* Create a new promise. The passed in function will receive functions `resolve` and `reject` as its arguments which can be called to seal the fate of the created promise.
* If promise cancellation is enabled, passed in function will receive one more function argument `onCancel` that allows to register an optional cancellation callback.
*/
static Promise: typeof Bluebird;
/**
* The version number of the library
*/
static version: string;
}
declare namespace Bluebird {
interface ConcurrencyOption {
concurrency: number;
}
interface SpreadOption {
spread: boolean;
}
interface FromNodeOptions {
multiArgs?: boolean;
}
interface PromisifyOptions {
context?: any;
multiArgs?: boolean;
}
interface PromisifyAllOptions<T> extends PromisifyOptions {
suffix?: string;
filter?(name: string, func: (...args: any[]) => any, target?: any, passesDefaultFilter?: boolean): boolean;
// The promisifier gets a reference to the original method and should return a function which returns a promise
promisifier?(this: T, originalMethod: (...args: any[]) => any, defaultPromisifer: (...args: any[]) => (...args: any[]) => Bluebird<any>): () => PromiseLike<any>;
}
interface CoroutineOptions {
yieldHandler(value: any): any;
}
/**
* Represents an error that is an explicit promise rejection as opposed to a thrown error.
* For example, if an error is errbacked by a callback API promisified through `promisify` or `promisifyAll`
* and is not a typed error, it will be converted to an `OperationalError` which has the original error in
* the `.cause` property.
*
* `OperationalError`s are caught in `.error` handlers.
*/
class OperationalError extends Error { }
/**
* Signals that an operation has timed out. Used as a custom cancellation reason in `.timeout`.
*/
class TimeoutError extends Error { }
/**
* Signals that an operation has been aborted or cancelled. The default reason used by `.cancel`.
*/
class CancellationError extends Error { }
/**
* A collection of errors. `AggregateError` is an array-like object, with numeric indices and a `.length` property.
* It supports all generic array methods such as `.forEach` directly.
*
* `AggregateError`s are caught in `.error` handlers, even if the contained errors are not operational.
*
* `Promise.some` and `Promise.any` use `AggregateError` as rejection reason when they fail.
*/
class AggregateError extends Error implements ArrayLike<Error> {
length: number;
[index: number]: Error;
join(separator?: string): string;
pop(): Error;
push(...errors: Error[]): number;
shift(): Error;
unshift(...errors: Error[]): number;
slice(begin?: number, end?: number): AggregateError;
filter(callback: (element: Error, index: number, array: AggregateError) => boolean, thisArg?: any): AggregateError;
forEach(callback: (element: Error, index: number, array: AggregateError) => void, thisArg?: any): undefined;
some(callback: (element: Error, index: number, array: AggregateError) => boolean, thisArg?: any): boolean;
every(callback: (element: Error, index: number, array: AggregateError) => boolean, thisArg?: any): boolean;
map(callback: (element: Error, index: number, array: AggregateError) => boolean, thisArg?: any): AggregateError;
indexOf(searchElement: Error, fromIndex?: number): number;
lastIndexOf(searchElement: Error, fromIndex?: number): number;
reduce(callback: (accumulator: any, element: Error, index: number, array: AggregateError) => any, initialValue?: any): any;
reduceRight(callback: (previousValue: any, element: Error, index: number, array: AggregateError) => any, initialValue?: any): any;
sort(compareFunction?: (errLeft: Error, errRight: Error) => number): AggregateError;
reverse(): AggregateError;
}
/**
* Returned by `Bluebird.disposer()`.
*/
class Disposer<R> { }
/** @deprecated Use PromiseLike<T> directly. */
type Thenable<T> = PromiseLike<T>;
type ResolvableProps<T> = object & {[K in keyof T]: PromiseLike<T[K]> | T[K]};
interface Resolver<R> {
/**
* Returns a reference to the controlled promise that can be passed to clients.
*/
promise: Bluebird<R>;
/**
* Resolve the underlying promise with `value` as the resolution value. If `value` is a thenable or a promise, the underlying promise will assume its state.
*/
resolve(value: R): void;
resolve(): void;
/**
* Reject the underlying promise with `reason` as the rejection reason.
*/
reject(reason: any): void;
/**
* Gives you a callback representation of the `PromiseResolver`. Note that this is not a method but a property.
* The callback accepts an error object as the first argument and success values as the remaining arguments, i.e. Node.js conventions.
*
* If the callback is called with multiple success values, the resolver fulfills its promise with an array of the values.
*/
// TODO specify resolver callback
callback(err: any, value: R, ...values: R[]): void;
}
interface Inspection<R> {
/**
* See if the underlying promise was fulfilled at the creation time of this inspection object.
*/
isFulfilled(): boolean;
/**
* See if the underlying promise was rejected at the creation time of this inspection object.
*/
isRejected(): boolean;
/**
* See if the underlying promise was cancelled at the creation time of this inspection object.
*/
isCancelled(): boolean;
/**
* See if the underlying promise was still pending at the creation time of this inspection object.
*/
isPending(): boolean;
/**
* Get the fulfillment value of the underlying promise. Throws if the promise wasn't fulfilled at the creation time of this inspection object.
*
* throws `TypeError`
*/
value(): R;
/**
* Get the rejection reason for the underlying promise. Throws if the promise wasn't rejected at the creation time of this inspection object.
*
* throws `TypeError`
*/
reason(): any;
}
/**
* Returns a new independent copy of the Bluebird library.
*
* This method should be used before you use any of the methods which would otherwise alter the global Bluebird object - to avoid polluting global state.
*/
function getNewLibraryCopy(): typeof Bluebird;
/**
* This is relevant to browser environments with no module loader.
*
* Release control of the Promise namespace to whatever it was before this library was loaded. Returns a reference to the library namespace so you can attach it to something else.
*/
function noConflict(): typeof Bluebird;
/**
* Changes how bluebird schedules calls asynchronously.
*
* @param scheduler Should be a function that asynchronously schedules
* the calling of the passed in function
*/
function setScheduler(scheduler: (callback: (...args: any[]) => void) => void): void;
}
export = Bluebird;<|fim▁end|> | |
<|file_name|>PlusCircle.ts<|end_file_name|><|fim▁begin|><|fim▁hole|> <circle stroke="currentColor" cx="14" cy="14" r="13"/>
<path d="M14 8v12" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>`;<|fim▁end|> | export const PlusCircle = `
<svg viewBox="0 0 28 28">
<g fill="none" fill-rule="evenodd">
<path d="M8 14h12" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round"/> |
<|file_name|>rabbitMQ_rpc_serverl.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python<|fim▁hole|> host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
def on_request(ch, method, props, body):
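# Handle one RPC request: parse n, compute fib(n), publish the result to the
# caller's reply_to queue with the same correlation_id, then ack the request.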
n = int(body)
print(" [.] fib(%s)" % n)
response = fib(n)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
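# Fair dispatch (prefetch_count=1): hand this worker at most one unacked
# message at a time, then start consuming RPC requests from 'rpc_queue'.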
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')
print(" [x] Awaiting RPC requests")
channel.start_consuming()<|fim▁end|> | # -*- coding:utf-8 -*-
import pika
import time
connection = pika.BlockingConnection(pika.ConnectionParameters( |
<|file_name|>EvenPowersOf2.java<|end_file_name|><|fim▁begin|>package com.company;
import java.util.Scanner;
public class EvenPowersOf2 {
public static void main(String[] args) {
Scanner scanner = new Scanner(System.in);
int n = Integer.parseInt(scanner.nextLine());
int num = 1;
for (int i = 0; i <= n ; i+=2) {
System.out.println(num);
num *= 4;
}
}<|fim▁hole|><|fim▁end|> | } |
<|file_name|>blast2dem.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
***************************************************************************
blast2dem.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterBoolean
class blast2dem(LAStoolsAlgorithm):
ATTRIBUTE = "ATTRIBUTE"
PRODUCT = "PRODUCT"
ATTRIBUTES = ["elevation", "slope", "intensity", "rgb"]
PRODUCTS = ["actual values", "hillshade", "gray", "false"]
USE_TILE_BB = "USE_TILE_BB"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('blast2dem')
self.group, self.i18n_group = self.trAlgorithm('LAStools')
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParametersFilter1ReturnClassFlagsGUI()
self.addParametersStepGUI()
self.addParameter(ParameterSelection(blast2dem.ATTRIBUTE,
self.tr("Attribute"), blast2dem.ATTRIBUTES, 0))
self.addParameter(ParameterSelection(blast2dem.PRODUCT,
self.tr("Product"), blast2dem.PRODUCTS, 0))
self.addParameter(ParameterBoolean(blast2dem.USE_TILE_BB,<|fim▁hole|> def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "blast2dem")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
self.addParametersFilter1ReturnClassFlagsCommands(commands)
self.addParametersStepCommands(commands)
attribute = self.getParameterValue(blast2dem.ATTRIBUTE)
if attribute != 0:
commands.append("-" + blast2dem.ATTRIBUTES[attribute])
product = self.getParameterValue(blast2dem.PRODUCT)
if product != 0:
commands.append("-" + blast2dem.PRODUCTS[product])
if (self.getParameterValue(blast2dem.USE_TILE_BB)):
commands.append("-use_tile_bb")
self.addParametersRasterOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)<|fim▁end|> | self.tr("Use tile bounding box (after tiling with buffer)"), False))
self.addParametersRasterOutputGUI()
self.addParametersAdditionalGUI()
|
<|file_name|>cmn.rs<|end_file_name|><|fim▁begin|>// COPY OF 'src/rust/api/cmn.rs'
// DO NOT EDIT
use std::io::{self, Read, Seek, Cursor, Write, SeekFrom};
use std;
use std::fmt::{self, Display};
use std::str::FromStr;
use std::error;
use std::thread::sleep_ms;
use mime::{Mime, TopLevel, SubLevel, Attr, Value};
use oauth2::{TokenType, Retry, self};
use hyper;
use hyper::header::{ContentType, ContentLength, Headers, UserAgent, Authorization, Header,
HeaderFormat};
use hyper::http::LINE_ENDING;
use hyper::method::Method;
use hyper::status::StatusCode;
use serde;
/// Identifies the Hub. There is only one per library, this trait is supposed
/// to make intended use more explicit.
/// The hub allows access to all resource methods more easily.
pub trait Hub {}
/// Identifies types for building methods of a particular resource type
pub trait MethodsBuilder {}
/// Identifies types which represent builders for a particular resource method
pub trait CallBuilder {}
/// Identifies types which can be inserted and deleted.
/// Types with this trait are most commonly used by clients of this API.
pub trait Resource {}
/// Identifies types which are used in API responses.
pub trait ResponseResult {}
/// Identifies types which are used in API requests.
pub trait RequestValue {}
/// Identifies types which are not actually used by the API
/// This might be a bug within the google API schema.
pub trait UnusedType {}
/// Identifies types which are only used as part of other types, which
/// usually are carrying the `Resource` trait.
pub trait Part {}
/// Identifies types which are only used by other types internally.
/// They have no special meaning, this trait just marks them for completeness.
pub trait NestedType {}
/// A utility to specify reader types which provide seeking capabilities too
pub trait ReadSeek: Seek + Read {}
impl<T: Seek + Read> ReadSeek for T {}
/// A trait for all types that can convert themselves into a *parts* string
pub trait ToParts {
fn to_parts(&self) -> String;
}
/// A utility type which can decode a server response that indicates error
#[derive(Deserialize)]
pub struct JsonServerError {
pub error: String,
pub error_description: Option<String>
}
/// A utility to represent detailed errors we might see in case there are BadRequests.
/// The latter happen if the sent parameters or request structures are unsound
#[derive(Deserialize, Serialize, Debug)]
pub struct ErrorResponse {
error: ServerError,
}
#[derive(Deserialize, Serialize, Debug)]
pub struct ServerError {
errors: Vec<ServerMessage>,
code: u16,
message: String,
}
#[derive(Deserialize, Serialize, Debug)]
pub struct ServerMessage {
domain: String,
reason: String,
message: String,
#[serde(rename="locationType")]
location_type: Option<String>,
location: Option<String>
}
#[derive(Copy, Clone)]
pub struct DummyNetworkStream;
impl Read for DummyNetworkStream {
fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
Ok(0)
}
}
impl Write for DummyNetworkStream {
fn write(&mut self, _: &[u8]) -> io::Result<usize> {
Ok(0)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl hyper::net::NetworkStream for DummyNetworkStream {
fn peer_addr(&mut self) -> io::Result<std::net::SocketAddr> {
Ok("127.0.0.1:1337".parse().unwrap())
}
}
/// A trait specifying functionality to help controlling any request performed by the API.
/// The trait has a conservative default implementation.
///
/// It contains methods to deal with all common issues, as well with the ones related to
/// uploading media
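///
/// A minimal custom delegate might look like this (illustrative sketch,
/// relying on the default implementations for everything but one override):
///
/// ```text
/// struct AbortOnError;
/// impl Delegate for AbortOnError {
///     fn http_error(&mut self, _: &hyper::Error) -> Retry {
///         Retry::Abort
///     }
/// }
/// ```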
pub trait Delegate {
/// Called at the beginning of any API request. The delegate should store the method
/// information if it is interested in knowing more context when further calls to it
/// are made.
/// The matching `finished()` call will always be made, no matter whether or not the API
/// request was successful. That way, the delegate may easily maintain a clean state
/// between various API calls.
fn begin(&mut self, MethodInfo) {}
/// Called whenever there is an [HttpError](http://hyperium.github.io/hyper/hyper/error/enum.HttpError.html), usually if there are network problems.
///
/// If you choose to retry after a duration, the duration should be chosen using the
/// [exponential backoff algorithm](http://en.wikipedia.org/wiki/Exponential_backoff).
///
/// Return retry information.
fn http_error(&mut self, &hyper::Error) -> Retry {
Retry::Abort
}
/// Called whenever there is the need for your applications API key after
/// the official authenticator implementation didn't provide one, for some reason.
/// If this method returns None as well, the underlying operation will fail
fn api_key(&mut self) -> Option<String> {
None
}
/// Called whenever the Authenticator didn't yield a token. The delegate
/// may attempt to provide one, or just take it as a general information about the
/// impending failure.
/// The given Error provides information about why the token couldn't be acquired in the
/// first place
fn token(&mut self, err: &error::Error) -> Option<oauth2::Token> {
let _ = err;
None
}
/// Called during resumable uploads to provide a URL for the impending upload.
/// It was saved after a previous call to `store_upload_url(...)`, and if not None,
/// will be used instead of asking the server for a new upload URL.
/// This is useful in case a previous resumable upload was aborted/canceled, but should now
/// be resumed.
/// The returned URL will be used exactly once - if it fails again and the delegate allows
/// to retry, we will ask the server for a new upload URL.
fn upload_url(&mut self) -> Option<String> {
None
}
/// Called after we have retrieved a new upload URL for a resumable upload to store it
/// in case we fail or cancel. That way, we can attempt to resume the upload later,
/// see `upload_url()`.
/// It will also be called with None after a successful upload, which allows the delegate
/// to forget the URL. That way, we will not attempt to resume an upload that has already
/// finished.
fn store_upload_url(&mut self, url: Option<&str>) {
let _ = url;
}
/// Called whenever a server response could not be decoded from json.
/// It's for informational purposes only, the caller will return with an error
/// accordingly.
///
/// # Arguments
///
/// * `json_encoded_value` - The json-encoded value which failed to decode.
/// * `json_decode_error` - The decoder error
fn response_json_decode_error(&mut self, json_encoded_value: &str, json_decode_error: &serde::json::Error) {
let _ = json_encoded_value;
let _ = json_decode_error;
}
/// Called whenever the http request returns with a non-success status code.
/// This can involve authentication issues, or anything else that very much
/// depends on the used API method.
/// The delegate should check the status, header and decoded json error to decide
/// whether to retry or not. In the latter case, the underlying call will fail.
///
/// If you choose to retry after a duration, the duration should be chosen using the
/// [exponential backoff algorithm](http://en.wikipedia.org/wiki/Exponential_backoff).
fn http_failure(&mut self, _: &hyper::client::Response, Option<JsonServerError>, _: Option<ServerError>) -> Retry {
Retry::Abort
}
/// Called prior to sending the main request of the given method. It can be used to time
/// the call or to print progress information.
/// It's also useful as you can be sure that a request will definitely be made.
fn pre_request(&mut self) { }
/// Return the size of each chunk of a resumable upload.
/// Must be a power of two, with 1<<18 being the smallest allowed chunk size.
/// Will be called once before starting any resumable upload.
fn chunk_size(&mut self) -> u64 {
1 << 23
}
/// Called before the given chunk is uploaded to the server.
/// If true is returned, the upload will be interrupted.
/// However, it may be resumable if you stored the upload URL in a previous call
/// to `store_upload_url()`
fn cancel_chunk_upload(&mut self, chunk: &ContentRange) -> bool {
let _ = chunk;
false
}
/// Called before the API request method returns, in every case. It can be used to clean up
/// internal state between calls to the API.
/// This call always has a matching call to `begin(...)`.
///
/// # Arguments
///
/// * `is_success` - a true value indicates the operation was successful. If false, you should
/// discard all values stored during `store_upload_url`.
fn finished(&mut self, is_success: bool) {
let _ = is_success;
}
}
/// A delegate with a conservative default implementation, which is used if no other delegate is
/// set.
#[derive(Default)]
pub struct DefaultDelegate;
impl Delegate for DefaultDelegate {}
#[derive(Debug)]
pub enum Error {
/// The http connection failed
HttpError(hyper::Error),
/// An attempt was made to upload a resource with size stored in field `.0`
/// even though the maximum upload size is what is stored in field `.1`.
UploadSizeLimitExceeded(u64, u64),
/// Represents information about a request that was not understood by the server.
/// Details are included.
BadRequest(ErrorResponse),
/// We needed an API key for authentication, but didn't obtain one.
/// Neither through the authenticator, nor through the Delegate.
MissingAPIKey,
/// We required a Token, but didn't get one from the Authenticator
MissingToken(Box<error::Error>),
/// The delegate instructed to cancel the operation
Cancelled,
/// An additional, free form field clashed with one of the built-in optional ones
FieldClash(&'static str),
/// Shows that we failed to decode the server response.
/// This can happen if the protocol changes in conjunction with strict json decoding.
JsonDecodeError(String, serde::json::Error),
/// Indicates an HTTP response with a non-success status code
Failure(hyper::client::Response),
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::HttpError(ref err) => err.fmt(f),
Error::UploadSizeLimitExceeded(ref resource_size, ref max_size) =>
writeln!(f, "The media size {} exceeds the maximum allowed upload size of {}"
, resource_size, max_size),
Error::MissingAPIKey => {
(writeln!(f, "The application's API key was not found in the configuration")).ok();
writeln!(f, "It is used as there are no Scopes defined for this method.")
},
Error::BadRequest(ref err) => {
try!(writeln!(f, "Bad Requst ({}): {}", err.error.code, err.error.message));
for err in err.error.errors.iter() {
try!(writeln!(f, " {}: {}, {}{}",
err.domain,
err.message,
err.reason,
match &err.location {
&Some(ref loc) => format!("@{}", loc),
&None => String::new(),
}));
}
Ok(())
},
Error::MissingToken(ref err) =>
writeln!(f, "Token retrieval failed with error: {}", err),
Error::Cancelled =>
writeln!(f, "Operation cancelled by delegate"),
Error::FieldClash(field) =>
writeln!(f, "The custom parameter '{}' is already provided natively by the CallBuilder.", field),
Error::JsonDecodeError(ref json_str, ref err)
=> writeln!(f, "{}: {}", err, json_str),
Error::Failure(ref response) =>
writeln!(f, "Http status indicates failure: {:?}", response),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::HttpError(ref err) => err.description(),
Error::JsonDecodeError(_, ref err) => err.description(),
_ => "NO DESCRIPTION POSSIBLE - use `Display.fmt()` instead"
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::HttpError(ref err) => err.cause(),
Error::JsonDecodeError(_, ref err) => err.cause(),
_ => None
}
}
}
/// A universal result type used as return for all calls.
pub type Result<T> = std::result::Result<T, Error>;
/// Contains information about an API request.
pub struct MethodInfo {
pub id: &'static str,
pub http_method: Method,
}
const BOUNDARY: &'static str = "MDuXWGyeE33QFXGchb2VFWc4Z7945d";
/// Provides a `Read` interface that converts multiple parts into the protocol
/// identified by [RFC2387](https://tools.ietf.org/html/rfc2387).
/// **Note**: This implementation is just as rich as it needs to be to perform uploads
/// to google APIs, and might not be a fully-featured implementation.
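///
/// A usage sketch (illustrative; assumes the `mime` crate of this era parses
/// these type strings via `FromStr`):
///
/// ```text
/// let mut json = Cursor::new(b"{}".to_vec());
/// let mut mp = MultiPartReader::default();
/// mp.add_part(&mut json, 2, "application/json".parse().unwrap());
/// let mut body = Vec::new();
/// mp.read_to_end(&mut body).unwrap();
/// ```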
#[derive(Default)]
pub struct MultiPartReader<'a> {
raw_parts: Vec<(Headers, &'a mut Read)>,
current_part: Option<(Cursor<Vec<u8>>, &'a mut Read)>,
last_part_boundary: Option<Cursor<Vec<u8>>>,
}
impl<'a> MultiPartReader<'a> {
/// Reserve memory for exactly the given amount of parts
pub fn reserve_exact(&mut self, cap: usize) {
self.raw_parts.reserve_exact(cap);
}
/// Add a new part to the queue of parts to be read on the first `read` call.
///
/// # Arguments
///
/// `headers` - identifying the body of the part. It's similar to the header
/// in an ordinary single-part call, and should thus contain the
/// same information.
/// `reader` - a reader providing the part's body
/// `size` - the amount of bytes provided by the reader. It will be put onto the header as
/// content-size.
/// `mime` - It will be put onto the content type
pub fn add_part(&mut self, reader: &'a mut Read, size: u64, mime_type: Mime) -> &mut MultiPartReader<'a> {
let mut headers = Headers::new();
headers.set(ContentType(mime_type));
headers.set(ContentLength(size));
self.raw_parts.push((headers, reader));
self
}
/// Returns the mime-type representing our multi-part message.
/// Use it with the ContentType header.
pub fn mime_type(&self) -> Mime {
Mime(
TopLevel::Multipart,
SubLevel::Ext("Related".to_string()),
vec![(Attr::Ext("boundary".to_string()), Value::Ext(BOUNDARY.to_string()))],
)
}
/// Returns true if we are totally used
fn is_depleted(&self) -> bool {
self.raw_parts.len() == 0 && self.current_part.is_none() && self.last_part_boundary.is_none()
}
/// Returns true if we are handling our last part
fn is_last_part(&self) -> bool {
self.raw_parts.len() == 0 && self.current_part.is_some()
}
}
impl<'a> Read for MultiPartReader<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match (self.raw_parts.len(),
self.current_part.is_none(),
self.last_part_boundary.is_none()) {
(_, _, false) => {
let br = self.last_part_boundary.as_mut().unwrap().read(buf).unwrap_or(0);
if br < buf.len() {
self.last_part_boundary = None;
}
return Ok(br)
},
(0, true, true) => return Ok(0),
(n, true, _) if n > 0 => {
let (headers, reader) = self.raw_parts.remove(0);
let mut c = Cursor::new(Vec::<u8>::new());
(write!(&mut c, "{}--{}{}{}{}", LINE_ENDING, BOUNDARY, LINE_ENDING,
headers, LINE_ENDING)).unwrap();
c.seek(SeekFrom::Start(0)).unwrap();
self.current_part = Some((c, reader));
}
_ => {},
}
// read headers as long as possible
let (hb, rr) = {
let &mut (ref mut c, ref mut reader) = self.current_part.as_mut().unwrap();
let b = c.read(buf).unwrap_or(0);
(b, reader.read(&mut buf[b..]))
};
match rr {
Ok(bytes_read) => {
if hb < buf.len() && bytes_read == 0 {
if self.is_last_part() {
// before clearing the last part, we will add the boundary that
// will be written last
self.last_part_boundary = Some(Cursor::new(
format!("{}--{}--", LINE_ENDING, BOUNDARY).into_bytes()))
}
// We are depleted - this can trigger the next part to come in
self.current_part = None;
}
let mut total_bytes_read = hb + bytes_read;
while total_bytes_read < buf.len() && !self.is_depleted() {
match self.read(&mut buf[total_bytes_read ..]) {
Ok(br) => total_bytes_read += br,
Err(err) => return Err(err),
}
}
Ok(total_bytes_read)
}
Err(err) => {
// fail permanently
self.current_part = None;
self.last_part_boundary = None;
self.raw_parts.clear();
Err(err)
}
}
}
}
// The following macro invocation needs to be expanded, as `include!`
// doesn't support external macros
// header!{
// #[doc="The `X-Upload-Content-Type` header."]
// (XUploadContentType, "X-Upload-Content-Type") => [Mime]
// xupload_content_type {
// test_header!(
// test1,
// vec![b"text/plain"],
// Some(HeaderField(
// vec![Mime(TopLevel::Text, SubLevel::Plain, Vec::new())]
// )));
// }
// }
/// The `X-Upload-Content-Type` header.
///
/// Generated via rustc --pretty expanded -Z unstable-options, and manually
/// processed to be more readable.
#[derive(PartialEq, Debug, Clone)]
pub struct XUploadContentType(pub Mime);
impl ::std::ops::Deref for XUploadContentType {
type Target = Mime;
fn deref<'a>(&'a self) -> &'a Mime { &self.0 }
}
impl ::std::ops::DerefMut for XUploadContentType {
fn deref_mut<'a>(&'a mut self) -> &'a mut Mime { &mut self.0 }
}
impl Header for XUploadContentType {
fn header_name() -> &'static str { "X-Upload-Content-Type" }
fn parse_header(raw: &[Vec<u8>]) -> Option<Self> {
hyper::header::parsing::from_one_raw_str(raw).map(XUploadContentType)
}
}
impl HeaderFormat for XUploadContentType {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&**self, f)
}
}
impl Display for XUploadContentType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
#[derive(Clone, PartialEq, Debug)]
pub struct Chunk {
pub first: u64,
pub last: u64
}
impl fmt::Display for Chunk {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
(write!(fmt, "{}-{}", self.first, self.last)).ok();
Ok(())
}
}
impl FromStr for Chunk {
type Err = &'static str;
/// NOTE: only implements `%i-%i`, not `*`
fn from_str(s: &str) -> std::result::Result<Chunk, &'static str> {
let parts: Vec<&str> = s.split('-').collect();
if parts.len() != 2 {
return Err("Expected two parts: %i-%i")
}
Ok(
Chunk {
first: match FromStr::from_str(parts[0]) {
Ok(d) => d,
_ => return Err("Couldn't parse 'first' as digit")
},
last: match FromStr::from_str(parts[1]) {
Ok(d) => d,
_ => return Err("Couldn't parse 'last' as digit")
}
}
)
}
}
/// Implements the Content-Range header, for serialization only
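/// For example, `ContentRange { range: Some(Chunk { first: 0, last: 99 }), total_length: 200 }`
/// formats as `bytes 0-99/200`, while a `None` range formats as `bytes */200` (see `fmt_header` below).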
#[derive(Clone, PartialEq, Debug)]
pub struct ContentRange {
pub range: Option<Chunk>,
pub total_length: u64,
}
impl Header for ContentRange {
fn header_name() -> &'static str {
"Content-Range"
}
/// We are not parsable, as parsing is done by the `Range` header
fn parse_header(_: &[Vec<u8>]) -> Option<Self> {
None
}
}
impl HeaderFormat for ContentRange {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
try!(fmt.write_str("bytes "));
match self.range {
Some(ref c) => try!(c.fmt(fmt)),
None => try!(fmt.write_str("*"))
}
(write!(fmt, "/{}", self.total_length)).ok();
Ok(())
}
}
#[derive(Clone, PartialEq, Debug)]
pub struct RangeResponseHeader(pub Chunk);
impl Header for RangeResponseHeader {
fn header_name() -> &'static str {
"Range"
}
fn parse_header(raw: &[Vec<u8>]) -> Option<Self> {
if raw.len() > 0 {
let v = &raw[0];
if let Ok(s) = std::str::from_utf8(v) {
const PREFIX: &'static str = "bytes ";
if s.starts_with(PREFIX) {
if let Ok(c) = <Chunk as FromStr>::from_str(&s[PREFIX.len()..]) {
return Some(RangeResponseHeader(c))
}
}
}
}
None
}
}
impl HeaderFormat for RangeResponseHeader {
/// No implementation necessary, we just need to parse
fn fmt_header(&self, _: &mut fmt::Formatter) -> fmt::Result {
Err(fmt::Error)
}
}
/// A utility type to perform a resumable upload from start to end.
pub struct ResumableUploadHelper<'a, A: 'a> {
pub client: &'a mut hyper::client::Client,
pub delegate: &'a mut Delegate,
pub start_at: Option<u64>,
pub auth: &'a mut A,
pub user_agent: &'a str,
pub auth_header: Authorization<oauth2::Scheme>,
pub url: &'a str,
pub reader: &'a mut ReadSeek,
pub media_type: Mime,
pub content_length: u64
}
impl<'a, A> ResumableUploadHelper<'a, A>
where A: oauth2::GetToken {
fn query_transfer_status(&mut self) -> std::result::Result<u64, hyper::Result<hyper::client::Response>> {
loop {
match self.client.post(self.url)
.header(UserAgent(self.user_agent.to_string()))
.header(ContentRange { range: None, total_length: self.content_length })
.header(self.auth_header.clone())
.send() {
Ok(r) => {
// 308 = resume-incomplete == PermanentRedirect
let headers = r.headers.clone();
let h: &RangeResponseHeader = match headers.get() {
Some(hh) if r.status == StatusCode::PermanentRedirect => hh,
None|Some(_) => {
if let Retry::After(d) = self.delegate.http_failure(&r, None, None) {
sleep_ms(d.num_milliseconds() as u32);
continue;
}
return Err(Ok(r))
}
};
return Ok(h.0.last)
}
Err(err) => {
if let Retry::After(d) = self.delegate.http_error(&err) {
sleep_ms(d.num_milliseconds() as u32);
continue;
}
return Err(Err(err))
}
}
}
}
/// Returns None if the operation was cancelled by the delegate, or the HttpResult.
/// We may return the result just because we didn't understand the status code -
/// the caller should check the status before assuming it's OK to use.
pub fn upload(&mut self) -> Option<hyper::Result<hyper::client::Response>> {
let mut start = match self.start_at {
Some(s) => s,
None => match self.query_transfer_status() {
Ok(s) => s,
Err(result) => return Some(result)
}
};
const MIN_CHUNK_SIZE: u64 = 1 << 18;
let chunk_size = match self.delegate.chunk_size() {
cs if cs > MIN_CHUNK_SIZE => cs,
_ => MIN_CHUNK_SIZE
};
self.reader.seek(SeekFrom::Start(start)).unwrap();
loop {
let request_size = match self.content_length - start {
rs if rs > chunk_size => chunk_size,
rs => rs
};
let mut section_reader = self.reader.take(request_size);
let range_header = ContentRange {
range: Some(Chunk {first: start, last: start + request_size - 1}),<|fim▁hole|> };
start += request_size;
if self.delegate.cancel_chunk_upload(&range_header) {
return None
}
// workaround https://github.com/rust-lang/rust/issues/22252
let res = self.client.post(self.url)
.header(range_header)
.header(ContentType(self.media_type.clone()))
.header(UserAgent(self.user_agent.to_string()))
.body(&mut section_reader)
.send();
match res {
Ok(mut res) => {
if res.status == StatusCode::PermanentRedirect {
continue
}
if !res.status.is_success() {
let mut json_err = String::new();
res.read_to_string(&mut json_err).unwrap();
if let Retry::After(d) = self.delegate.http_failure(&res,
serde::json::from_str(&json_err).ok(),
serde::json::from_str(&json_err).ok()) {
sleep_ms(d.num_milliseconds() as u32);
continue;
}
}
return Some(Ok(res))
},
Err(err) => {
if let Retry::After(d) = self.delegate.http_error(&err) {
sleep_ms(d.num_milliseconds() as u32);
continue;
}
return Some(Err(err))
}
}
}
}
}
// Copy of src/rust/cli/cmn.rs
// TODO(ST): Allow sharing common code between program types
pub fn remove_json_null_values(value: &mut serde::json::value::Value) {
match *value {
serde::json::value::Value::Object(ref mut map) => {
let mut for_removal = Vec::new();
for (key, mut value) in map.iter_mut() {
if value.is_null() {
for_removal.push(key.clone());
} else {
remove_json_null_values(&mut value);
}
}
for key in &for_removal {
map.remove(key);
}
}
_ => {}
}
}<|fim▁end|> | total_length: self.content_length |
<|file_name|>coming_episodes_tests.py<|end_file_name|><|fim▁begin|># coding=utf-8
# This file is part of SickChill.
#
# URL: https://sickchill.github.io
# Git: https://github.com/SickChill/SickChill.git
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.<|fim▁hole|>"""
from __future__ import print_function, unicode_literals
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
from sickchill.show.ComingEpisodes import ComingEpisodes
import six
class ComingEpisodesTests(unittest.TestCase):
"""
Test coming episodes
"""
def test_get_categories(self):
"""
Tests if get_categories returns the valid format and the right values
"""
categories_list = [
None, [], ['A', 'B'], ['A', 'B'], '', 'A|B', 'A|B',
]
results_list = [
[], [], ['A', 'B'], ['A', 'B'], [], ['A', 'B'], ['A', 'B']
]
self.assertEqual(
len(categories_list), len(results_list),
'Number of parameters ({0:d}) and results ({1:d}) does not match'.format(len(categories_list), len(results_list))
)
for (index, categories) in enumerate(categories_list):
self.assertEqual(ComingEpisodes._get_categories(categories), results_list[index])
def test_get_categories_map(self):
"""
Tests if get_categories_map returns the valid format and the right values
"""
categories_list = [
None, [], ['A', 'B'], ['A', 'B']
]
results_list = [
{}, {}, {'A': [], 'B': []}, {'A': [], 'B': []}
]
self.assertEqual(
len(categories_list), len(results_list),
'Number of parameters ({0:d}) and results ({1:d}) does not match'.format(len(categories_list), len(results_list))
)
for (index, categories) in enumerate(categories_list):
self.assertEqual(ComingEpisodes._get_categories_map(categories), results_list[index])
def test_get_sort(self):
"""
Tests if get_sort returns the right sort of coming episode
"""
test_cases = {
None: 'date',
'': 'date',
'wrong': 'date',
'date': 'date',
'Date': 'date',
'network': 'network',
'NetWork': 'network',
'show': 'show',
'Show': 'show',
}
unicode_test_cases = {
'': 'date',
'wrong': 'date',
'date': 'date',
'Date': 'date',
'network': 'network',
'NetWork': 'network',
'show': 'show',
'Show': 'show',
}
for tests in test_cases, unicode_test_cases:
for (sort, result) in six.iteritems(tests):
self.assertEqual(ComingEpisodes._get_sort(sort), result)
if __name__ == '__main__':
print('=====> Testing {0}'.format(__file__))
SUITE = unittest.TestLoader().loadTestsFromTestCase(ComingEpisodesTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)<|fim▁end|> |
"""
Test coming episodes |
<|file_name|>issue-3314.rs<|end_file_name|><|fim▁begin|>/*code
/*code*/
if true {
println!("1");<|fim▁hole|><|fim▁end|> | }*/ |
<|file_name|>pane.test.js<|end_file_name|><|fim▁begin|>var expect = require('chai').expect
describe('Pane', function () {
describe('# Show pane', function () {
it('should show a lateral pane when requested (click on data feature)', function () {
const pane = require('../src/pane')
expect(pane.show).to.be.a('function')
pane.show()
expect(document.getElementById('pane')).not.to.be.null
})
})
describe('# Close pane', function () {
it('should empty lateral pane when requested (click on map or click on close button)', function () {
const pane = require('../src/pane')
expect(pane.close).to.be.a('function')
pane.close()
expect(document.getElementById('pane').hasChildNodes()).to.be.false
})
})
describe('# Show feature in pane', function () {
const pane = require('../src/pane')
const parcelFeature = require('./fixtures').getGoogleMapsParcelFeature()
it('should have a hero header section with an image button', function () {
pane.show(parcelFeature)
var sectionsHeroHeader = document.getElementById('pane').getElementsByClassName('section-hero-header')<|fim▁hole|> expect(sectionsHeroHeader).to.not.be.null
expect(sectionsHeroHeader.length).to.be.equal(1, '1 and only 1 section hero header')
var sectionHeroHeader = sectionsHeroHeader[0]
expect(sectionHeroHeader.getElementsByTagName('button')).to.not.be.null
expect(sectionHeroHeader.getElementsByTagName('button').length).to.be.least(1, 'Almost a button in section-hero-header')
var imageButton = sectionHeroHeader.getElementsByTagName('button')[0]
expect(imageButton.getElementsByTagName('img')).to.not.be.null
expect(imageButton.getElementsByTagName('img').length).to.be.least(1, 'Almost an image in the button of section-hero-header')
})
it('and with a description in the hero header section', function () {
pane.show(parcelFeature)
var sectionHeroHeader = document.getElementById('pane').getElementsByClassName('section-hero-header')[0]
expect(sectionHeroHeader.getElementsByClassName('section-hero-header-description')).to.not.be.null
expect(sectionHeroHeader.getElementsByClassName('section-hero-header-description').length).to.be.least(1, 'Almost a description in section-hero-header')
var description = sectionHeroHeader.getElementsByClassName('section-hero-header-description')[0]
expect(description.getElementsByClassName('section-hero-header-title')).to.not.be.null
expect(description.getElementsByClassName('section-hero-header-title').length).to.be.least(1, 'Almost a title in section-hero-header')
expect(description.getElementsByTagName('h1').length).to.be.least(1, 'Almost a title in section-hero-header')
expect(description.getElementsByTagName('h1')[0].textContent).to.be.equal(parcelFeature.properties.nationalCadastralReference, 'Title with the national cadastral reference of the selected parcel')
expect(description.getElementsByClassName('section-hero-header-container')).to.not.be.null
expect(description.getElementsByClassName('section-hero-header-description-container').length).to.be.least(1, 'Almost a description container in section-hero-header')
})
it('should have an action section', function () {
pane.show(parcelFeature)
var sectionsAction = document.getElementById('pane').getElementsByClassName('section-action')
expect(sectionsAction).to.not.be.null
expect(sectionsAction.length).to.be.equal(1, '1 and only 1 action section')
var sectionAction = sectionsAction[0]
var buttons = sectionAction.getElementsByTagName('button')
expect(buttons).to.not.be.null
expect(buttons.length).to.be.equal(4, 'Four action buttons in the action section')
})
})
})<|fim▁end|> | |
<|file_name|>Index.js<|end_file_name|><|fim▁begin|>(function () {
$(function () {
var _userService = abp.services.app.user;
var _$modal = $('#UserCreateModal');
var _$form = _$modal.find('form');
_$form.validate({
rules: {
Password: "required",
ConfirmPassword: {
equalTo: "#Password"
}
}
});
$('#RefreshButton').click(function () {
refreshUserList();
});
$('.delete-user').click(function () {
var userId = $(this).attr("data-user-id");
var userName = $(this).attr('data-user-name');
deleteUser(userId, userName);
});
$('.edit-user').click(function (e) {
var userId = $(this).attr("data-user-id");
e.preventDefault();
$.ajax({
url: abp.appPath + 'Users/EditUserModal?userId=' + userId,
type: 'POST',
contentType: 'application/html',
success: function (content) {
$('#UserEditModal div.modal-content').html(content);
},
error: function (e) { }
});
});
_$form.find('button[type="submit"]').click(function (e) {
e.preventDefault();
if (!_$form.valid()) {
return;
}
var user = _$form.serializeFormToObject(); //serializeFormToObject is defined in main.js
user.roleNames = [];
var _$roleCheckboxes = $("input[name='role']:checked");
if (_$roleCheckboxes) {
for (var roleIndex = 0; roleIndex < _$roleCheckboxes.length; roleIndex++) {
var _$roleCheckbox = $(_$roleCheckboxes[roleIndex]);
user.roleNames.push(_$roleCheckbox.attr('data-role-name'));
}
}
abp.ui.setBusy(_$modal);
_userService.create(user).done(function () {
_$modal.modal('hide');
location.reload(true); //reload page to see new user!
}).always(function () {
abp.ui.clearBusy(_$modal);
});
});
_$modal.on('shown.bs.modal', function () {
_$modal.find('input:not([type=hidden]):first').focus();
});
function refreshUserList() {
location.reload(true); //reload page to see new user!
}
function deleteUser(userId, userName) {<|fim▁hole|> function (isConfirmed) {
if (isConfirmed) {
_userService.delete({
id: userId
}).done(function () {
refreshUserList();
});
}
}
);
}
});
})();<|fim▁end|> | abp.message.confirm(
"Delete user '" + userName + "'?", |
<|file_name|>main.js<|end_file_name|><|fim▁begin|>import Model from 'flarum/Model';
import Discussion from 'flarum/models/Discussion';
import IndexPage from 'flarum/components/IndexPage';
import Tag from 'tags/models/Tag';<|fim▁hole|>
import addTagList from 'tags/addTagList';
import addTagFilter from 'tags/addTagFilter';
import addTagLabels from 'tags/addTagLabels';
import addTagControl from 'tags/addTagControl';
import addTagComposer from 'tags/addTagComposer';
app.initializers.add('tags', function(app) {
app.routes.tags = {path: '/tags', component: TagsPage.component()};
app.routes.tag = {path: '/t/:tags', component: IndexPage.component()};
app.route.tag = tag => app.route('tag', {tags: tag.slug()});
app.postComponents.discussionTagged = DiscussionTaggedPost;
app.store.models.tags = Tag;
Discussion.prototype.tags = Model.hasMany('tags');
Discussion.prototype.canTag = Model.attribute('canTag');
addTagList();
addTagFilter();
addTagLabels();
addTagControl();
addTagComposer();
});<|fim▁end|> | import TagsPage from 'tags/components/TagsPage';
import DiscussionTaggedPost from 'tags/components/DiscussionTaggedPost'; |
<|file_name|>wav2png.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Freesound is (c) MUSIC TECHNOLOGY GROUP, UNIVERSITAT POMPEU FABRA
#
# Freesound is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Freesound is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# See AUTHORS file.
#
# 03/10/2013: Modified from original code
import sys
from compmusic.extractors.imagelib.MelSpectrogramImage import create_wave_images
from processing import AudioProcessingException
'''<|fim▁hole|>parser.add_option("-w", "--width", action="store", dest="image_width", type="int", help="image width in pixels (default %default)")
parser.add_option("-h", "--height", action="store", dest="image_height", type="int", help="image height in pixels (default %default)")
parser.add_option("-f", "--fft", action="store", dest="fft_size", type="int", help="fft size, power of 2 for increased performance (default %default)")
parser.add_option("-p", "--profile", action="store_true", dest="profile", help="run profiler and output profiling information")
parser.set_defaults(output_filename_w=None, output_filename_s=None, image_width=500, image_height=171, fft_size=2048)
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
parser.error("not enough arguments")
if len(args) > 1 and (options.output_filename_w != None or options.output_filename_s != None):
parser.error("when processing multiple files you can't define the output filename!")
'''
def progress_callback(percentage):
sys.stdout.write(str(percentage) + "% ")
sys.stdout.flush()
# process all files so the user can use wildcards like *.wav
def genimages(input_file, output_file_w, output_file_s, output_file_m, options):
args = (input_file, output_file_w, output_file_s, output_file_m, options.image_width, options.image_height,
options.fft_size, progress_callback, options.f_min, options.f_max, options.scale_exp, options.pallete)
print("processing file %s:\n\t" % input_file, end="")
try:
create_wave_images(*args)
except AudioProcessingException as e:
print("Error running wav2png: ", e)<|fim▁end|> | parser = optparse.OptionParser("usage: %prog [options] input-filename", conflict_handler="resolve")
parser.add_option("-a", "--waveout", action="store", dest="output_filename_w", type="string", help="output waveform image (default input filename + _w.png)")
parser.add_option("-s", "--specout", action="store", dest="output_filename_s", type="string", help="output spectrogram image (default input filename + _s.jpg)") |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![crate_name = "canvas_traits"]
#![crate_type = "rlib"]
#![feature(custom_derive)]
#![feature(plugin)]
#![plugin(heapsize_plugin, plugins, serde_macros)]
extern crate azure;
extern crate core;
extern crate cssparser;
extern crate euclid;
extern crate gfx_traits;
extern crate heapsize;
extern crate ipc_channel;
extern crate layers;
extern crate offscreen_gl_context;
extern crate serde;
extern crate util;
extern crate webrender_traits;
use azure::azure::{AzColor, AzFloat};
use azure::azure_hl::{CapStyle, CompositionOp, JoinStyle};
use azure::azure_hl::{ColorPattern, DrawTarget, Pattern};
use azure::azure_hl::{ExtendMode, GradientStop, LinearGradientPattern, RadialGradientPattern};
use azure::azure_hl::{SurfaceFormat, SurfacePattern};
use cssparser::RGBA;
use euclid::matrix2d::Matrix2D;
use euclid::point::Point2D;
use euclid::rect::Rect;
use euclid::size::Size2D;
use gfx_traits::color;
use ipc_channel::ipc::{IpcSender, IpcSharedMemory};
use layers::platform::surface::NativeSurface;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::default::Default;
use std::str::FromStr;
use std::sync::mpsc::Sender;
pub use webrender_traits::{WebGLFramebufferBindingRequest, WebGLError, WebGLParameter, WebGLResult, WebGLContextId};
pub use webrender_traits::WebGLCommand as CanvasWebGLMsg;
#[derive(Clone, Deserialize, Serialize)]
pub enum FillRule {
Nonzero,
Evenodd,
}
#[derive(Clone, Deserialize, Serialize)]
pub enum CanvasMsg {
Canvas2d(Canvas2dMsg),
Common(CanvasCommonMsg),
FromLayout(FromLayoutMsg),
FromPaint(FromPaintMsg),
WebGL(CanvasWebGLMsg),
}
#[derive(Clone, Deserialize, Serialize)]
pub enum CanvasCommonMsg {
Close,
Recreate(Size2D<i32>),
}
#[derive(Clone, Deserialize, Serialize)]
pub enum CanvasData {
Pixels(CanvasPixelData),
WebGL(WebGLContextId),
}
#[derive(Clone, Deserialize, Serialize)]
pub struct CanvasPixelData {
pub image_data: IpcSharedMemory,
pub image_key: Option<webrender_traits::ImageKey>,
}
#[derive(Clone, Deserialize, Serialize)]
pub enum FromLayoutMsg {
SendData(IpcSender<CanvasData>),
}
#[derive(Clone)]
pub enum FromPaintMsg {
SendNativeSurface(Sender<NativeSurface>),
}
impl Serialize for FromPaintMsg {
fn serialize<S>(&self, _: &mut S) -> Result<(), S::Error> where S: Serializer {
panic!("can't serialize a `FromPaintMsg`!")
}
}
impl Deserialize for FromPaintMsg {
fn deserialize<D>(_: &mut D) -> Result<FromPaintMsg, D::Error> where D: Deserializer {
panic!("can't deserialize a `FromPaintMsg`!")
}
}
#[derive(Clone, Deserialize, Serialize)]
pub enum Canvas2dMsg {
Arc(Point2D<f32>, f32, f32, f32, bool),
ArcTo(Point2D<f32>, Point2D<f32>, f32),
DrawImage(Vec<u8>, Size2D<f64>, Rect<f64>, Rect<f64>, bool),
DrawImageSelf(Size2D<f64>, Rect<f64>, Rect<f64>, bool),
BeginPath,
BezierCurveTo(Point2D<f32>, Point2D<f32>, Point2D<f32>),
ClearRect(Rect<f32>),
Clip,
ClosePath,
Fill,
FillRect(Rect<f32>),
GetImageData(Rect<i32>, Size2D<f64>, IpcSender<Vec<u8>>),
IsPointInPath(f64, f64, FillRule, IpcSender<bool>),
LineTo(Point2D<f32>),
MoveTo(Point2D<f32>),
PutImageData(Vec<u8>, Point2D<f64>, Size2D<f64>, Rect<f64>),
QuadraticCurveTo(Point2D<f32>, Point2D<f32>),
Rect(Rect<f32>),
RestoreContext,
SaveContext,
StrokeRect(Rect<f32>),
Stroke,
SetFillStyle(FillOrStrokeStyle),
SetStrokeStyle(FillOrStrokeStyle),
SetLineWidth(f32),
SetLineCap(LineCapStyle),
SetLineJoin(LineJoinStyle),
SetMiterLimit(f32),
SetGlobalAlpha(f32),
SetGlobalComposition(CompositionOrBlending),
SetTransform(Matrix2D<f32>),
SetShadowOffsetX(f64),
SetShadowOffsetY(f64),
SetShadowBlur(f64),
SetShadowColor(RGBA),
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct CanvasGradientStop {
pub offset: f64,
pub color: RGBA,
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct LinearGradientStyle {
pub x0: f64,
pub y0: f64,
pub x1: f64,
pub y1: f64,
pub stops: Vec<CanvasGradientStop>
}
impl LinearGradientStyle {
pub fn new(x0: f64, y0: f64, x1: f64, y1: f64, stops: Vec<CanvasGradientStop>)
-> LinearGradientStyle {
LinearGradientStyle {
x0: x0,
y0: y0,
x1: x1,
y1: y1,
stops: stops,
}
}
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct RadialGradientStyle {
pub x0: f64,
pub y0: f64,
pub r0: f64,
pub x1: f64,
pub y1: f64,
pub r1: f64,
pub stops: Vec<CanvasGradientStop>
}
impl RadialGradientStyle {
pub fn new(x0: f64, y0: f64, r0: f64, x1: f64, y1: f64, r1: f64, stops: Vec<CanvasGradientStop>)
-> RadialGradientStyle {
RadialGradientStyle {
x0: x0,
y0: y0,
r0: r0,
x1: x1,
y1: y1,
r1: r1,
stops: stops,
}
}
}
#[derive(Clone, Deserialize, Serialize)]
pub struct SurfaceStyle {
pub surface_data: Vec<u8>,
pub surface_size: Size2D<i32>,
pub repeat_x: bool,
pub repeat_y: bool,
}
impl SurfaceStyle {
pub fn new(surface_data: Vec<u8>, surface_size: Size2D<i32>, repeat_x: bool, repeat_y: bool)
-> SurfaceStyle {
SurfaceStyle {
surface_data: surface_data,
surface_size: surface_size,
repeat_x: repeat_x,
repeat_y: repeat_y,
}
}
}
#[derive(Clone, Deserialize, Serialize)]
pub enum FillOrStrokeStyle {
Color(RGBA),
LinearGradient(LinearGradientStyle),
RadialGradient(RadialGradientStyle),
Surface(SurfaceStyle),
}
impl FillOrStrokeStyle {
pub fn to_azure_pattern(&self, drawtarget: &DrawTarget) -> Option<Pattern> {
match *self {
FillOrStrokeStyle::Color(ref color) => {
Some(Pattern::Color(ColorPattern::new(color::new(color.red,
color.green,
color.blue,
color.alpha))))
},
FillOrStrokeStyle::LinearGradient(ref linear_gradient_style) => {
let gradient_stops: Vec<GradientStop> = linear_gradient_style.stops.iter().map(|s| {
GradientStop {
offset: s.offset as AzFloat,
color: color::new(s.color.red, s.color.green, s.color.blue, s.color.alpha)
}
}).collect();
Some(Pattern::LinearGradient(LinearGradientPattern::new(
&Point2D::new(linear_gradient_style.x0 as AzFloat, linear_gradient_style.y0 as AzFloat),
&Point2D::new(linear_gradient_style.x1 as AzFloat, linear_gradient_style.y1 as AzFloat),
drawtarget.create_gradient_stops(&gradient_stops, ExtendMode::Clamp),
&Matrix2D::identity())))
},
FillOrStrokeStyle::RadialGradient(ref radial_gradient_style) => {
let gradient_stops: Vec<GradientStop> = radial_gradient_style.stops.iter().map(|s| {
GradientStop {
offset: s.offset as AzFloat,
color: color::new(s.color.red, s.color.green, s.color.blue, s.color.alpha)
}
}).collect();
Some(Pattern::RadialGradient(RadialGradientPattern::new(
&Point2D::new(radial_gradient_style.x0 as AzFloat, radial_gradient_style.y0 as AzFloat),
&Point2D::new(radial_gradient_style.x1 as AzFloat, radial_gradient_style.y1 as AzFloat),
radial_gradient_style.r0 as AzFloat, radial_gradient_style.r1 as AzFloat,
drawtarget.create_gradient_stops(&gradient_stops, ExtendMode::Clamp),
&Matrix2D::identity())))
},
FillOrStrokeStyle::Surface(ref surface_style) => {
drawtarget.create_source_surface_from_data(&surface_style.surface_data,
surface_style.surface_size,
surface_style.surface_size.width * 4,
SurfaceFormat::B8G8R8A8)
.map(|source_surface| {
Pattern::Surface(SurfacePattern::new(
source_surface.azure_source_surface,
surface_style.repeat_x,
surface_style.repeat_y,
&Matrix2D::identity()))
})
}
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize, HeapSizeOf)]
pub enum LineCapStyle {
Butt = 0,
Round = 1,
Square = 2,
}
impl FromStr for LineCapStyle {
type Err = ();
fn from_str(string: &str) -> Result<LineCapStyle, ()> {
match string {
"butt" => Ok(LineCapStyle::Butt),
"round" => Ok(LineCapStyle::Round),
"square" => Ok(LineCapStyle::Square),
_ => Err(()),
}
}
}
impl LineCapStyle {
pub fn to_azure_style(&self) -> CapStyle {
match *self {
LineCapStyle::Butt => CapStyle::Butt,
LineCapStyle::Round => CapStyle::Round,
LineCapStyle::Square => CapStyle::Square,
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize, HeapSizeOf)]
pub enum LineJoinStyle {
Round = 0,
Bevel = 1,
Miter = 2,
}
impl FromStr for LineJoinStyle {
type Err = ();
fn from_str(string: &str) -> Result<LineJoinStyle, ()> {
match string {
"round" => Ok(LineJoinStyle::Round),
"bevel" => Ok(LineJoinStyle::Bevel),
"miter" => Ok(LineJoinStyle::Miter),
_ => Err(()),
}
}
}
impl LineJoinStyle {
pub fn to_azure_style(&self) -> JoinStyle {
match *self {
LineJoinStyle::Round => JoinStyle::Round,
LineJoinStyle::Bevel => JoinStyle::Bevel,
LineJoinStyle::Miter => JoinStyle::Miter,
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize)]
pub enum RepetitionStyle {
Repeat,
RepeatX,
RepeatY,
NoRepeat,
}
impl FromStr for RepetitionStyle {
type Err = ();
fn from_str(string: &str) -> Result<RepetitionStyle, ()> {
match string {
"repeat" => Ok(RepetitionStyle::Repeat),
"repeat-x" => Ok(RepetitionStyle::RepeatX),
"repeat-y" => Ok(RepetitionStyle::RepeatY),
"no-repeat" => Ok(RepetitionStyle::NoRepeat),
_ => Err(()),
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize, HeapSizeOf)]
pub enum CompositionStyle {
SrcIn,
SrcOut,
SrcOver,
SrcAtop,
DestIn,
DestOut,
DestOver,
DestAtop,
Copy,
Lighter,
Xor,
}
impl FromStr for CompositionStyle {
type Err = ();
fn from_str(string: &str) -> Result<CompositionStyle, ()> {
match string {
"source-in" => Ok(CompositionStyle::SrcIn),
"source-out" => Ok(CompositionStyle::SrcOut),
"source-over" => Ok(CompositionStyle::SrcOver),
"source-atop" => Ok(CompositionStyle::SrcAtop),
"destination-in" => Ok(CompositionStyle::DestIn),
"destination-out" => Ok(CompositionStyle::DestOut),
"destination-over" => Ok(CompositionStyle::DestOver),
"destination-atop" => Ok(CompositionStyle::DestAtop),
"copy" => Ok(CompositionStyle::Copy),
"lighter" => Ok(CompositionStyle::Lighter),
"xor" => Ok(CompositionStyle::Xor),
_ => Err(())
}
}
}
impl CompositionStyle {
pub fn to_azure_style(&self) -> CompositionOp {
match *self {
CompositionStyle::SrcIn => CompositionOp::In,
CompositionStyle::SrcOut => CompositionOp::Out,
CompositionStyle::SrcOver => CompositionOp::Over,
CompositionStyle::SrcAtop => CompositionOp::Atop,
CompositionStyle::DestIn => CompositionOp::DestIn,
CompositionStyle::DestOut => CompositionOp::DestOut,
CompositionStyle::DestOver => CompositionOp::DestOver,
CompositionStyle::DestAtop => CompositionOp::DestAtop,
CompositionStyle::Copy => CompositionOp::Source,
CompositionStyle::Lighter => CompositionOp::Add,
CompositionStyle::Xor => CompositionOp::Xor,
}
}
pub fn to_str(&self) -> &str {
match *self {
CompositionStyle::SrcIn => "source-in",
CompositionStyle::SrcOut => "source-out",
CompositionStyle::SrcOver => "source-over",
CompositionStyle::SrcAtop => "source-atop",
CompositionStyle::DestIn => "destination-in",
CompositionStyle::DestOut => "destination-out",
CompositionStyle::DestOver => "destination-over",
CompositionStyle::DestAtop => "destination-atop",
CompositionStyle::Copy => "copy",
CompositionStyle::Lighter => "lighter",
CompositionStyle::Xor => "xor",
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize, HeapSizeOf)]
pub enum BlendingStyle {
Multiply,
Screen,
Overlay,
Darken,
Lighten,
ColorDodge,
ColorBurn,
HardLight,
SoftLight,
Difference,
Exclusion,
Hue,
Saturation,
Color,<|fim▁hole|>}
impl FromStr for BlendingStyle {
type Err = ();
fn from_str(string: &str) -> Result<BlendingStyle, ()> {
match string {
"multiply" => Ok(BlendingStyle::Multiply),
"screen" => Ok(BlendingStyle::Screen),
"overlay" => Ok(BlendingStyle::Overlay),
"darken" => Ok(BlendingStyle::Darken),
"lighten" => Ok(BlendingStyle::Lighten),
"color-dodge" => Ok(BlendingStyle::ColorDodge),
"color-burn" => Ok(BlendingStyle::ColorBurn),
"hard-light" => Ok(BlendingStyle::HardLight),
"soft-light" => Ok(BlendingStyle::SoftLight),
"difference" => Ok(BlendingStyle::Difference),
"exclusion" => Ok(BlendingStyle::Exclusion),
"hue" => Ok(BlendingStyle::Hue),
"saturation" => Ok(BlendingStyle::Saturation),
"color" => Ok(BlendingStyle::Color),
"luminosity" => Ok(BlendingStyle::Luminosity),
_ => Err(())
}
}
}
impl BlendingStyle {
pub fn to_azure_style(&self) -> CompositionOp {
match *self {
BlendingStyle::Multiply => CompositionOp::Multiply,
BlendingStyle::Screen => CompositionOp::Screen,
BlendingStyle::Overlay => CompositionOp::Overlay,
BlendingStyle::Darken => CompositionOp::Darken,
BlendingStyle::Lighten => CompositionOp::Lighten,
BlendingStyle::ColorDodge => CompositionOp::ColorDodge,
BlendingStyle::ColorBurn => CompositionOp::ColorBurn,
BlendingStyle::HardLight => CompositionOp::HardLight,
BlendingStyle::SoftLight => CompositionOp::SoftLight,
BlendingStyle::Difference => CompositionOp::Difference,
BlendingStyle::Exclusion => CompositionOp::Exclusion,
BlendingStyle::Hue => CompositionOp::Hue,
BlendingStyle::Saturation => CompositionOp::Saturation,
BlendingStyle::Color => CompositionOp::Color,
BlendingStyle::Luminosity => CompositionOp::Luminosity,
}
}
pub fn to_str(&self) -> &str {
match *self {
BlendingStyle::Multiply => "multiply",
BlendingStyle::Screen => "screen",
BlendingStyle::Overlay => "overlay",
BlendingStyle::Darken => "darken",
BlendingStyle::Lighten => "lighten",
BlendingStyle::ColorDodge => "color-dodge",
BlendingStyle::ColorBurn => "color-burn",
BlendingStyle::HardLight => "hard-light",
BlendingStyle::SoftLight => "soft-light",
BlendingStyle::Difference => "difference",
BlendingStyle::Exclusion => "exclusion",
BlendingStyle::Hue => "hue",
BlendingStyle::Saturation => "saturation",
BlendingStyle::Color => "color",
BlendingStyle::Luminosity => "luminosity",
}
}
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize, HeapSizeOf)]
pub enum CompositionOrBlending {
Composition(CompositionStyle),
Blending(BlendingStyle),
}
impl Default for CompositionOrBlending {
fn default() -> CompositionOrBlending {
CompositionOrBlending::Composition(CompositionStyle::SrcOver)
}
}
impl FromStr for CompositionOrBlending {
type Err = ();
fn from_str(string: &str) -> Result<CompositionOrBlending, ()> {
if let Ok(op) = CompositionStyle::from_str(string) {
return Ok(CompositionOrBlending::Composition(op));
}
if let Ok(op) = BlendingStyle::from_str(string) {
return Ok(CompositionOrBlending::Blending(op));
}
Err(())
}
}
impl CompositionOrBlending {
pub fn to_azure_style(&self) -> CompositionOp {
match *self {
CompositionOrBlending::Composition(op) => op.to_azure_style(),
CompositionOrBlending::Blending(op) => op.to_azure_style(),
}
}
}
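// Illustrative check (not from the original source) of the parse order the
// `FromStr` impl above establishes: composition keywords win, then blend
// modes, then `Err(())`. `assert!` with `==` is used because these enums
// derive `PartialEq` but not `Debug`.
#[cfg(test)]
mod composition_or_blending_tests {
    use super::{BlendingStyle, CompositionOrBlending, CompositionStyle};
    use std::str::FromStr;

    #[test]
    fn parses_composition_first_then_blending() {
        // "xor" names a composition operator, so the Composition arm wins.
        assert!(CompositionOrBlending::from_str("xor") ==
                Ok(CompositionOrBlending::Composition(CompositionStyle::Xor)));
        // "multiply" is only a blend mode, so parsing falls through to Blending.
        assert!(CompositionOrBlending::from_str("multiply") ==
                Ok(CompositionOrBlending::Blending(BlendingStyle::Multiply)));
        // Unknown keywords are rejected.
        assert!(CompositionOrBlending::from_str("no-such-op").is_err());
    }
}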
pub trait ToAzColor {
fn to_azcolor(&self) -> AzColor;
}
impl ToAzColor for RGBA {
fn to_azcolor(&self) -> AzColor {
color::rgba(self.red as AzFloat,
self.green as AzFloat,
self.blue as AzFloat,
self.alpha as AzFloat)
}
}<|fim▁end|> | Luminosity, |
<|file_name|>naturesbest.py<|end_file_name|><|fim▁begin|>import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip\
as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class NaturesBestSpider(BaseSpider):<|fim▁hole|> name = 'naturesbest.co.uk'
allowed_domains = ['www.naturesbest.co.uk', 'naturesbest.co.uk']
start_urls = ('http://www.naturesbest.co.uk/page/productdirectory/?alpha=abcde',
'http://www.naturesbest.co.uk/page/productdirectory/?alpha=fghij',
'http://www.naturesbest.co.uk/page/productdirectory/?alpha=klmno',
'http://www.naturesbest.co.uk/page/productdirectory/?alpha=pqrst',
'http://www.naturesbest.co.uk/page/productdirectory/?alpha=uvwxyz')
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# getting product links from A-Z product list
letter_links = hxs.select(u'//div[@class="content"]')
for letter_link in letter_links:
prod_urls = letter_link.select(u'./div/a/@href').extract()
for prod_url in prod_urls:
url = urljoin_rfc(get_base_url(response), prod_url)
yield Request(url)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
name = hxs.select(u'//div[@class="productTITLE"]/h1/text()').extract()
if name:
url = response.url
url = urljoin_rfc(get_base_url(response), url)
skus = hxs.select('//td[@class="skuname"]/text()').extract()
prices = hxs.select('//td[@class="price"]/text()').extract()
skus_prices = zip(skus, prices)
for sku, price in skus_prices:
loader = ProductLoader(item=Product(), selector=hxs)
loader.add_value('url', url)
loader.add_value('name', (name[0].strip() + ' ' + sku.strip()).replace(u'\xa0', ' '))
#loader.add_value('sku', sku)
loader.add_value('price', price)
yield loader.load_item()<|fim▁end|> | |
<|file_name|>waf_config.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
############################################################################
# Copyright 2017 RIFT.IO Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import argparse
import logging
import os
import stat
import subprocess
import sys
import time
import yaml
class ConfigurationError(Exception):
pass
def configure_waf_haproxy_cp(logger, run_dir, mgmt_ip, haproxy_cp_ip):
sh_file = "{}/waf_set_haproxy_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "echo \"<VirtualHost *:80>\r"
send " AddDefaultCharset UTF-8\r"
send " ProxyPreserveHost On\r"
send " ProxyRequests off\r"
send " ProxyVia Off\r"
send " ProxyPass / http://{haproxy_cp_ip}:5000/\r"
send " ProxyPassReverse / http://{haproxy_cp_ip}:5000/\r"
send " </VirtualHost>\" > /etc/httpd/conf.d/waf_proxy.conf\r"
expect "]# "
send "echo \"<IfModule mod_security2.c>\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/modsecurity_crs_10_setup.conf\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/base_rules/*.conf\r\r"
send " SecRuleEngine On\r"
send " SecRequestBodyAccess On\r"
send " SecResponseBodyAccess On\r"
send " SecDebugLog /var/log/httpd/modsec-debug.log\r"
send " SecDebugLogLevel 3\r"
send "</IfModule>\" > /etc/httpd/conf.d/mod_security.conf\r"
expect "]# "
send "systemctl stop httpd\r"
expect "]# "
send "systemctl start httpd\r"
expect "]# "
'''.format(mgmt_ip=mgmt_ip, haproxy_cp_ip=haproxy_cp_ip))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name):
sh_file = "{}/haproxy_add_waf_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "grep \"server {waf_server_name} {waf_cp_ip}\" /etc/haproxy/haproxy.cfg && echo \"Already configured\" && exit 0\r"
expect {{
"]$ " {{ exit }}
"]# "
}}
send "sed -i \'s/\\(.*WAF list.*\\)/\\1\\n server {waf_server_name} {waf_cp_ip}:80 check/g\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_cp_ip=waf_cp_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name):
sh_file = "{}/haproxy_remove_httpd_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "sed -i \'/server {waf_server_name}/d\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy remove waf config failed: {}".format(rc))
def main(argv=sys.argv[1:]):
try:
parser = argparse.ArgumentParser()
parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")<|fim▁hole|> args = parser.parse_args()
run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
if not os.path.exists(run_dir):
os.makedirs(run_dir)
log_file = "{}/rift_waf_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logging.basicConfig(filename=log_file, level=logging.DEBUG)
logger = logging.getLogger()
ch = logging.StreamHandler()
if args.verbose:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
except Exception as e:
print("Got exception:{}".format(e))
raise
try:
dry_run = args.dry_run
yaml_str = args.yaml_cfg_file.read()
logger.debug("Input YAML file: %s", yaml_str)
yaml_cfg = yaml.load(yaml_str)
logger.debug("Input YAML cfg: %s", yaml_cfg)
# Check if this is post scale out trigger
def find_cp_ip(vnfr_list, vnfd_name, cp_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
for cp in vnfr['connection_points']:
logger.debug("Connection point: %s", format(cp))
if cp_name in cp['name']:
return cp['ip_address']
            raise ValueError("Could not find vnfd %s connection point %s" % (vnfd_name, cp_name))
def find_mgmt_ip(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr['rw_mgmt_ip']
            raise ValueError("Could not find vnfd %s mgmt ip" % vnfd_name)
def find_vnfr(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr
            raise ValueError("Could not find vnfd %s" % vnfd_name)
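        # Illustrative shape of the YAML these helpers walk; only the keys
        # actually read in this script are shown, and all values are made-up
        # examples:
        #
        #   trigger: post_scale_out          # or pre_scale_in
        #   vnfrs_others:
        #     - name: nsr1__haproxy_vnfd__1
        #       rw_mgmt_ip: 10.66.202.12
        #       connection_points:
        #         - name: haproxy_vnfd/cp0
        #           ip_address: 11.0.0.10
        #   vnfrs_in_group:
        #     - name: nsr1__waf_vnfd__2
        #       rw_mgmt_ip: 10.66.202.15
        #       connection_points:
        #         - name: waf_vnfd/cp0
        #           ip_address: 11.0.0.11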
haproxy_cp_ip = find_cp_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd", "cp0")
haproxy_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd")
waf_cp_ip = find_cp_ip(yaml_cfg['vnfrs_in_group'], "waf_vnfd", "cp0")
waf_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_in_group'], "waf_vnfd")
waf_vnfr = find_vnfr(yaml_cfg['vnfrs_in_group'], "waf_vnfd")
# HAProxy wants to use a name without .'s
waf_server_name = waf_vnfr["name"].replace(".", "__")
if yaml_cfg['trigger'] == 'post_scale_out':
logger.debug("Sleeping for 60 seconds to give VNFD mgmt VM a chance to boot up")
time.sleep(60)
configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name)
configure_waf_haproxy_cp(logger, run_dir, waf_mgmt_ip, haproxy_cp_ip)
elif yaml_cfg['trigger'] == 'pre_scale_in':
configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name)
else:
raise ValueError("Unexpected trigger {}".format(yaml_cfg['trigger']))
except Exception as e:
logger.exception(e)
raise
if __name__ == "__main__":
main()<|fim▁end|> | |
<|file_name|>mongodb_replication_status.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# mongodb_replicaset_status.py
# Author: Tyler Stroud <[email protected]>
# Date: 2012-11-06
"""
This script monitors replication status of a replicaset
"""
from daemon import runner
import logging
from pymongo import Connection
from pymongo.errors import AutoReconnect
from time import sleep
import smtplib
from email.mime.text import MIMEText
import sys
from argparse import ArgumentParser
from ConfigParser import RawConfigParser, NoOptionError
class MongoDBReplicationStatus(object):
last_primary = None
def __init__(self, host, poll_interval=5, lag_threshold=30,
max_connect_retries=5, log_level=logging.INFO,
pidfile='/tmp/mongodb_replication_status.pid',
logfile='/var/log/mongodb_replication_status.log'):
self.poll_interval = poll_interval
self.lag_threshold = lag_threshold
self.max_connect_retries = max_connect_retries
self.stdin_path = '/dev/null'
self.stdout_path = logfile
self.stderr_path = logfile
self.pidfile_path = pidfile
self.pidfile_timeout = 5
self.hostnames = host
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.getLevelName(log_level))
self.logger_handler = logging.FileHandler(logfile)
self.logger_handler.setFormatter(logging.Formatter('[%(asctime)s] %(message)s'))
self.logger.addHandler(self.logger_handler)
def set_notifier(self, notifier):
        assert isinstance(notifier, Notifier), ('"notifier" must be an instance '
'of "Notifier"')
self.notifier = notifier
def get_members(self):
""" Connect to the primary member and refresh the replica set status """
if self.last_primary is not None:
connection = self.get_connection(self.last_primary)
if connection is not None and connection.is_primary:
return connection['admin'].command('replSetGetStatus')['members']
for hostname in [h for h in self.hostnames if h != self.last_primary]:
connection = self.get_connection(hostname)
if not isinstance(connection, Connection):
continue # failed to connect to the current iteration's hostname, so continue and try the next hostname
if connection.is_primary:
self.last_primary = hostname
return connection['admin'].command('replSetGetStatus')['members']
# There is no primary, so wait 5 seconds and try again
sleep(5)
return self.get_members()
def get_primary_optime(self, members):
""" Returns the optime of the primary member """
for member in members:
if 'PRIMARY' == member['stateStr']:
return member['optime'].time
def get_connection(self, hostname):
""" Attempt to create a mongodb Connection to the given hostname """
retries = self.max_connect_retries
while retries > 0:
try:
return Connection(hostname)<|fim▁hole|> % (hostname, retries))
retries -= 1
sleep(5)
errmsg = 'ERROR: All %s attempts to connect to hostname "%s" failed. Host may be down.'\
% (self.max_connect_retries, hostname)
self.logger.error(errmsg)
self.notifier.send_to_all(errmsg, '[ALERT] Host %s may be down' % hostname)
def run(self):
while True:
members = self.get_members()
message = ''
for member in members:
lag = self.get_primary_optime(members) - member['optime'].time
if lag > self.lag_threshold:
message += 'WARNING: Member "%s" is %s seconds behind the primary\n' % (member['name'], lag)
self.logger.warning(message)
self.logger.debug('DEBUG: Member "%s" is %s seconds behind the primary' % (member['name'], lag))
            if message != '':
self.notifier.send_to_all(message)
sleep(self.poll_interval)
class Notifier(object):
def __init__(self, from_email, recipient_emails, smtp_host='localhost'):
self.from_email = from_email
self.recipient_emails = recipient_emails
self.smtp_host = smtp_host
def send_to_all(self, message, subject='[ALERT] Replication Status Warning'):
message = MIMEText(message)
message['Subject'] = subject
mailer = smtplib.SMTP(self.smtp_host)
return mailer.sendmail(self.from_email, self.recipient_emails, str(message))
if __name__ == '__main__':
parser = ArgumentParser()
    parser.add_argument('-c', '--config', help='The path to the configuration file', dest='FILE', required=True)
parser.add_argument('action', choices=('start', 'stop', 'restart'))
args = parser.parse_args()
config_parser = RawConfigParser()
config_file = open(args.FILE)
try:
config_parser.readfp(config_file)
finally:
config_file.close()
status = MongoDBReplicationStatus(
config_parser.get('main', 'host').split(','),
config_parser.getint('main', 'poll_interval'),
config_parser.getint('main', 'lag_threshold'),
config_parser.getint('main', 'max_connect_retries'),
config_parser.get('main', 'log_level'),
config_parser.get('main', 'pidfile'),
config_parser.get('main', 'logfile'),
)
notifier = Notifier(config_parser.get('main', 'from_email'),
config_parser.get('main', 'recipients'),
config_parser.get('main', 'smtp_host'))
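    # Illustrative config file for -c/--config; the [main] section and option
    # names are exactly the ones read above, the values are placeholders:
    #
    #   [main]
    #   host = mongo1:27017,mongo2:27017
    #   poll_interval = 5
    #   lag_threshold = 30
    #   max_connect_retries = 5
    #   log_level = INFO
    #   pidfile = /tmp/mongodb_replication_status.pid
    #   logfile = /var/log/mongodb_replication_status.log
    #   from_email = replication-monitor@localhost
    #   recipients = ops@localhost
    #   smtp_host = localhost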
status.set_notifier(notifier)
sys.argv = sys.argv[0], args.action # overwrite sys.argv to be what daemon_runner expects
daemon_runner = runner.DaemonRunner(status)
daemon_runner.daemon_context.files_preserve = [status.logger_handler.stream]
daemon_runner.do_action()<|fim▁end|> | except AutoReconnect:
self.logger.warning(
'WARNING: Failed to connect to hostname "%s". Trying again in 5 seconds. (%s tries left).' |
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for ssh2-streams v0.1.9
// Project: https://github.com/mscdex/ssh2-streams
// Definitions by: Ron Buckton <https://github.com/rbuckton>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
/// <reference types="node" />
import * as stream from "stream";
export class SSH2Stream extends stream.Transform {
/**
* The number of bytes sent since the last keying. This metric can be useful in determining when to call rekey().
*/
bytesSent: number;
/**
* The number of bytes received since the last keying. This metric can be useful in determining when to call rekey().
*/
bytesReceived: number;
/**
* Creates and returns a new SSH2Stream instance.
*/
constructor(config?: SSH2StreamConfig);
/**
* (Client/Server)
     * Writes a dummy GLOBAL_REQUEST packet (specifically "[email protected]") that requests a reply.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
ping(): boolean;
/**
* (Client/Server)
* Writes a disconnect packet and closes the stream.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
disconnect(reasonCode?: number): boolean;
/**
* (Client/Server)
* Starts the re-keying process. Incoming/Outgoing packets are buffered until the re-keying
* process has finished. Returns `false` to indicate that no more packets should be written
* until the `NEWKEYS` event is seen.
*/
rekey(): boolean;
/**
* (Client/Server)
* Writes a request success packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
requestSuccess(data?: Buffer): boolean;
/**
* (Client/Server)
* Writes a request failure packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
requestFailure(): boolean;
/**
* (Client/Server)
* Writes a channel success packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
channelSuccess(channel: number): boolean;
/**
* (Client/Server)
* Writes a channel failure packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
channelFailure(channel: number): boolean;
/**
* (Client/Server)
* Writes a channel EOF packet for the given `channel`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
channelEOF(channel: number): boolean;
/**
* (Client/Server)
* Writes a channel close packet for the given `channel`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
channelClose(channel: number): boolean;
/**
* (Client/Server)
* Writes a channel window adjust packet for the given `channel` where `amount` is the
* number of bytes to add to the channel window.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
channelWindowAdjust(channel: number, amount: number): boolean;
/**
* (Client/Server)
* Writes a channel data packet for the given `channel` where `data` is a _Buffer_ or _string_.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
channelData(channel: number, data: string | Buffer): boolean;
/**
* (Client/Server)
     * Writes a channel extended data packet for the given `channel` where `data` is a _Buffer_
* or _string_.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
channelExtData(channel: number, data: string | Buffer, type: number): boolean;
/**
* (Client/Server)
* Writes a channel open confirmation packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
channelOpenConfirm(remoteChannel: number, localChannel: number, initWindow: number, maxPacket: number): boolean;
/**
* (Client/Server)
* Writes a channel open failure packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
channelOpenFail(remoteChannel: number, reasonCode: number, description?: string, lang?: string): boolean;
/**
* (Client-only)
* Writes a service request packet for `serviceName`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
service(serviceName: string): boolean;
/**
* (Client-only)
* Writes a tcpip forward global request packet. `wantReply` defaults to `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
tcpipForward(bindAddr: string, bindPort: number, wantReply?: boolean): boolean;
/**
* (Client-only)
* Writes a cancel tcpip forward global request packet. `wantReply` defaults to `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
cancelTcpipForward(bindAddr: string, bindPort: number, wantReply?: boolean): boolean;
/**
* (Client-only)
* Writes a password userauth request packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
authPassword(username: string, password: string): boolean;
/**
* (Client-only)
* Writes a publickey userauth request packet. `pubKey` is the object returned from using
* `utils.parseKey()` on a private or public key. If `cbSign` is not present, a pubkey
* check userauth packet is written. Otherwise `cbSign` is called with `(blob, callback)`,
* where `blob` is the data to sign with the private key and the resulting signature
* _Buffer_ is passed to `callback` as the first argument.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
authPK(username: string, pubKey: ParsedKey, cbSign?: (blob: Buffer, callback: (signedBlob: Buffer) => void) => void): boolean;
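    // Illustrative wiring of the `cbSign` callback above (not part of the
    // declarations); `signWithPrivateKey` stands in for whatever routine
    // produces a signature Buffer for the blob and is an assumption here:
    //
    //   ssh.authPK('user', pubKey, (blob, callback) => {
    //     callback(signWithPrivateKey(blob));
    //   });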
/**
* (Client-only)
* Writes a hostbased userauth request packet. `pubKey` is the object returned from using
* `utils.parseKey()` on a private or public key. `cbSign` is called with `(blob, callback)`,
* where `blob` is the data to sign with the private key and the resulting signature
* _Buffer_ is passed to `callback` as the first argument.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
authHostBased(username: string, pubKey: ParsedKey, localHostname: string, localUsername: string, cbSign?: (blob: Buffer, callback: (signedBlob: Buffer) => void) => void): boolean;
/**
* (Client-only)
* Writes a keyboard-interactive userauth request packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
authKeyboard(username: string): boolean;
/**
* (Client-only)
* Writes a "none" userauth request packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
authNone(username: string): boolean;
/**
* (Client-only)
* Writes a userauth info response packet. `responses` is an _array_ of zero or more strings
* corresponding to responses to prompts previously sent by the server.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
authInfoRes(responses?: string[]): boolean;
/**
* (Client-only)
* Writes a direct tcpip channel open packet. `config` must contain `srcIP`, `srcPort`,
     * `destIP`, and `destPort`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
directTcpip(channel: number, initWindow: number, maxPacket: number, config: TcpipForwardingConfig): boolean;
/**
* (Client-only)
* Writes a session channel open packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
session(channel: number, initWindow: number, maxPacket: number): boolean;
/**
* (Client-only)
     * Writes an `[email protected]` channel request packet. `wantReply` defaults to
* `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
openssh_agentForward(channel: number, wantReply?: boolean): boolean;
/**
* (Client-only)
* Writes a window change channel request packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
windowChange(channel: number, rows: number, cols: number, height: number, width: number): boolean;
/**
* (Client-only)
* Writes a pty channel request packet. If `terminalType` is falsey, `vt100` is used.
* `terminalModes` can be the raw bytes, an _object_ of the terminal modes to set, or a falsey value for no modes. `wantReply` defaults to `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
pty(channel: number, rows: number, cols: number, height: number, width: number, terminalType?: string, terminalModes?: any, wantReply?: boolean): boolean;
/**
* (Client-only)
* Writes an env channel request packet. `value` can be a _string_ or _Buffer_. `wantReply`
* defaults to `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
env(channel: number, key: string, value: string | Buffer, wantReply?: boolean): boolean;
/**
* (Client-only)
* Writes a shell channel request packet. `wantReply` defaults to `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
shell(channel: number, wantReply?: boolean): boolean;
/**
* (Client-only)
* Writes an exec channel request packet. `wantReply` defaults to `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
exec(channel: number, command: string, wantReply?: boolean): boolean;
/**
* (Client-only)
* Writes a signal channel request packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
signal(channel: number, signalName: string): boolean;
/**
* (Client-only)
* Writes an X11 forward channel request packet. `wantReply` defaults to `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
x11Forward(channel: number, config: X11ForwardingConfig, wantReply?: boolean): boolean;
/**
* (Client-only)
* Writes a subsystem channel request packet. `name` is the name of the subsystem (e.g.
* `sftp` or `netconf`). `wantReply` defaults to `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
subsystem(channel: number, name: string, wantReply?: boolean): boolean;
/**
* (Client-only)
     * Writes a [email protected] request packet. `wantReply` defaults to `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
openssh_noMoreSessions(wantReply?: boolean): boolean;
/**
* (Client-only)
     * Writes a [email protected] request packet. `wantReply` defaults to `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
openssh_streamLocalForward(socketPath: string, wantReply?: boolean): boolean;
/**
* (Client-only)
     * Writes a [email protected] request packet. `wantReply` defaults to
* `true`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
openssh_cancelStreamLocalForward(socketPath: string, wantReply?: boolean): boolean;
/**
* (Client-only)
     * Writes a [email protected] channel open packet. `config` must contain
* `socketPath`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
openssh_directStreamLocal(channel: number, initWindow: number, maxPacket: number, config: SocketForwardingConfig): boolean;
/**
* (Server-only)
* Writes a service accept packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
serviceAccept(serviceName: string): boolean;
/**
* (Server-only)
* Writes a userauth failure packet. `authMethods` is an _array_ of authentication methods
* that can continue.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
authFailure(authMethods?: string[], partialSuccess?: boolean): boolean;
/**
* (Server-only)
* Writes a userauth success packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
authSuccess(): boolean;
/**
* (Server-only)
* Writes a userauth PK OK packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
authPKOK(keyAlgorithm: string, keyData: Buffer): boolean;
/**
* (Server-only)
* Writes a userauth info request packet. `prompts` is an array of `Prompt` objects.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
authInfoReq(name: string, instructions: string, prompts: Prompt[]): boolean;
/**
* (Server-only)
     * Writes a forwarded tcpip channel open packet. `info` must contain `bindAddr`,
     * `bindPort`, `remoteAddr`, and `remotePort`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
forwardedTcpip(channel: number, initWindow: number, maxPacket: number, info: ForwardedTcpip): boolean;
/**
* (Server-only)
* Writes an X11 channel open packet. `info` must contain `originAddr` and `originPort`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
x11(channel: number, initWindow: number, maxPacket: number, info: ForwardedX11): boolean;
/**
* (Server-only)
     * Writes an [email protected] channel open packet. `info` must contain
* `socketPath`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
openssh_forwardedStreamLocal(channel: number, initWindow: number, maxPacket: number, info: ForwardedSocket): boolean;
/**
* (Server-only)
* Writes an exit status channel request packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
exitStatus(channel: number, exitCode: number): boolean;
/**
* (Server-only)
* Writes an exit signal channel request packet.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
exitSignal(channel: number, signalName: string, coreDumped: boolean, errorMessage: string): boolean;
/**
* (Client/Server)
* Emitted when the protocol header is seen.
*/
on(event: "header", listener: (header: Header) => void): this;
/**
* (Client/Server)
*/
on(event: "GLOBAL_REQUEST", listener: (reqName: string, wantReply: boolean, request: GlobalRequest | Buffer | undefined) => void): this;
/**
* (Client/Server)
*/
on(event: "DISCONNECT", listener: (reason: string, reasonCode: number, description: string) => void): this;
/**
* (Client/Server)
*/
on(event: "DEBUG", listener: (message: string) => void): this;
/**
* (Client/Server)
*/
on(event: "NEWKEYS", listener: () => void): this;
/**
* (Client/Server)
*/
on(event: "REQUEST_SUCCESS", listener: (resData: Buffer) => void): this;
/**
* (Client/Server)
*/
on(event: "REQUEST_FAILURE", listener: () => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_OPEN", listener: (channelInfo: ChannelOpenInfo) => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_OPEN_CONFIRMATION:0", listener: (channelInfo: ChannelOpenConfirmationInfo) => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_OPEN_FAILURE:0", listener: (failInfo: ChannelOpenFailureInfo) => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_REQUEST:0", listener: (request: ChannelRequest) => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_DATA:0", listener: (data: Buffer) => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_EXTENDED_DATA:0", listener: (type: number, data: Buffer) => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_WINDOW_ADJUST:0", listener: (bytesToAdd: number) => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_SUCCESS:0", listener: () => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_FAILURE:0", listener: () => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_EOF:0", listener: () => void): this;
/**
* (Client/Server)
*/
on(event: "CHANNEL_CLOSE:0", listener: () => void): this;
/**
* (Client-only)
* This event allows you to verify a host's key. If `callback` is called with `true`, the
* handshake continues. Otherwise a disconnection will occur if `callback` is called with
* `false`. The default behavior is to auto-allow any host key if there are no handlers
* for this event.
*/
on(event: "fingerprint", listener: (hostKey: Buffer, callback: (success: boolean) => void) => void): this;
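    // Example host-key check (a sketch): hash the raw key and compare it to a
    // pinned value; `expectedFingerprint` and the MD5 scheme are assumptions.
    //
    //   ssh.on('fingerprint', (hostKey, callback) => {
    //     const fp = crypto.createHash('md5').update(hostKey).digest('hex');
    //     callback(fp === expectedFingerprint);
    //   });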
/**
* (Client-only)
*/
on(event: "SERVICE_ACCEPT", listener: (serviceName: string) => void): this;
/**
* (Client-only)
*/
on(event: "USERAUTH_PASSWD_CHANGEREQ", listener: (message: string) => void): this;
/**
* (Client-only)
*/
on(event: "USERAUTH_INFO_REQUEST", listener: (name: string, instructions: string, lang: string, prompts: Prompt[]) => void): this;
/**
* (Client-only)
*/
on(event: "USERAUTH_PK_OK", listener: () => void): this;
/**
* (Client-only)
*/
on(event: "USERAUTH_SUCCESS", listener: () => void): this;
/**
* (Client-only)
*/
on(event: "USERAUTH_FAILURE", listener: (methodsContinue: string[], partialSuccess: boolean) => void): this;
/**
* (Client-only)
*/
on(event: "USERAUTH_BANNER", listener: (message: string) => void): this;
/**
* (Server-only)
*/
on(event: "SERVICE_REQUEST", listener: (serviceName: string) => void): this;
/**
* (Server-only)
*/
on(event: "USERAUTH_REQUEST", listener: (username: string, serviceName: string, authMethod: string, authMethodData: AuthMethodData) => void): this;
/**
* (Server-only)
*/
on(event: "USERAUTH_INFO_RESPONSE", listener: (responses: string[]) => void): this;
/**
* Emitted when the connection has authenticated.
*/
on(event: "ready", listener: () => void): this;
/**
* Emitted when the socket has disconnected.
*/
on(event: "end", listener: () => void): this;
/**
* Emitted when the client socket was closed.
*/
on(event: "close", listener: () => void): this;
/**
* Emitted when more requests/data can be sent to the stream.
*/
on(event: "continue", listener: () => void): this;
/**
* Emitted when an error occurred.
*/
on(event: "error", listener: (err: any) => void): this;
on(event: string | symbol, listener: Function): this;
}
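// Minimal client-side wiring sketch (assumes `socket` is a connected
// net.Socket); SSH2Stream is a Transform stream, so it is piped to and from
// the raw connection. The auth flow shown is one typical sequence, not the
// only one:
//
//   const ssh = new SSH2Stream();
//   socket.pipe(ssh).pipe(socket);
//   ssh.on('NEWKEYS', () => ssh.service('ssh-userauth'));
//   ssh.on('SERVICE_ACCEPT', (serviceName) => {
//     if (serviceName === 'ssh-userauth') ssh.authPassword('user', 'secret');
//   });
//   ssh.on('USERAUTH_SUCCESS', () => { /* authenticated; open channels */ });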
export interface SSH2StreamConfig {
/**
* Set to true to create an instance in server mode.
*/
server?: boolean;
/**
* If in server mode, an object keyed on host key format.
*/
hostKeys?: HostKeys;
/**
* A message that is sent to clients immediately upon connection, before handshaking begins.
*/
banner?: string;
/**
* A custom server software name/version identifier.
* @default 'ssh2js' + moduleVersion + 'srv'
*/
ident?: string;
/**
* This is the maximum packet size that will be accepted. It should be 35000 bytes or larger to be compatible with other SSH2 implementations.
* @default 35000
*/
maxPacketSize?: number;
/**
* This is the highWaterMark to use for the parser stream.
* @default 32 * 1024
*/
highWaterMark?: number;
/**
     * This option allows you to explicitly override the default transport layer algorithms used for the connection. Each value must be an array of valid algorithms for that category. The order of the algorithms in the arrays is important, with the most favorable being first.
*/
algorithms?: Algorithms;
/**
* Set this to a function that receives a single string argument to get detailed (local) debug information.
*/
debug?: (information: string) => any;
}
export interface HostKeys {
[format: string]: HostKey;
}
export interface HostKey {
privateKey: ParsedKey;
publicKey: ParsedKey;
}
/**
* Overrides for the default transport layer algorithms used for the connection.
*
 * The order of the algorithms in the arrays is important, with the most favorable being first.
*/
export interface Algorithms {
kex?: string[];
cipher?: string[];
serverHostKey?: string[];
hmac?: string[];
compress?: string[];
}
export interface Header {
/**
* (Client-only) An optional greeting message presented by the server.
*/
greeting?: string;
/**
* The raw identification string sent by the remote party.
*/
identRaw: string;
/**
* Contains various version information parsed from identRaw.
*/
versions: Versions;
/**
* Any text that comes after the software name/version.
*/
comments: string;
}
export interface Versions {
/**
* The SSH protocol version supported by the remote party.
*/
protocol: string;
/**
* The software name and version used by the remote party.
*/
software: string;
}
export interface TcpipForwardGlobalRequest {
/**
* The IP address to start/stop binding to.
*/
bindAddr: string;
/**
* The port to start/stop binding to.
*/
bindPort: number;
}
export interface openssh_StreamLocalForwardGlobalRequest {
socketPath: string;
}
export type GlobalRequest = TcpipForwardGlobalRequest | openssh_StreamLocalForwardGlobalRequest | Buffer;
export interface ChannelOpenConfirmationInfo {
recipient: number;
sender: number;
window: number;
packetSize: number;
}
export interface ChannelOpenFailureInfo {
recipient: number;
reasonCode: number;
reason: string;
description: string;
}
export interface X11ChannelInfo {
type: "x11";
sender: number;
window: number;
packetSize: number;
data: X11ChannelData;
}
export interface X11ChannelData {
srcIP: string;
srcPort: number;
}
export interface ForwardedTcpipChannelInfo {
type: "forwarded-tcpip";
sender: number;
window: number;
packetSize: number;
data: TcpipChannelData;
}
export interface DirectTcpipChannelInfo {
type: "direct-tcpip";
sender: number;
window: number;
packetSize: number;
data: TcpipChannelData;
}
export interface TcpipChannelData {
srcIP: string;
srcPort: number;
destIP: string;
destPort: number;
}
export interface openssh_ForwardedStreamLocalChannelInfo {
    type: "[email protected]";
sender: number;
window: number;
packetSize: number;
data: SocketChannelData;
}
export interface openssh_DirectStreamLocalChannelInfo {
    type: "[email protected]";
sender: number;
window: number;
packetSize: number;
data: SocketChannelData;
}
export interface SocketChannelData {
socketPath: string;
}
export interface openssh_AuthAgentChannelInfo {
    type: "[email protected]";
sender: number;
window: number;
packetSize: number;
}
export interface SessionChannelInfo {
type: "session";
sender: number;
window: number;
packetSize: number;
}
export type ChannelOpenInfo = X11ChannelInfo | ForwardedTcpipChannelInfo | openssh_ForwardedStreamLocalChannelInfo | openssh_AuthAgentChannelInfo | DirectTcpipChannelInfo | openssh_DirectStreamLocalChannelInfo | SessionChannelInfo;
export interface ExitStatusChannelRequest {
request: "exit-status";
recipient: number;
code: number;
}
export interface ExitSignalChannelRequest {
request: "exit-signal";
recipient: number;
signal: string;
coredump: boolean;
description: string;
}
export interface PseudoTtyChannelRequest {
request: "pty-req";
recipient: number;
wantReply: boolean;
term: string;
cols: number;
rows: number;
width: number;
height: number;
modes: any;
}
export interface WindowChangeChannelRequest {
request: "window-change";
recipient: number;
cols: number;
rows: number;
width: number;
height: number;
}
export interface X11ChannelRequest {
request: "x11-req";
recipient: number;
wantReply: boolean;
single: boolean;
protocol: string;
cookie: string;
screen: number;
}
export interface EnvChannelRequest {
request: "env";
recipient: number;
wantReply: boolean;
key: string;
val: string;
}
export interface ShellChannelRequest {
request: "shell";
recipient: number;
wantReply: boolean;
}
export interface ExecChannelRequest {
request: "exec";
recipient: number;
wantReply: boolean;
command: string;
}
export interface SubsystemChannelRequest {
request: "subsystem";
recipient: number;
wantReply: boolean;
subsystem: string;
}
export interface SignalChannelRequest {
request: "signal";
recipient: number;
signal: string;
}
export interface FlowControlChannelRequest {
request: "xon-xoff";
recipient: number;
clientControl: boolean;
}
export interface openssh_AuthAgentChannelRequest {
    request: "[email protected]";
recipient: number;
}
export type ChannelRequest = ExitStatusChannelRequest | ExitSignalChannelRequest | PseudoTtyChannelRequest | WindowChangeChannelRequest | X11ChannelRequest | EnvChannelRequest | ShellChannelRequest | ExecChannelRequest | SubsystemChannelRequest | SignalChannelRequest | FlowControlChannelRequest;
export interface PublicKeyAuthMethodData {
keyAlgo: string;
key: Buffer;
signature?: Buffer;
blob?: Buffer;
}
export interface HostbasedAuthMethodData {
keyAlgo: string;
key: Buffer;
signature?: Buffer;
blob?: Buffer;
localHostname: string;
localUsername: string;
}
export type AuthMethodData = string | PublicKeyAuthMethodData | HostbasedAuthMethodData;
export interface TcpipForwardingConfig {
/**
* Source IP address of outgoing connection.
*/
srcIP: string;
/**
* Source port of outgoing connection.
*/
srcPort: number;
/**
* Destination IP address of outgoing connection.
*/
destIP: string;
/**
* Destination port of outgoing connection.
*/
destPort: number;
}
export interface X11ForwardingConfig {
/**
* true if only a single connection should be forwarded.
*/
single: boolean;
/**
* The name of the X11 authentication method used (e.g. MIT-MAGIC-COOKIE-1).
*/
protocol: string;
/**
* The X11 authentication cookie encoded in hexadecimal.
*/
cookie: string;
/**
* The screen number to forward X11 connections for.
*/
screen: number;
}
export interface SocketForwardingConfig {
socketPath: string;
}
export interface Prompt {
prompt: string;
echo?: boolean;
}
export interface ForwardedTcpip {
bindAddr: string;
bindPort: number;
remoteAddr: string;
remotePort: number;
}
<|fim▁hole|>}
export interface ForwardedSocket {
socketPath: string;
}
export class SFTPStream extends stream.Transform {
/**
* Creates and returns a new SFTPStream instance.
*/
constructor(remoteIdentRaw: string);
/**
* Creates and returns a new SFTPStream instance.
*/
constructor(cfg?: SFTPStreamConfig, remoteIdentRaw?: string);
/**
* Converts string flags (e.g. `'r'`, `'a+'`, etc.) to the appropriate
* `SFTPStream.OPEN_MODE` flag mask.
*
* Returns `null` if conversion failed.
*/
static stringToFlags(flagsStr: string): number;
/**
* Converts flag mask (e.g. number containing `SFTPStream.OPEN_MODE` values) to the
* appropriate string value.
*
* Returns `null` if conversion failed.
*/
static flagsToString(flagsMask: number): string;
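    // Round-trip sketch for the two static helpers above; the exact numeric
    // mask depends on SFTPStream.OPEN_MODE and is deliberately not spelled
    // out here.
    //
    //   const mask = SFTPStream.stringToFlags('a+'); // null if unrecognized
    //   const str = SFTPStream.flagsToString(mask);  // 'a+' again, or null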
/**
* (Client-only)
* Downloads a file at `remotePath` to `localPath` using parallel reads for faster throughput.
*/
fastGet(remotePath: string, localPath: string, options: TransferOptions, callback: (err: any) => void): void;
/**
* (Client-only)
* Downloads a file at `remotePath` to `localPath` using parallel reads for faster throughput.
*/
fastGet(remotePath: string, localPath: string, callback: (err: any) => void): void;
/**
* (Client-only)
* Uploads a file from `localPath` to `remotePath` using parallel reads for faster throughput.
*/
fastPut(localPath: string, remotePath: string, options: TransferOptions, callback: (err: any) => void): void;
/**
* (Client-only)
* Uploads a file from `localPath` to `remotePath` using parallel reads for faster throughput.
*/
fastPut(localPath: string, remotePath: string, callback: (err: any) => void): void;
/**
* (Client-only)
     * Reads a file into memory and returns its contents.
*/
    readFile(remotePath: string, options: ReadFileOptions, callback: (err: any, data: Buffer) => void): void;
/**
* (Client-only)
     * Reads a file into memory and returns its contents.
*/
    readFile(remotePath: string, encoding: string, callback: (err: any, data: Buffer) => void): void;
/**
* (Client-only)
     * Reads a file into memory and returns its contents.
*/
    readFile(remotePath: string, callback: (err: any, data: Buffer) => void): void;
/**
* (Client-only)
* Returns a new readable stream for `path`.
*/
createReadStream(path: string, options?: ReadStreamOptions): stream.Readable;
/**
* (Client-only)
* Returns a new writable stream for `path`.
*/
createWriteStream(path: string, options?: WriteStreamOptions): stream.Writable;
/**
* (Client-only)
* Opens a file `filename` for `mode` with optional `attributes`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
open(filename: string, mode: string, attributes: InputAttributes, callback: (err: any, handle: Buffer) => void): boolean;
/**
* (Client-only)
* Opens a file `filename` for `mode`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
open(filename: string, mode: string, callback: (err: any, handle: Buffer) => void): boolean;
/**
* (Client-only)
* Closes the resource associated with `handle` given by `open()` or `opendir()`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
close(handle: Buffer, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Reads `length` bytes from the resource associated with `handle` starting at `position`
* and stores the bytes in `buffer` starting at `offset`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
readData(handle: Buffer, buffer: Buffer, offset: number, length: number, position: number, callback: (err: any, bytesRead: number, buffer: Buffer, position: number) => void): boolean;
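    // Illustrative call showing readData's (buffer, offset, length, position)
    // contract: fill a local buffer from the start of a remote file (`handle`
    // comes from a prior open(); `consume` is an assumed application hook).
    //
    //   const buf = Buffer.alloc(32 * 1024);
    //   sftp.readData(handle, buf, 0, buf.length, 0, (err, bytesRead) => {
    //     if (!err) consume(buf.slice(0, bytesRead));
    //   });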
/**
* (Client-only)
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
writeData(handle: Buffer, buffer: Buffer, offset: number, length: number, position: number, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Retrieves attributes for the resource associated with `handle`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
fstat(handle: Buffer, callback: (err: any, stats: Stats) => void): boolean;
/**
* (Client-only)
* Sets the attributes defined in `attributes` for the resource associated with `handle`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
fsetstat(handle: Buffer, attributes: InputAttributes, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Sets the access time and modified time for the resource associated with `handle`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
futimes(handle: Buffer, atime: number | Date, mtime: number | Date, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Sets the owner for the resource associated with `handle`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
fchown(handle: Buffer, uid: number, gid: number, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Sets the mode for the resource associated with `handle`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
fchmod(handle: Buffer, mode: number | string, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Opens a directory `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
opendir(path: string, callback: (err: any, handle: Buffer) => void): boolean;
/**
* (Client-only)
* Retrieves a directory listing.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
readdir(location: string | Buffer, callback: (err: any, list: FileEntry[]) => void): boolean;
/**
* (Client-only)
* Removes the file/symlink at `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
unlink(path: string, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Renames/moves `srcPath` to `destPath`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
rename(srcPath: string, destPath: string, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Creates a new directory `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
mkdir(path: string, attributes: InputAttributes, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Creates a new directory `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
mkdir(path: string, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Removes the directory at `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
rmdir(path: string, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Retrieves attributes for `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
stat(path: string, callback: (err: any, stats: Stats) => void): boolean;
/**
* (Client-only)
* Retrieves attributes for `path`. If `path` is a symlink, the link itself is stat'ed
* instead of the resource it refers to.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
lstat(path: string, callback: (err: any, stats: Stats) => void): boolean;
/**
* (Client-only)
* Sets the attributes defined in `attributes` for `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
setstat(path: string, attributes: InputAttributes, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Sets the access time and modified time for `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
utimes(path: string, atime: number | Date, mtime: number | Date, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Sets the owner for `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
chown(path: string, uid: number, gid: number, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Sets the mode for `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
chmod(path: string, mode: number | string, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Retrieves the target for a symlink at `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
readlink(path: string, callback: (err: any, target: string) => void): boolean;
/**
* (Client-only)
* Creates a symlink at `linkPath` to `targetPath`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
symlink(targetPath: string, linkPath: string, callback: (err: any) => void): boolean;
/**
* (Client-only)
* Resolves `path` to an absolute path.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
realpath(path: string, callback: (err: any, absPath: string) => void): boolean;
/**
* (Client-only, OpenSSH extension)
* Performs POSIX rename(3) from `srcPath` to `destPath`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
ext_openssh_rename(srcPath: string, destPath: string, callback: (err: any) => void): boolean;
/**
* (Client-only, OpenSSH extension)
* Performs POSIX statvfs(2) on `path`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
ext_openssh_statvfs(path: string, callback: (err: any, fsInfo: any) => void): boolean;
/**
* (Client-only, OpenSSH extension)
* Performs POSIX fstatvfs(2) on open handle `handle`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
ext_openssh_fstatvfs(handle: Buffer, callback: (err: any, fsInfo: any) => void): boolean;
/**
* (Client-only, OpenSSH extension)
* Performs POSIX link(2) to create a hard link to `targetPath` at `linkPath`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
ext_openssh_hardlink(targetPath: string, linkPath: string, callback: (err: any) => void): boolean;
/**
* (Client-only, OpenSSH extension)
* Performs POSIX fsync(3) on the open handle `handle`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
ext_openssh_fsync(handle: Buffer, callback: (err: any, fsInfo: any) => void): boolean;
/**
* (Server-only)
* Sends a status response for the request identified by `id`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
status(reqID: number, statusCode: number, message?: string): boolean;
/**
* (Server-only)
* Sends a handle response for the request identified by `id`.
*
* @param handle A handle must be less than 256 bytes and is an opaque value that could
* merely contain the value of a backing file descriptor or some other unique,
* custom value.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
handle(reqID: number, handle: Buffer): boolean;
/**
* (Server-only)
* Sends a data response for the request identified by `id`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
data(reqID: number, data: string | Buffer, encoding?: string): boolean;
/**
* (Server-only)
* Sends a name response for the request identified by `id`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
name(reqID: number, names: FileEntry[]): boolean;
/**
* (Server-only)
* Sends an attrs response for the request identified by `id`.
*
* Returns `false` if you should wait for the `continue` event before sending any more traffic.
*/
attrs(reqID: number, attrs: Attributes): boolean;
/**
* (Client/Server)
* Emitted after initial protocol version check has passed.
*/
on(event: "ready", listener: () => void): this;
/**
* (Server-only)
* Emitted when the client requests to open a file.
*
* Respond with:
* * `handle()` - This indicates a successful opening of the file and passes the given handle back to the client to use to refer to this open file for future operations (e.g. reading, writing, closing).
* * `status()` - Use this to indicate a failure to open the requested file.
*/
on(event: "OPEN", listener: (reqID: number, filename: string, flags: number, attrs: InputAttributes) => void): this;
/**
* (Server-only)
* Emitted when the client requests to read data from a file handle.
*
* Respond with:
* * `data()` - Use this to send the requested chunk of data back to the client. The amount of data sent is allowed to be less than the `length` requested.
* `status()` - Use this to indicate either that end of file (`STATUS_CODE.EOF`) has been reached (`offset` is past the end of the file) or that an error occurred while reading the requested part of the file.
*/
on(event: "READ", listener: (reqID: number, handle: Buffer, offset: number, length: number) => void): this;
/**
* (Server-only)
* Emitted when the client requests to write data to a file handle.
*
* Respond with:
* * `status()` - Use this to indicate success/failure of the write to the file.
*/
on(event: "WRITE", listener: (reqID: number, handle: Buffer, offset: number, data: Buffer) => void): this;
/**
* (Server-only)
* Emitted when the client requests attributes for the resource associated with `handle`.
*
* Respond with:
* * `attrs()` - Use this to send the attributes for the requested file/directory back to the client.
* * `status()` - Use this to indicate an error occurred while accessing the file/directory.
*/
on(event: "FSTAT", listener: (reqID: number, handle: Buffer) => void): this;
/**
* (Server-only)
* Emitted when the client requests to write attributes for the resource associated with `handle`.
*
* Respond with:
* `status()` - Use this to indicate success/failure of setting the given file/directory attributes.
*/
on(event: "FSETSTAT", listener: (reqID: number, handle: Buffer, attrs: InputAttributes) => void): this;
/**
* (Server-only)
* Emitted when the client requests to close a handle.
*
* Respond with:
* * `status()` - Use this to indicate success (`STATUS_CODE.OK`) or failure of the closing of the file identified by `handle`.
*/
on(event: "CLOSE", listener: (reqID: number, handle: Buffer) => void): this;
/**
* (Server-only)
* Emitted when the client requests to open a directory.
*
* Respond with:
* * `handle()` - This indicates a successful opening of the directory and passes the given handle back to the client to use to refer to this open directory for future operations (e.g. reading directory contents, closing).
* * `status()` - Use this to indicate a failure to open the requested directory.
*/
on(event: "OPENDIR", listener: (reqID: number, path: string) => void): this;
/**
* (Server-only)
* Emitted when the client requests to read the contents of a directory.
*
* Respond with:
* * `name()` - Use this to send one or more directory listings for the open directory back to the client.
* `status()` - Use this to indicate either that the end of the directory contents (`STATUS_CODE.EOF`) has been reached or that an error occurred while reading the directory contents.
*/
on(event: "READDIR", listener: (reqID: number, handle: Buffer) => void): this;
/**
* (Server-only)
* Emitted when the client requests attributes for a path. If `path` is a symlink, the
* link itself should be stat'ed instead of the resource it refers to.
*
* Respond with:
* * `attrs()` - Use this to send the attributes for the requested file/directory back to the client.
* * `status()` - Use this to indicate an error occurred while accessing the file/directory.
*/
on(event: "LSTAT", listener: (reqID: number, path: string) => void): this;
/**
* (Server-only)
* Emitted when the client requests attributes for a path.
*
* Respond with:
* * `attrs()` - Use this to send the attributes for the requested file/directory back to the client.
* * `status()` - Use this to indicate an error occurred while accessing the file/directory.
*/
on(event: "STAT", listener: (reqID: number, path: string) => void): this;
/**
* (Server-only)
* Emitted when the client requests to delete a file or symlink.
*
* Respond with:
* * `status()` - Use this to indicate success/failure of the removal of the file at `path`.
*/
on(event: "REMOVE", listener: (reqID: number, path: string) => void): this;
/**
* (Server-only)
* Emitted when the client requests to remove a directory.
*
* Respond with:
* * `status()` - Use this to indicate success/failure of the removal of the directory at `path`.
*/
on(event: "RMDIR", listener: (reqID: number, path: string) => void): this;
/**
* (Server-only)
* Emitted when the client requests the absolute path for a path.
*
* Respond with:
* * `name()` - Use this to respond with a normalized version of `path`. No file/directory attributes are required to be sent in this response.
* * `status()` - Use this to indicate a failure in normalizing `path`.
*/
on(event: "REALPATH", listener: (reqID: number, path: string) => void): this;
/**
* (Server-only)
* Emitted when the client requests the target for a symlink at `path`.
*
* Respond with:
* * `name()` - Use this to respond with the target of the symlink at `path`. No file/directory attributes are required to be sent in this response.
* * `status()` - Use this to indicate a failure in reading the symlink at `path`.
*/
on(event: "READLINK", listener: (reqID: number, path: string) => void): this;
/**
* (Server-only)
* Emitted when the client requests to set the attributes defined in `attrs` for `path`.
*
* Respond with:
* `status()` - Use this to indicate success/failure of setting the given file/directory attributes.
*/
on(event: "SETSTAT", listener: (reqID: number, path: string, attrs: InputAttributes) => void): this;
/**
* (Server-only)
* Emitted when the client requests a new directory be created.
*
* Respond with:
* * `status()` - Use this to indicate success/failure of the creation of the directory at `path`.
*/
on(event: "MKDIR", listener: (reqID: number, path: string, attrs: InputAttributes) => void): this;
/**
* (Server-only)
* Emitted when the client requests a path be renamed.
*
* Respond with:
* * `status()` - Use this to indicate success/failure of the renaming of the file/directory at `oldPath` to `newPath`.
*/
on(event: "RENAME", listener: (reqID: number, oldPath: string, newPath: string) => void): this;
/**
* (Server-only)
* Emitted when the client requests a new symlink be created for a path.
*
* Respond with:
* * `status()` - Use this to indicate success/failure of the symlink creation.
*/
on(event: "SYMLINK", listener: (reqID: number, linkPath: string, targetPath: string) => void): this;
/**
* Emitted when the socket has disconnected.
*/
on(event: "end", listener: () => void): this;
/**
* Emitted when the client socket was closed.
*/
on(event: "close", listener: () => void): this;
/**
* Emitted when more requests/data can be sent to the stream.
*/
on(event: "continue", listener: () => void): this;
/**
* Emitted when an error occurred.
*/
on(event: "error", listener: (err: any) => void): this;
on(event: string | symbol, listener: Function): this;
}
export namespace SFTPStream {
/**
* Contains the various status codes (for use especially with SFTPStream#status())
*/
export enum STATUS_CODE {
OK = 0,
EOF = 1,
NO_SUCH_FILE = 2,
PERMISSION_DENIED = 3,
FAILURE = 4,
BAD_MESSAGE = 5,
NO_CONNECTION = 6,
CONNECTION_LOST = 7,
OP_UNSUPPORTED = 8
}
/**
* Contains the various open file flags
*/
export enum OPEN_MODE {
READ = 0x00000001,
WRITE = 0x00000002,
APPEND = 0x00000004,
CREAT = 0x00000008,
TRUNC = 0x00000010,
EXCL = 0x00000020
}
}
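// Illustrative sketch (not part of these typings): honoring the backpressure
// contract noted on every request method above. A `false` return means the
// internal buffer is full, so wait for the 'continue' event (the stream is an
// ordinary event emitter) before sending more. `sftp`, `handle` and `chunks`
// are assumed inputs.
//
//   let i = 0, offset = 0;
//   const writeNext = () => {
//     while (i < chunks.length) {
//       const chunk = chunks[i++];
//       const ok = sftp.writeData(handle, chunk, 0, chunk.length, offset,
//                                 (err) => { if (err) throw err; });
//       offset += chunk.length;
//       if (!ok) return sftp.once('continue', writeNext);
//     }
//   };
//   writeNext();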
export interface SFTPStreamConfig {
/**
* Set to true to create an instance in server mode.
*/
server?: boolean;
/**
* This is the highWaterMark to use for the stream.
*/
highWaterMark?: number;
/**
* Set this to a function that receives a single string argument to get detailed (local) debug information.
*/
debug?: (information: string) => any;
}
export interface TransferOptions {
/**
* Number of concurrent reads
*/
concurrency?: number;
/**
* Size of each read in bytes
*/
chunkSize?: number;
/**
* Called every time a part of a file was transferred
*/
step?: (total_transferred: number, chunk: number, total: number) => void;
/**
* Integer or string representing the file mode to set for the uploaded file.
*/
mode?: number | string;
}
export interface ReadStreamOptions {
flags?: string;
encoding?: string;
handle?: Buffer;
mode?: number;
autoClose?: boolean;
start?: number;
end?: number;
}
export interface WriteStreamOptions {
flags?: string;
encoding?: string;
mode?: number;
}
export interface FileEntry {
filename: string;
longname: string;
attrs: Attributes;
}
export interface InputAttributes {
mode?: number | string;
uid?: number;
gid?: number;
size?: number;
atime?: number | Date;
mtime?: number | Date;
}
export interface Attributes {
mode: number;
uid: number;
gid: number;
size: number;
atime: number;
mtime: number;
}
export interface Stats extends Attributes {
isDirectory(): boolean;
isFile(): boolean;
isBlockDevice(): boolean;
isCharacterDevice(): boolean;
isSymbolicLink(): boolean;
isFIFO(): boolean;
isSocket(): boolean;
}
export namespace utils {
export function parseKey(keyData: string | Buffer, passphrase?: string): ParsedKey | {}[];
}
export interface ParsedKey {
type: string;
comment: string;
getPrivatePEM(): string;
getPublicPEM(): string;
getPublicSSH(): string;
sign(data: string | Buffer): Buffer | Error;
verify(data: string | Buffer, signature: Buffer): boolean | Error;
}
export interface ReadFileOptions {
encoding?: string;
flag?: string;
}<|fim▁end|> | export interface ForwardedX11 {
originAddr: string;
originPort: number; |
<|file_name|>ingress.go<|end_file_name|><|fim▁begin|>/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
"net/http"
"path/filepath"
. "github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
)
// IngressUpgradeTest adapts the Ingress e2e for upgrade testing
type IngressUpgradeTest struct {
gceController *framework.GCEIngressController
jig *framework.IngressTestJig
httpClient *http.Client
ip string
ipName string
}
func (IngressUpgradeTest) Name() string { return "ingress-upgrade" }
// Setup creates a GLBC, allocates an ip, and an ingress resource,
// then waits for a successful connectivity check to the ip.
func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
framework.SkipUnlessProviderIs("gce", "gke")
// jig handles all Kubernetes testing logic
jig := framework.NewIngressTestJig(f.ClientSet)
ns := f.Namespace
// gceController handles all cloud testing logic
gceController := &framework.GCEIngressController{
Ns: ns.Name,
Client: jig.Client,
Cloud: framework.TestContext.CloudConfig,
}
gceController.Init()
t.gceController = gceController
t.jig = jig
t.httpClient = framework.BuildInsecureClient(framework.IngressReqTimeout)
// Allocate a static-ip for the Ingress, this IP is cleaned up via CleanupGCEIngressController
t.ipName = fmt.Sprintf("%s-static-ip", ns.Name)
t.ip = t.gceController.CreateStaticIP(t.ipName)
// Create a working basic Ingress
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip))<|fim▁hole|> "kubernetes.io/ingress.allow-http": "false",
})
By("waiting for Ingress to come up with ip: " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", t.ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, t.httpClient, false))
}
// Test waits for the upgrade to complete, and then verifies
// with a connectvity check to the loadbalancer ip.
func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
switch upgrade {
case MasterUpgrade:
// Restarting the ingress controller shouldn't disrupt a steady state
// Ingress. Restarting the ingress controller and deleting ingresses
// while it's down will leak cloud resources, because the ingress
// controller doesn't checkpoint to disk.
t.verify(f, done, true)
default:
// Currently ingress gets disrupted across node upgrade, because endpoints
// get killed and we don't have any guarantees that 2 nodes don't overlap
// their upgrades (even on cloud platforms like GCE, because VM level
// rolling upgrades are not Kubernetes aware).
t.verify(f, done, false)
}
}
// Teardown cleans up any remaining resources.
func (t *IngressUpgradeTest) Teardown(f *framework.Framework) {
if CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(t.gceController.Ns)
}
if t.jig.Ingress != nil {
By("Deleting ingress")
t.jig.DeleteIngress()
} else {
By("No ingress created, no cleanup necessary")
}
By("Cleaning up cloud resources")
framework.CleanupGCEIngressController(t.gceController)
}
func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption {
By("continuously hitting the Ingress IP")
wait.Until(func() {
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", t.ip), "", framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
}, t.jig.PollInterval, done)
} else {
By("waiting for upgrade to finish without checking if Ingress remains up")
<-done
}
By("hitting the Ingress IP " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", t.ip), "", framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
}<|fim▁end|> | jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), ns.Name, map[string]string{
"kubernetes.io/ingress.global-static-ip-name": t.ipName, |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Category(models.Model):
"""
Django requires every model to inherit from the models.Model class.
Category only needs a simple name field for the category name.
CharField makes name a character field, and its max_length parameter caps
its length; category names longer than that cannot be saved to the database.
Django also ships many other field types, e.g. DateTimeField for datetimes and IntegerField for integers.
All built-in field types are documented at:
https://docs.djangoproject.com/en/1.10/ref/models/fields/#field-types
"""
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Tag(models.Model):
"""
The Tag model is just as simple as Category.
Once more: it must inherit from models.Model!
"""
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Post(models.Model):
"""
The posts table is a little more complex, mainly because it involves more fields.
"""
# Post title
title = models.CharField(max_length=70)
# Number of views
views = models.PositiveIntegerField(default=0)
# Post body, stored with a TextField.<|fim▁hole|>
 body = models.TextField()
# These two columns store the post's creation time and last-modified time; DateTimeField is the field type for timestamps.
created_time = models.DateTimeField()
modified_time = models.DateTimeField()
# Post excerpt. A post may have none, but by default CharField demands a value and raises an error otherwise.
# Passing blank=True to the CharField allows it to be empty.
excerpt = models.CharField(max_length=200, blank=True)
# Category and tag fields; both models are defined above.
# Here the post table is linked to the category and tag tables, but the two relations differ slightly.
# A post belongs to exactly one category while a category can hold many posts, so we use ForeignKey, a one-to-many relation.
# A post can carry several tags and one tag can mark many posts, so we use ManyToManyField, a many-to-many relation.
# Posts are also allowed to have no tags, hence blank=True on tags.
# If ForeignKey or ManyToManyField is new to you, see the tutorial's explanation, or the official docs:
# https://docs.djangoproject.com/en/1.10/topics/db/models/#relationships
category = models.ForeignKey(
Category,
on_delete=models.CASCADE,
)
tags = models.ManyToManyField(Tag, blank=True)
# 文章作者,这里 User 是从 django.contrib.auth.models 导入的。
# django.contrib.auth 是 Django 内置的应用,专门用于处理网站用户的注册、登录等流程,User 是 Django 为我们已经写好的用户模型。
# 这里我们通过 ForeignKey 把文章和 User 关联了起来。
# 因为我们规定一篇文章只能有一个作者,而一个作者可能会写多篇文章,因此这是一对多的关联关系,和 Category 类似。
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:detail',kwargs={'pk':self.pk})
def increase_views(self):
self.views += 1
self.save(update_fields=['views'])
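# Usage sketch (illustrative, not part of the model); assumes an existing
# category `c`, user `u`, and `from django.utils import timezone`:
#   post = Post.objects.create(title='Hello', body='...', category=c, author=u,
#                              created_time=timezone.now(), modified_time=timezone.now())
#   post.increase_views()          # bumps `views` and saves only that column
#   url = post.get_absolute_url()  # reverses 'blog:detail' with the post's pk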
class Meta:
ordering = ['-created_time']<|fim▁end|> | # Short strings fit a CharField, but a post body can be a large block of text, so a TextField is used to store it. |
<|file_name|>ObjectFactory.java<|end_file_name|><|fim▁begin|>package org.hyperimage.connector.fedora3.ws;
import javax.xml.bind.JAXBElement;
import javax.xml.bind.annotation.XmlElementDecl;
import javax.xml.bind.annotation.XmlRegistry;
import javax.xml.namespace.QName;
/**
* This object contains factory methods for each
* Java content interface and Java element interface
* generated in the org.hyperimage.connector.fedora3.ws package.
* <p>An ObjectFactory allows you to programmatically
* construct new instances of the Java representation
* for XML content. The Java representation of XML
* content can consist of schema derived interfaces
* and classes representing the binding of schema
* type definitions, element declarations and model
* groups. Factory methods for each of these are
* provided in this class.
*
*/
@XmlRegistry
public class ObjectFactory {
private final static QName _AssetURN_QNAME = new QName("http://connector.ws.hyperimage.org/", "assetURN");
private final static QName _Token_QNAME = new QName("http://connector.ws.hyperimage.org/", "token");
private final static QName _GetAssetPreviewDataResponse_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getAssetPreviewDataResponse");
private final static QName _ParentURN_QNAME = new QName("http://connector.ws.hyperimage.org/", "parentURN");
private final static QName _Username_QNAME = new QName("http://connector.ws.hyperimage.org/", "username");
private final static QName _GetAssetData_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getAssetData");
private final static QName _GetAssetPreviewData_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getAssetPreviewData");
private final static QName _GetHierarchyLevelResponse_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getHierarchyLevelResponse");
private final static QName _Authenticate_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "authenticate");
private final static QName _HIWSLoggedException_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "HIWSLoggedException");
private final static QName _GetMetadataRecord_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getMetadataRecord");
private final static QName _HIWSNotBinaryException_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "HIWSNotBinaryException");
private final static QName _Session_QNAME = new QName("http://connector.ws.hyperimage.org/", "session");
private final static QName _HIWSDCMetadataException_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "HIWSDCMetadataException");
private final static QName _HIWSAuthException_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "HIWSAuthException");
private final static QName _HIWSAssetNotFoundException_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "HIWSAssetNotFoundException");
private final static QName _GetWSVersion_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getWSVersion");
private final static QName _GetMetadataRecordResponse_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getMetadataRecordResponse");
private final static QName _HIWSUTF8EncodingException_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "HIWSUTF8EncodingException");
private final static QName _GetWSVersionResponse_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getWSVersionResponse");
private final static QName _GetReposInfo_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getReposInfo");
private final static QName _HIWSXMLParserException_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "HIWSXMLParserException");
private final static QName _AuthenticateResponse_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "authenticateResponse");
private final static QName _GetAssetDataResponse_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getAssetDataResponse");
private final static QName _GetHierarchyLevel_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getHierarchyLevel");
private final static QName _GetReposInfoResponse_QNAME = new QName("http://fedora3.connector.hyperimage.org/", "getReposInfoResponse");
private final static QName _GetAssetPreviewDataResponseReturn_QNAME = new QName("", "return");
/**
* Create a new ObjectFactory that can be used to create new instances of schema derived classes for package: org.hyperimage.connector.fedora3.ws
*
*/
public ObjectFactory() {
}
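// Usage sketch (illustrative): wrapping a plain value for marshalling with one
// of the factory methods declared below.
//   ObjectFactory factory = new ObjectFactory();
//   JAXBElement<String> user = factory.createUsername("alice");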
/**
* Create an instance of {@link HIWSDCMetadataException }
*
*/
public HIWSDCMetadataException createHIWSDCMetadataException() {
return new HIWSDCMetadataException();
}
/**
* Create an instance of {@link GetAssetDataResponse }
*
*/
public GetAssetDataResponse createGetAssetDataResponse() {
return new GetAssetDataResponse();
}
/**
* Create an instance of {@link HIWSAuthException }
*
*/
public HIWSAuthException createHIWSAuthException() {
return new HIWSAuthException();
}
/**
* Create an instance of {@link HIWSAssetNotFoundException }
*
*/
public HIWSAssetNotFoundException createHIWSAssetNotFoundException() {
return new HIWSAssetNotFoundException();
}
/**
* Create an instance of {@link HIWSNotBinaryException }
*
*/
public HIWSNotBinaryException createHIWSNotBinaryException() {
return new HIWSNotBinaryException();
}
/**
* Create an instance of {@link GetHierarchyLevelResponse }
*
*/
public GetHierarchyLevelResponse createGetHierarchyLevelResponse() {
return new GetHierarchyLevelResponse();
}
/**
* Create an instance of {@link Authenticate }
*
*/
public Authenticate createAuthenticate() {
return new Authenticate();
}
/**
* Create an instance of {@link HiHierarchyLevel }
*
*/
public HiHierarchyLevel createHiHierarchyLevel() {
return new HiHierarchyLevel();
}
/**
* Create an instance of {@link HIWSLoggedException }
*
*/
public HIWSLoggedException createHIWSLoggedException() {
return new HIWSLoggedException();
}
/**
* Create an instance of {@link GetHierarchyLevel }
*
*/
public GetHierarchyLevel createGetHierarchyLevel() {
return new GetHierarchyLevel();
}
/**
* Create an instance of {@link AuthenticateResponse }
*
*/
public AuthenticateResponse createAuthenticateResponse() {
return new AuthenticateResponse();
}
/**
* Create an instance of {@link GetReposInfoResponse }
*
*/
public GetReposInfoResponse createGetReposInfoResponse() {
return new GetReposInfoResponse();
}
/**
* Create an instance of {@link GetAssetPreviewDataResponse }
*
*/
public GetAssetPreviewDataResponse createGetAssetPreviewDataResponse() {
return new GetAssetPreviewDataResponse();
}
/**
* Create an instance of {@link GetWSVersion }
*
*/
public GetWSVersion createGetWSVersion() {
return new GetWSVersion();
}
/**
* Create an instance of {@link GetMetadataRecordResponse }
*
*/
public GetMetadataRecordResponse createGetMetadataRecordResponse() {
return new GetMetadataRecordResponse();
}
/**
* Create an instance of {@link HiMetadataRecord }
*
*/
public HiMetadataRecord createHiMetadataRecord() {
return new HiMetadataRecord();
}
/**
* Create an instance of {@link HiTypedDatastream }
*
*/
public HiTypedDatastream createHiTypedDatastream() {
return new HiTypedDatastream();
}
/**
* Create an instance of {@link HIWSXMLParserException }
*
*/
public HIWSXMLParserException createHIWSXMLParserException() {
return new HIWSXMLParserException();
}
/**
* Create an instance of {@link GetMetadataRecord }
*
*/
public GetMetadataRecord createGetMetadataRecord() {<|fim▁hole|>
/**
* Create an instance of {@link GetAssetPreviewData }
*
*/
public GetAssetPreviewData createGetAssetPreviewData() {
return new GetAssetPreviewData();
}
/**
* Create an instance of {@link HIWSUTF8EncodingException }
*
*/
public HIWSUTF8EncodingException createHIWSUTF8EncodingException() {
return new HIWSUTF8EncodingException();
}
/**
* Create an instance of {@link GetReposInfo }
*
*/
public GetReposInfo createGetReposInfo() {
return new GetReposInfo();
}
/**
* Create an instance of {@link GetWSVersionResponse }
*
*/
public GetWSVersionResponse createGetWSVersionResponse() {
return new GetWSVersionResponse();
}
/**
* Create an instance of {@link GetAssetData }
*
*/
public GetAssetData createGetAssetData() {
return new GetAssetData();
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://connector.ws.hyperimage.org/", name = "assetURN")
public JAXBElement<String> createAssetURN(String value) {
return new JAXBElement<String>(_AssetURN_QNAME, String.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://connector.ws.hyperimage.org/", name = "token")
public JAXBElement<String> createToken(String value) {
return new JAXBElement<String>(_Token_QNAME, String.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetAssetPreviewDataResponse }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getAssetPreviewDataResponse")
public JAXBElement<GetAssetPreviewDataResponse> createGetAssetPreviewDataResponse(GetAssetPreviewDataResponse value) {
return new JAXBElement<GetAssetPreviewDataResponse>(_GetAssetPreviewDataResponse_QNAME, GetAssetPreviewDataResponse.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://connector.ws.hyperimage.org/", name = "parentURN")
public JAXBElement<String> createParentURN(String value) {
return new JAXBElement<String>(_ParentURN_QNAME, String.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://connector.ws.hyperimage.org/", name = "username")
public JAXBElement<String> createUsername(String value) {
return new JAXBElement<String>(_Username_QNAME, String.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetAssetData }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getAssetData")
public JAXBElement<GetAssetData> createGetAssetData(GetAssetData value) {
return new JAXBElement<GetAssetData>(_GetAssetData_QNAME, GetAssetData.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetAssetPreviewData }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getAssetPreviewData")
public JAXBElement<GetAssetPreviewData> createGetAssetPreviewData(GetAssetPreviewData value) {
return new JAXBElement<GetAssetPreviewData>(_GetAssetPreviewData_QNAME, GetAssetPreviewData.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetHierarchyLevelResponse }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getHierarchyLevelResponse")
public JAXBElement<GetHierarchyLevelResponse> createGetHierarchyLevelResponse(GetHierarchyLevelResponse value) {
return new JAXBElement<GetHierarchyLevelResponse>(_GetHierarchyLevelResponse_QNAME, GetHierarchyLevelResponse.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link Authenticate }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "authenticate")
public JAXBElement<Authenticate> createAuthenticate(Authenticate value) {
return new JAXBElement<Authenticate>(_Authenticate_QNAME, Authenticate.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link HIWSLoggedException }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "HIWSLoggedException")
public JAXBElement<HIWSLoggedException> createHIWSLoggedException(HIWSLoggedException value) {
return new JAXBElement<HIWSLoggedException>(_HIWSLoggedException_QNAME, HIWSLoggedException.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetMetadataRecord }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getMetadataRecord")
public JAXBElement<GetMetadataRecord> createGetMetadataRecord(GetMetadataRecord value) {
return new JAXBElement<GetMetadataRecord>(_GetMetadataRecord_QNAME, GetMetadataRecord.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link HIWSNotBinaryException }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "HIWSNotBinaryException")
public JAXBElement<HIWSNotBinaryException> createHIWSNotBinaryException(HIWSNotBinaryException value) {
return new JAXBElement<HIWSNotBinaryException>(_HIWSNotBinaryException_QNAME, HIWSNotBinaryException.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link String }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://connector.ws.hyperimage.org/", name = "session")
public JAXBElement<String> createSession(String value) {
return new JAXBElement<String>(_Session_QNAME, String.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link HIWSDCMetadataException }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "HIWSDCMetadataException")
public JAXBElement<HIWSDCMetadataException> createHIWSDCMetadataException(HIWSDCMetadataException value) {
return new JAXBElement<HIWSDCMetadataException>(_HIWSDCMetadataException_QNAME, HIWSDCMetadataException.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link HIWSAuthException }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "HIWSAuthException")
public JAXBElement<HIWSAuthException> createHIWSAuthException(HIWSAuthException value) {
return new JAXBElement<HIWSAuthException>(_HIWSAuthException_QNAME, HIWSAuthException.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link HIWSAssetNotFoundException }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "HIWSAssetNotFoundException")
public JAXBElement<HIWSAssetNotFoundException> createHIWSAssetNotFoundException(HIWSAssetNotFoundException value) {
return new JAXBElement<HIWSAssetNotFoundException>(_HIWSAssetNotFoundException_QNAME, HIWSAssetNotFoundException.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetWSVersion }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getWSVersion")
public JAXBElement<GetWSVersion> createGetWSVersion(GetWSVersion value) {
return new JAXBElement<GetWSVersion>(_GetWSVersion_QNAME, GetWSVersion.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetMetadataRecordResponse }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getMetadataRecordResponse")
public JAXBElement<GetMetadataRecordResponse> createGetMetadataRecordResponse(GetMetadataRecordResponse value) {
return new JAXBElement<GetMetadataRecordResponse>(_GetMetadataRecordResponse_QNAME, GetMetadataRecordResponse.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link HIWSUTF8EncodingException }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "HIWSUTF8EncodingException")
public JAXBElement<HIWSUTF8EncodingException> createHIWSUTF8EncodingException(HIWSUTF8EncodingException value) {
return new JAXBElement<HIWSUTF8EncodingException>(_HIWSUTF8EncodingException_QNAME, HIWSUTF8EncodingException.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetWSVersionResponse }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getWSVersionResponse")
public JAXBElement<GetWSVersionResponse> createGetWSVersionResponse(GetWSVersionResponse value) {
return new JAXBElement<GetWSVersionResponse>(_GetWSVersionResponse_QNAME, GetWSVersionResponse.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetReposInfo }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getReposInfo")
public JAXBElement<GetReposInfo> createGetReposInfo(GetReposInfo value) {
return new JAXBElement<GetReposInfo>(_GetReposInfo_QNAME, GetReposInfo.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link HIWSXMLParserException }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "HIWSXMLParserException")
public JAXBElement<HIWSXMLParserException> createHIWSXMLParserException(HIWSXMLParserException value) {
return new JAXBElement<HIWSXMLParserException>(_HIWSXMLParserException_QNAME, HIWSXMLParserException.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link AuthenticateResponse }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "authenticateResponse")
public JAXBElement<AuthenticateResponse> createAuthenticateResponse(AuthenticateResponse value) {
return new JAXBElement<AuthenticateResponse>(_AuthenticateResponse_QNAME, AuthenticateResponse.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetAssetDataResponse }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getAssetDataResponse")
public JAXBElement<GetAssetDataResponse> createGetAssetDataResponse(GetAssetDataResponse value) {
return new JAXBElement<GetAssetDataResponse>(_GetAssetDataResponse_QNAME, GetAssetDataResponse.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetHierarchyLevel }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getHierarchyLevel")
public JAXBElement<GetHierarchyLevel> createGetHierarchyLevel(GetHierarchyLevel value) {
return new JAXBElement<GetHierarchyLevel>(_GetHierarchyLevel_QNAME, GetHierarchyLevel.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link GetReposInfoResponse }{@code >}}
*
*/
@XmlElementDecl(namespace = "http://fedora3.connector.hyperimage.org/", name = "getReposInfoResponse")
public JAXBElement<GetReposInfoResponse> createGetReposInfoResponse(GetReposInfoResponse value) {
return new JAXBElement<GetReposInfoResponse>(_GetReposInfoResponse_QNAME, GetReposInfoResponse.class, null, value);
}
/**
* Create an instance of {@link JAXBElement }{@code <}{@link byte[]}{@code >}}
*
*/
@XmlElementDecl(namespace = "", name = "return", scope = GetAssetPreviewDataResponse.class)
public JAXBElement<byte[]> createGetAssetPreviewDataResponseReturn(byte[] value) {
return new JAXBElement<byte[]>(_GetAssetPreviewDataResponseReturn_QNAME, byte[].class, GetAssetPreviewDataResponse.class, ((byte[]) value));
}
}<|fim▁end|> | return new GetMetadataRecord();
} |
<|file_name|>1127.js<|end_file_name|><|fim▁begin|>var __v=[
{
"Id": 2257,
"Panel": 1127,
"Name": "刪除 舊 kernel",
"Sort": 0,
"Str": ""
}<|fim▁hole|><|fim▁end|> | ] |
<|file_name|>resources.js<|end_file_name|><|fim▁begin|>define(
({
map: {
error: "Karte kann nicht erstellt werden",
mouseToolTip: "Klicken Sie auf die Karte, um den Service zu überprüfen"
},
geocoder: {<|fim▁hole|> layerNotFound: "Layer ist nicht in der Webkarte enthalten",
fieldNotFound: "Feld nicht gefunden",
popupNotSet: "Pop-up ist für diesen Layer nicht aktiviert",
noLayersSet: "In der Konfiguration wurden keine Layer definiert; das Suchfeld funktioniert nicht"
},
page: {
title: "Informationssuche",
},
splashscreen: {
buttonText: "OK",
},
ui:{
basemapButton: "Grundkarte"
},
popup: {
urlMoreInfo: "Weitere Informationen"
}
})
);<|fim▁end|> | defaultText: "Geben Sie eine Adresse oder einen Point of Interest ein"
},
error: { |
<|file_name|>test_dominating_set.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from nose.tools import ok_
from nose.tools import eq_
import networkx as nx
from networkx.algorithms.approximation import min_weighted_dominating_set
from networkx.algorithms.approximation import min_edge_dominating_set
class TestMinWeightDominatingSet:
def test_min_weighted_dominating_set(self):
graph = nx.Graph()
graph.add_edge(1, 2)
graph.add_edge(1, 5)
graph.add_edge(2, 3)
graph.add_edge(2, 5)
graph.add_edge(3, 4)
graph.add_edge(3, 6)
graph.add_edge(5, 6)
vertices = set([1, 2, 3, 4, 5, 6])
# due to ties, this might be hard to test tight bounds
dom_set = min_weighted_dominating_set(graph)
for vertex in vertices - dom_set:
neighbors = set(graph.neighbors(vertex))
ok_(len(neighbors & dom_set) > 0, "Non dominating set found!")
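# Helper sketch (not part of the original tests): the same domination check as
# a reusable predicate; every vertex outside dom_set needs a neighbor inside it.
def _is_dominating(self, graph, dom_set):
    return all(set(graph.neighbors(v)) & dom_set
               for v in set(graph.nodes()) - dom_set)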
def test_star_graph(self):
"""Tests that an approximate dominating set for the star graph,
even when the center node does not have the smallest integer
label, gives just the center node.
For more information, see #1527.
"""
# Create a star graph in which the center node has the highest
# label instead of the lowest.
G = nx.star_graph(10)
G = nx.relabel_nodes(G, {0: 9, 9: 0})
eq_(min_weighted_dominating_set(G), {9})
def test_min_edge_dominating_set(self):
graph = nx.path_graph(5)
dom_set = min_edge_dominating_set(graph)
# this is a crappy way to test, but good enough for now.
for edge in graph.edges_iter():
if edge in dom_set:
continue
else:
u, v = edge
found = False
for dom_edge in dom_set:
found |= u == dom_edge[0] or u == dom_edge[1]
ok_(found, "Non adjacent edge found!")
graph = nx.complete_graph(10)
dom_set = min_edge_dominating_set(graph)
# this is a crappy way to test, but good enough for now.
for edge in graph.edges_iter():
if edge in dom_set:
continue
else:
u, v = edge
found = False
for dom_edge in dom_set:
found |= u == dom_edge[0] or u == dom_edge[1]<|fim▁hole|><|fim▁end|> | ok_(found, "Non adjacent edge found!") |
<|file_name|>highlight.js<|end_file_name|><|fim▁begin|>var Highlight = function() {
/* Utility functions */
function escape(value) {
return value.replace(/&/gm, '&').replace(/</gm, '<').replace(/>/gm, '>');
}
function tag(node) {
return node.nodeName.toLowerCase();
}
function testRe(re, lexeme) {
var match = re && re.exec(lexeme);
return match && match.index == 0;
}
function blockLanguage(block) {
var classes = (block.className + ' ' + (block.parentNode ? block.parentNode.className : '')).split(/\s+/);
classes = classes.map(function(c) {return c.replace(/^lang(uage)?-/, '');});
return classes.filter(function(c) {return getLanguage(c) || /no(-?)highlight/.test(c);})[0];
}
function inherit(parent, obj) {
var result = {};
for (var key in parent)
result[key] = parent[key];
if (obj)
for (var key in obj)
result[key] = obj[key];
return result;
};
/* Stream merging */
function nodeStream(node) {
var result = [];
(function _nodeStream(node, offset) {
for (var child = node.firstChild; child; child = child.nextSibling) {
if (child.nodeType == 3)
offset += child.nodeValue.length;
else if (child.nodeType == 1) {
result.push({
event: 'start',
offset: offset,<|fim▁hole|> });
offset = _nodeStream(child, offset);
result.push({
event: 'stop',
offset: offset,
node: child
});
}
}
return offset;
})(node, 0);
return result;
}
function mergeStreams(original, highlighted, value) {
var processed = 0;
var result = '';
var nodeStack = [];
function selectStream() {
if (!original.length || !highlighted.length) {
return original.length ? original : highlighted;
}
if (original[0].offset != highlighted[0].offset) {
return (original[0].offset < highlighted[0].offset) ? original : highlighted;
}
/*
To avoid starting the stream just before it should stop the order is
ensured that original always starts first and closes last:
if (event1 == 'start' && event2 == 'start')
return original;
if (event1 == 'start' && event2 == 'stop')
return highlighted;
if (event1 == 'stop' && event2 == 'start')
return original;
if (event1 == 'stop' && event2 == 'stop')
return highlighted;
... which is collapsed to:
*/
return highlighted[0].event == 'start' ? original : highlighted;
}
function open(node) {
function attr_str(a) {return ' ' + a.nodeName + '="' + escape(a.value) + '"';}
result += '<' + tag(node) + Array.prototype.map.call(node.attributes, attr_str).join('') + '>';
}
function close(node) {
result += '</' + tag(node) + '>';
}
function render(event) {
(event.event == 'start' ? open : close)(event.node);
}
while (original.length || highlighted.length) {
var stream = selectStream();
result += escape(value.substr(processed, stream[0].offset - processed));
processed = stream[0].offset;
if (stream == original) {
/*
On any opening or closing tag of the original markup we first close
the entire highlighted node stack, then render the original tag along
with all the following original tags at the same offset and then
reopen all the tags on the highlighted stack.
*/
nodeStack.reverse().forEach(close);
do {
render(stream.splice(0, 1)[0]);
stream = selectStream();
} while (stream == original && stream.length && stream[0].offset == processed);
nodeStack.reverse().forEach(open);
} else {
if (stream[0].event == 'start') {
nodeStack.push(stream[0].node);
} else {
nodeStack.pop();
}
render(stream.splice(0, 1)[0]);
}
}
return result + escape(value.substr(processed));
}
/* Initialization */
function compileLanguage(language) {
function reStr(re) {
return (re && re.source) || re;
}
function langRe(value, global) {
return RegExp(
reStr(value),
'm' + (language.case_insensitive ? 'i' : '') + (global ? 'g' : '')
);
}
function compileMode(mode, parent) {
if (mode.compiled)
return;
mode.compiled = true;
mode.keywords = mode.keywords || mode.beginKeywords;
if (mode.keywords) {
var compiled_keywords = {};
var flatten = function(className, str) {
if (language.case_insensitive) {
str = str.toLowerCase();
}
str.split(' ').forEach(function(kw) {
var pair = kw.split('|');
compiled_keywords[pair[0]] = [className, pair[1] ? Number(pair[1]) : 1];
});
};
if (typeof mode.keywords == 'string') { // string
flatten('keyword', mode.keywords);
} else {
Object.keys(mode.keywords).forEach(function (className) {
flatten(className, mode.keywords[className]);
});
}
mode.keywords = compiled_keywords;
}
mode.lexemesRe = langRe(mode.lexemes || /\b[A-Za-z0-9_]+\b/, true);
if (parent) {
if (mode.beginKeywords) {
mode.begin = '\\b(' + mode.beginKeywords.split(' ').join('|') + ')\\b';
}
if (!mode.begin)
mode.begin = /\B|\b/;
mode.beginRe = langRe(mode.begin);
if (!mode.end && !mode.endsWithParent)
mode.end = /\B|\b/;
if (mode.end)
mode.endRe = langRe(mode.end);
mode.terminator_end = reStr(mode.end) || '';
if (mode.endsWithParent && parent.terminator_end)
mode.terminator_end += (mode.end ? '|' : '') + parent.terminator_end;
}
if (mode.illegal)
mode.illegalRe = langRe(mode.illegal);
if (mode.relevance === undefined)
mode.relevance = 1;
if (!mode.contains) {
mode.contains = [];
}
var expanded_contains = [];
mode.contains.forEach(function(c) {
if (c.variants) {
c.variants.forEach(function(v) {expanded_contains.push(inherit(c, v));});
} else {
expanded_contains.push(c == 'self' ? mode : c);
}
});
mode.contains = expanded_contains;
mode.contains.forEach(function(c) {compileMode(c, mode);});
if (mode.starts) {
compileMode(mode.starts, parent);
}
var terminators =
mode.contains.map(function(c) {
return c.beginKeywords ? '\\.?(' + c.begin + ')\\.?' : c.begin;
})
.concat([mode.terminator_end, mode.illegal])
.map(reStr)
.filter(Boolean);
mode.terminators = terminators.length ? langRe(terminators.join('|'), true) : {exec: function(s) {return null;}};
}
compileMode(language);
}
/*
Core highlighting function. Accepts a language name, or an alias, and a
string with the code to highlight. Returns an object with the following
properties:
- relevance (int)
- value (an HTML string with highlighting markup)
*/
function highlight(name, value, ignore_illegals, continuation) {
function subMode(lexeme, mode) {
for (var i = 0; i < mode.contains.length; i++) {
if (testRe(mode.contains[i].beginRe, lexeme)) {
return mode.contains[i];
}
}
}
function endOfMode(mode, lexeme) {
if (testRe(mode.endRe, lexeme)) {
return mode;
}
if (mode.endsWithParent) {
return endOfMode(mode.parent, lexeme);
}
}
function isIllegal(lexeme, mode) {
return !ignore_illegals && testRe(mode.illegalRe, lexeme);
}
function keywordMatch(mode, match) {
var match_str = language.case_insensitive ? match[0].toLowerCase() : match[0];
return mode.keywords.hasOwnProperty(match_str) && mode.keywords[match_str];
}
function buildSpan(classname, insideSpan, leaveOpen, noPrefix) {
var classPrefix = noPrefix ? '' : options.classPrefix,
openSpan = '<span class="' + classPrefix,
closeSpan = leaveOpen ? '' : '</span>';
openSpan += classname + '">';
return openSpan + insideSpan + closeSpan;
}
function processKeywords() {
if (!top.keywords)
return escape(mode_buffer);
var result = '';
var last_index = 0;
top.lexemesRe.lastIndex = 0;
var match = top.lexemesRe.exec(mode_buffer);
while (match) {
result += escape(mode_buffer.substr(last_index, match.index - last_index));
var keyword_match = keywordMatch(top, match);
if (keyword_match) {
relevance += keyword_match[1];
result += buildSpan(keyword_match[0], escape(match[0]));
} else {
result += escape(match[0]);
}
last_index = top.lexemesRe.lastIndex;
match = top.lexemesRe.exec(mode_buffer);
}
return result + escape(mode_buffer.substr(last_index));
}
function processSubLanguage() {
if (top.subLanguage && !languages[top.subLanguage]) {
return escape(mode_buffer);
}
var result = top.subLanguage ? highlight(top.subLanguage, mode_buffer, true, subLanguageTop) : highlightAuto(mode_buffer);
// Counting an embedded language's score towards the host language may be disabled
// by zeroing the containing mode's relevance. A case in point is Markdown, which
// allows XML everywhere and would otherwise let every XML snippet earn a much larger Markdown
// score.
if (top.relevance > 0) {
relevance += result.relevance;
}
if (top.subLanguageMode == 'continuous') {
subLanguageTop = result.top;
}
return buildSpan(result.language, result.value, false, true);
}
function processBuffer() {
return top.subLanguage !== undefined ? processSubLanguage() : processKeywords();
}
function startNewMode(mode, lexeme) {
var markup = mode.className? buildSpan(mode.className, '', true): '';
if (mode.returnBegin) {
result += markup;
mode_buffer = '';
} else if (mode.excludeBegin) {
result += escape(lexeme) + markup;
mode_buffer = '';
} else {
result += markup;
mode_buffer = lexeme;
}
top = Object.create(mode, {parent: {value: top}});
}
function processLexeme(buffer, lexeme) {
mode_buffer += buffer;
if (lexeme === undefined) {
result += processBuffer();
return 0;
}
var new_mode = subMode(lexeme, top);
if (new_mode) {
result += processBuffer();
startNewMode(new_mode, lexeme);
return new_mode.returnBegin ? 0 : lexeme.length;
}
var end_mode = endOfMode(top, lexeme);
if (end_mode) {
var origin = top;
if (!(origin.returnEnd || origin.excludeEnd)) {
mode_buffer += lexeme;
}
result += processBuffer();
do {
if (top.className) {
result += '</span>';
}
relevance += top.relevance;
top = top.parent;
} while (top != end_mode.parent);
if (origin.excludeEnd) {
result += escape(lexeme);
}
mode_buffer = '';
if (end_mode.starts) {
startNewMode(end_mode.starts, '');
}
return origin.returnEnd ? 0 : lexeme.length;
}
if (isIllegal(lexeme, top))
throw new Error('Illegal lexeme "' + lexeme + '" for mode "' + (top.className || '<unnamed>') + '"');
/*
Parser should not reach this point as all types of lexemes should be caught
earlier, but if it does due to some bug make sure it advances at least one
character forward to prevent infinite looping.
*/
mode_buffer += lexeme;
return lexeme.length || 1;
}
var language = getLanguage(name);
if (!language) {
throw new Error('Unknown language: "' + name + '"');
}
compileLanguage(language);
var top = continuation || language;
var subLanguageTop;
var result = '';
for(var current = top; current != language; current = current.parent) {
if (current.className) {
result = buildSpan(current.className, '', true) + result;
}
}
var mode_buffer = '';
var relevance = 0;
try {
var match, count, index = 0;
while (true) {
top.terminators.lastIndex = index;
match = top.terminators.exec(value);
if (!match)
break;
count = processLexeme(value.substr(index, match.index - index), match[0]);
index = match.index + count;
}
processLexeme(value.substr(index));
for(var current = top; current.parent; current = current.parent) { // close dangling modes
if (current.className) {
result += '</span>';
}
};
return {
relevance: relevance,
value: result,
language: name,
top: top
};
} catch (e) {
if (e.message.indexOf('Illegal') != -1) {
return {
relevance: 0,
value: escape(value)
};
} else {
throw e;
}
}
}
/*
Highlighting with language detection. Accepts a string with the code to
highlight. Returns an object with the following properties:
- language (detected language)
- relevance (int)
- value (an HTML string with highlighting markup)
- second_best (object with the same structure for second-best heuristically
detected language, may be absent)
*/
function highlightAuto(text, languageSubset) {
languageSubset = languageSubset || options.languages || Object.keys(languages);
var result = {
relevance: 0,
value: escape(text)
};
var second_best = result;
languageSubset.forEach(function(name) {
if (!getLanguage(name)) {
return;
}
var current = highlight(name, text, false);
current.language = name;
if (current.relevance > second_best.relevance) {
second_best = current;
}
if (current.relevance > result.relevance) {
second_best = result;
result = current;
}
});
if (second_best.language) {
result.second_best = second_best;
}
return result;
}
/*
Post-processing of the highlighted markup:
- replace TABs with something more useful
- replace real line-breaks with '<br>' for non-pre containers
*/
function fixMarkup(value) {
if (options.tabReplace) {
value = value.replace(/^((<[^>]+>|\t)+)/gm, function(match, p1, offset, s) {
return p1.replace(/\t/g, options.tabReplace);
});
}
if (options.useBR) {
value = value.replace(/\n/g, '<br>');
}
return value;
}
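  /*
  Illustrative sketch (added; not in the original source) of fixMarkup under
  assumed option values — both settings below are hypothetical, and `hljs` is
  an assumed instance of the exported constructor.
      hljs.configure({tabReplace: '  ', useBR: true});
      hljs.fixMarkup('\tvar x = 1;\nvar y = 2;');
      // -> '  var x = 1;<br>var y = 2;' (leading TAB replaced, newline becomes <br>)
  */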
/*
Applies highlighting to a DOM node containing code. Accepts a DOM node and
two optional parameters for fixMarkup.
*/
function highlightBlock(block) {
var language = blockLanguage(block);
if (/no(-?)highlight/.test(language))
return;
var node;
if (options.useBR) {
node = document.createElementNS('http://www.w3.org/1999/xhtml', 'div');
node.innerHTML = block.innerHTML.replace(/\n/g, '').replace(/<br[ \/]*>/g, '\n');
} else {
node = block;
}
var text = node.textContent;
var result = language ? highlight(language, text, true) : highlightAuto(text);
var originalStream = nodeStream(node);
if (originalStream.length) {
var resultNode = document.createElementNS('http://www.w3.org/1999/xhtml', 'div');
resultNode.innerHTML = result.value;
result.value = mergeStreams(originalStream, nodeStream(resultNode), text);
}
result.value = fixMarkup(result.value);
block.innerHTML = result.value;
block.className += ' hljs ' + (!language && result.language || '');
block.result = {
language: result.language,
re: result.relevance
};
if (result.second_best) {
block.second_best = {
language: result.second_best.language,
re: result.second_best.relevance
};
}
}
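  /*
  Illustrative sketch (added; not in the original source): highlighting a
  single element by hand. The selector is a hypothetical example; the language
  is read from the element's class via blockLanguage above, and `hljs` is an
  assumed instance of the exported constructor.
      var el = document.querySelector('pre code.javascript');
      if (el) {
        hljs.highlightBlock(el);                       // rewrites el.innerHTML with markup
        console.log(el.result.language, el.result.re); // detected language + relevance
      }
  */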
var options = {
classPrefix: 'hljs-',
tabReplace: null,
useBR: false,
languages: undefined
};
/*
Updates highlight.js global options with values passed in the form of an object
*/
function configure(user_options) {
options = inherit(options, user_options);
}
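  /*
  Illustrative sketch (added; not in the original source): configure merges
  the given keys over the defaults above via inherit, so unlisted keys keep
  their current values. The values shown are hypothetical; `hljs` is an
  assumed instance of the exported constructor.
      hljs.configure({
        tabReplace: '    ',              // render TABs as four spaces
        languages: ['javascript', 'xml'] // restrict auto-detection to these
      });
  */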
/*
Applies highlighting to all <pre><code>..</code></pre> blocks on a page.
*/
function initHighlighting() {
if (initHighlighting.called)
return;
initHighlighting.called = true;
var blocks = document.querySelectorAll('pre code');
Array.prototype.forEach.call(blocks, highlightBlock);
}
/*
Attaches highlighting to the page load event.
*/
function initHighlightingOnLoad() {
addEventListener('DOMContentLoaded', initHighlighting, false);
addEventListener('load', initHighlighting, false);
}
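  /*
  Illustrative sketch (added; not in the original source): typical wiring at
  page load, with `hljs` an assumed instance of the exported constructor.
  initHighlighting guards itself with the `called` flag, so the two listeners
  registered above cannot run it twice.
      hljs.initHighlightingOnLoad(); // call once, any time before DOMContentLoaded
  */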
var languages = {};
var aliases = {};
function registerLanguage(name, language) {
var lang = languages[name] = language(this);
if (lang.aliases) {
lang.aliases.forEach(function(alias) {aliases[alias] = name;});
}
}
function listLanguages() {
return Object.keys(languages);
}
function getLanguage(name) {
return languages[name] || languages[aliases[name]];
}
/* Interface definition */
this.highlight = highlight;
this.highlightAuto = highlightAuto;
this.fixMarkup = fixMarkup;
this.highlightBlock = highlightBlock;
this.configure = configure;
this.initHighlighting = initHighlighting;
this.initHighlightingOnLoad = initHighlightingOnLoad;
this.registerLanguage = registerLanguage;
this.listLanguages = listLanguages;
this.getLanguage = getLanguage;
this.inherit = inherit;
// Common regexps
this.IDENT_RE = '[a-zA-Z][a-zA-Z0-9_]*';
this.UNDERSCORE_IDENT_RE = '[a-zA-Z_][a-zA-Z0-9_]*';
this.NUMBER_RE = '\\b\\d+(\\.\\d+)?';
this.C_NUMBER_RE = '(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)'; // 0x..., 0..., decimal, float
this.BINARY_NUMBER_RE = '\\b(0b[01]+)'; // 0b...
this.RE_STARTERS_RE = '!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~';
// Common modes
this.BACKSLASH_ESCAPE = {
begin: '\\\\[\\s\\S]', relevance: 0
};
this.APOS_STRING_MODE = {
className: 'string',
begin: '\'', end: '\'',
illegal: '\\n',
contains: [this.BACKSLASH_ESCAPE]
};
this.QUOTE_STRING_MODE = {
className: 'string',
begin: '"', end: '"',
illegal: '\\n',
contains: [this.BACKSLASH_ESCAPE]
};
this.PHRASAL_WORDS_MODE = {
begin: /\b(a|an|the|are|I|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such)\b/
};
this.C_LINE_COMMENT_MODE = {
className: 'comment',
begin: '//', end: '$',
contains: [this.PHRASAL_WORDS_MODE]
};
this.C_BLOCK_COMMENT_MODE = {
className: 'comment',
begin: '/\\*', end: '\\*/',
contains: [this.PHRASAL_WORDS_MODE]
};
this.HASH_COMMENT_MODE = {
className: 'comment',
begin: '#', end: '$',
contains: [this.PHRASAL_WORDS_MODE]
};
this.NUMBER_MODE = {
className: 'number',
begin: this.NUMBER_RE,
relevance: 0
};
this.C_NUMBER_MODE = {
className: 'number',
begin: this.C_NUMBER_RE,
relevance: 0
};
this.BINARY_NUMBER_MODE = {
className: 'number',
begin: this.BINARY_NUMBER_RE,
relevance: 0
};
this.CSS_NUMBER_MODE = {
className: 'number',
begin: this.NUMBER_RE + '(' +
'%|em|ex|ch|rem' +
'|vw|vh|vmin|vmax' +
'|cm|mm|in|pt|pc|px' +
'|deg|grad|rad|turn' +
'|s|ms' +
'|Hz|kHz' +
'|dpi|dpcm|dppx' +
')?',
relevance: 0
};
this.REGEXP_MODE = {
className: 'regexp',
begin: /\//, end: /\/[gim]*/,
illegal: /\n/,
contains: [
this.BACKSLASH_ESCAPE,
{
begin: /\[/, end: /\]/,
relevance: 0,
contains: [this.BACKSLASH_ESCAPE]
}
]
};
this.TITLE_MODE = {
className: 'title',
begin: this.IDENT_RE,
relevance: 0
};
this.UNDERSCORE_TITLE_MODE = {
className: 'title',
begin: this.UNDERSCORE_IDENT_RE,
relevance: 0
};
};
module.exports = Highlight;<|fim▁end|> | node: child |
<|file_name|>blob.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::BlobBinding;
use dom::bindings::codegen::Bindings::BlobBinding::BlobMethods;
use dom::bindings::codegen::InheritTypes::FileDerived;
use dom::bindings::error::Fallible;
use dom::bindings::global::{GlobalRef, GlobalField};
use dom::bindings::js::Root;
use dom::bindings::utils::{Reflector, reflect_dom_object};
use num::ToPrimitive;
use std::ascii::AsciiExt;
use std::borrow::ToOwned;
use std::cell::{Cell};
use std::cmp::{min, max};
use std::sync::mpsc::Sender;
use util::str::DOMString;
#[derive(JSTraceable, HeapSizeOf)]
pub enum BlobTypeId {
Blob,
File,
}
// http://dev.w3.org/2006/webapi/FileAPI/#blob
#[dom_struct]
pub struct Blob {
reflector_: Reflector,
type_: BlobTypeId,
bytes: Option<Vec<u8>>,
typeString: DOMString,
global: GlobalField,
isClosed_: Cell<bool>
}
fn is_ascii_printable(string: &DOMString) -> bool {
// Step 5.1 in Sec 5.1 of File API spec
// http://dev.w3.org/2006/webapi/FileAPI/#constructorBlob
return string.chars().all(|c| { c >= '\x20' && c <= '\x7E' })
}
impl Blob {
pub fn new_inherited(global: GlobalRef, type_: BlobTypeId,
bytes: Option<Vec<u8>>, typeString: &str) -> Blob {
Blob {
reflector_: Reflector::new(),
type_: type_,
bytes: bytes,
typeString: typeString.to_owned(),
global: GlobalField::from_rooted(&global),
isClosed_: Cell::new(false)
}
}
pub fn new(global: GlobalRef, bytes: Option<Vec<u8>>,
typeString: &str) -> Root<Blob> {
reflect_dom_object(box Blob::new_inherited(global, BlobTypeId::Blob, bytes, typeString),
global,
BlobBinding::Wrap)
}
// http://dev.w3.org/2006/webapi/FileAPI/#constructorBlob
pub fn Constructor(global: GlobalRef) -> Fallible<Root<Blob>> {
Ok(Blob::new(global, None, ""))
}
// http://dev.w3.org/2006/webapi/FileAPI/#constructorBlob
pub fn Constructor_(global: GlobalRef, blobParts: DOMString,
blobPropertyBag: &BlobBinding::BlobPropertyBag) -> Fallible<Root<Blob>> {
//TODO: accept other blobParts types - ArrayBuffer or ArrayBufferView or Blob
let bytes: Option<Vec<u8>> = Some(blobParts.into_bytes());
let typeString = if is_ascii_printable(&blobPropertyBag.type_) {
&*blobPropertyBag.type_
} else {
""
};
Ok(Blob::new(global, bytes, &typeString.to_ascii_lowercase()))
}
}
impl Blob {
pub fn read_out_buffer(&self, send: Sender<Vec<u8>>) {
send.send(self.bytes.clone().unwrap_or(vec![])).unwrap();
}
}
impl BlobMethods for Blob {
// https://dev.w3.org/2006/webapi/FileAPI/#dfn-size<|fim▁hole|> match self.bytes {
None => 0,
Some(ref bytes) => bytes.len() as u64
}
}
// https://dev.w3.org/2006/webapi/FileAPI/#dfn-type
fn Type(&self) -> DOMString {
self.typeString.clone()
}
// https://dev.w3.org/2006/webapi/FileAPI/#slice-method-algo
fn Slice(&self, start: Option<i64>, end: Option<i64>,
contentType: Option<DOMString>) -> Root<Blob> {
let size: i64 = self.Size().to_i64().unwrap();
let relativeStart: i64 = match start {
None => 0,
Some(start) => {
if start < 0 {
max(size.to_i64().unwrap() + start, 0)
} else {
min(start, size)
}
}
};
let relativeEnd: i64 = match end {
None => size,
Some(end) => {
if end < 0 {
max(size + end, 0)
} else {
min(end, size)
}
}
};
let relativeContentType = match contentType {
None => "".to_owned(),
Some(str) => {
if is_ascii_printable(&str) {
str.to_ascii_lowercase()
} else {
"".to_owned()
}
}
};
let span: i64 = max(relativeEnd - relativeStart, 0);
let global = self.global.root();
match self.bytes {
None => Blob::new(global.r(), None, &relativeContentType),
Some(ref vec) => {
let start = relativeStart.to_usize().unwrap();
let end = (relativeStart + span).to_usize().unwrap();
let mut bytes: Vec<u8> = Vec::new();
bytes.push_all(&vec[start..end]);
Blob::new(global.r(), Some(bytes), &relativeContentType)
}
}
}
// https://dev.w3.org/2006/webapi/FileAPI/#dfn-isClosed
fn IsClosed(&self) -> bool {
self.isClosed_.get()
}
// https://dev.w3.org/2006/webapi/FileAPI/#dfn-close
fn Close(&self) {
// Step 1
if self.isClosed_.get() {
return;
}
// Step 2
self.isClosed_.set(true);
// TODO Step 3 if Blob URL Store is implemented
}
}
impl FileDerived for Blob {
fn is_file(&self) -> bool {
match self.type_ {
BlobTypeId::File => true,
_ => false
}
}
}<|fim▁end|> | fn Size(&self) -> u64 { |
<|file_name|>config.js<|end_file_name|><|fim▁begin|>module.exports = function(grunt){
var config = {
docs: {
dir: "./test",
target: "./doc/TestPlan.md"<|fim▁hole|>};<|fim▁end|> | }
};
return config; |
<|file_name|>auth.js<|end_file_name|><|fim▁begin|>'use strict';
angular.module('cheeperApp')
.controller('AuthCtrl', function ($scope, $http) {
$scope.signin = function() {
$http
.post('http://127.0.0.1:8000/auth-token/', $scope.credentials)
.success(function(data, status, headers, config) {
$scope.token = data.token;<|fim▁hole|> };
});<|fim▁end|> | })
.error(function(data, status, headers, config) {
console.log(data);
}); |
<|file_name|>Telecom.unit.js<|end_file_name|><|fim▁begin|>let EventEmitter = require('events').EventEmitter;<|fim▁hole|>describe("Interface Unit Tests", function () {
it('should create a new interface', function () {
telecom = new Telecom();
expect(telecom).to.be.an.instanceOf(EventEmitter);
expect(telecom).to.have.property('parallelize');
expect(telecom).to.have.property('pipeline');
expect(telecom).to.have.property('isMaster', true);
});
it('should return bundled interfaces', function () {
expect(telecom.interfaces).to.be.an.Object;
expect(telecom.interfaces).to.have.property('TCP');
});
it('should create a new pipeline', function () {
let intf = new telecom.interfaces.TCP(8000);
expect(intf).to.be.an.instanceOf(Interface);
let pipeline = telecom.pipeline(intf);
expect(pipeline).to.be.an.instanceOf(Pipeline);
});
});<|fim▁end|> | let telecom;
|
<|file_name|>countries.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# TGiT, Music Tagger for Professionals
# Copyright (C) 2013 Iconoclaste Musique Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
COUNTRIES = {
"AF": "Afghanistan",
"AX": "Aland Islan",
"AL": "Albania",
"DZ": "Algeria",
"AS": "American Samoa",
"AD": "Andorra",
"AO": "Angola",
"AI": "Anguilla",
"AQ": "Antarctica",
"AG": "Antigua and Barbuda",
"AR": "Argentina",
"AM": "Armenia",
"AW": "Aruba",
"AU": "Australia",
"AT": "Austria",
"AZ": "Azerbaijan",
"BS": "Bahamas",
"BH": "Bahrain",
"BD": "Bangladesh",
"BB": "Barbados",
"BY": "Belarus",
"BE": "Belgium",
"BZ": "Belize",
"BJ": "Benin",
"BM": "Bermuda",
"BT": "Bhutan",
"BO": "Bolivia",
"BA": "Bosnia and Herzegovina",
"BW": "Botswana",
"BV": "Bouvet Island",
"BR": "Brazil",
"VG": "British Virgin Islands",
"IO": "British Indian Ocean Territory",
"BN": "Brunei Darussalam",
"BG": "Bulgaria",
"BF": "Burkina Faso",
"BI": "Burundi",
"KH": "Cambodia",
"CM": "Cameroon",
"CA": "Canada",
"CV": "Cape Verde",
"KY": "Cayman Islands",
"CF": "Central African Republic",
"TD": "Chad",
"CL": "Chile",
"CN": "China",
"HK": "Hong Kong, Special Administrative Region of China",
"MO": "Macao, Special Administrative Region of China",
"CX": "Christmas Island",
"CC": "Cocos (Keeling) Islands",
"CO": "Colombia",
"KM": "Comoros",
"CG": "Congo (Brazzaville)",
"CD": "Congo, Democratic Republic of the",
"CK": "Cook Islands",
"CR": "Costa Rica",
"CI": "Côte d'Ivoire",
"HR": "Croatia",
"CU": "Cuba",
"CY": "Cyprus",
"CZ": "Czech Republic",
"DK": "Denmark",
"DJ": "Djibouti",
"DM": "Dominica",
"DO": "Dominican Republic",
"EC": "Ecuador",
"EG": "Egypt",
"SV": "El Salvador",
"GQ": "Equatorial Guinea",
"ER": "Eritrea",
"EE": "Estonia",
"ET": "Ethiopia",
"FK": "Falkland Islands (Malvinas)",
"FO": "Faroe Islands",
"FJ": "Fiji",
"FI": "Finland",
"FR": "France",
"GF": "French Guiana",
"PF": "French Polynesia",
"TF": "French Southern Territories",
"GA": "Gabon",
"GM": "Gambia",
"GE": "Georgia",
"DE": "Germany",
"GH": "Ghana",
"GI": "Gibraltar",
"GR": "Greece",
"GL": "Greenland",
"GD": "Grenada",
"GP": "Guadeloupe",
"GU": "Guam",
"GT": "Guatemala",
"GG": "Guernsey",
"GN": "Guinea",
"GW": "Guinea-Bissau",
"GY": "Guyana",
"HT": "Haiti",
"HM": "Heard Island and Mcdonald Islands",
"VA": "Holy See (Vatican City State)",
"HN": "Honduras",
"HU": "Hungary",
"IS": "Iceland",
"IN": "India",
"ID": "Indonesia",
"IR": "Iran, Islamic Republic of",
"IQ": "Iraq",
"IE": "Ireland",
"IM": "Isle of Man",
"IL": "Israel",
"IT": "Italy",
"JM": "Jamaica",
"JP": "Japan",
"JE": "Jersey",
"JO": "Jordan",
"KZ": "Kazakhstan",
"KE": "Kenya",
"KI": "Kiribati",
"KP": "Korea, Democratic People's Republic of",
"KR": "Korea, Republic of",
"KW": "Kuwait",
"KG": "Kyrgyzstan",
"LA": "Lao PDR",
"LV": "Latvia",
"LB": "Lebanon",
"LS": "Lesotho",
"LR": "Liberia",
"LY": "Libya",
"LI": "Liechtenstein",
"LT": "Lithuania",
"LU": "Luxembourg",
"MK": "Macedonia, Republic of",
"MG": "Madagascar",
"MW": "Malawi",
"MY": "Malaysia",
"MV": "Maldives",
"ML": "Mali",
"MT": "Malta",
"MH": "Marshall Islands",
"MQ": "Martinique",
"MR": "Mauritania",
"MU": "Mauritius",
"YT": "Mayotte",
"MX": "Mexico",
"FM": "Micronesia, Federated States of",
"MD": "Moldova",
"MC": "Monaco",
"MN": "Mongolia",
"ME": "Montenegro",
"MS": "Montserrat",
"MA": "Morocco",
"MZ": "Mozambique",
"MM": "Myanmar",
"NA": "Namibia",
"NR": "Nauru",
"NP": "Nepal",
"NL": "Netherlands",
"AN": "Netherlands Antilles",
"NC": "New Caledonia",
"NZ": "New Zealand",
"NI": "Nicaragua",
"NE": "Niger",
"NG": "Nigeria",
"NU": "Niue",
"NF": "Norfolk Island",
"MP": "Northern Mariana Islands",
"NO": "Norway",
"OM": "Oman",
"PK": "Pakistan",
"PW": "Palau",
"PS": "Palestinian Territory, Occupied",
"PA": "Panama",
"PG": "Papua New Guinea",
"PY": "Paraguay",
"PE": "Peru",
"PH": "Philippines",
"PN": "Pitcairn",
"PL": "Poland",
"PT": "Portugal",
"PR": "Puerto Rico",
"QA": "Qatar",
"RE": "Réunion",
"RO": "Romania",
"RU": "Russian Federation",
"RW": "Rwanda",
"BL": "Saint-Barthélemy",
"SH": "Saint Helena",
"KN": "Saint Kitts and Nevis",
"LC": "Saint Lucia",
"MF": "Saint-Martin (French part)",
"PM": "Saint Pierre and Miquelon",
"VC": "Saint Vincent and Grenadines",
"WS": "Samoa",
"SM": "San Marino",
"ST": "Sao Tome and Principe",
"SA": "Saudi Arabia",
"SN": "Senegal",
"RS": "Serbia",
"SC": "Seychelles",
"SL": "Sierra Leone",
"SG": "Singapore",
"SK": "Slovakia",
"SI": "Slovenia",
"SB": "Solomon Islands",
"SO": "Somalia",
"ZA": "South Africa",
"GS": "South Georgia and the South Sandwich Islands",
"SS": "South Sudan",
"ES": "Spain",
"LK": "Sri Lanka",
"SD": "Sudan",
"SR": "Suriname",
"SJ": "Svalbard and Jan Mayen Islands",
"SZ": "Swaziland",
"SE": "Sweden",
"CH": "Switzerland",
"SY": "Syrian Arab Republic (Syria)",
"TW": "Taiwan, Republic of China",
"TJ": "Tajikistan",
"TZ": "Tanzania, United Republic of",
"TH": "Thailand",
"TL": "Timor-Leste",
"TG": "Togo",
"TK": "Tokelau",
"TO": "Tonga",
"TT": "Trinidad and Tobago",
"TN": "Tunisia",
"TR": "Turkey",
"TM": "Turkmenistan",
"TC": "Turks and Caicos Islands",
"TV": "Tuvalu",
"UG": "Uganda",
"UA": "Ukraine",
"AE": "United Arab Emirates",
"GB": "United Kingdom",
"US": "United States of America",
"UM": "United States Minor Outlying Islands",
"UY": "Uruguay",
"UZ": "Uzbekistan",
"VU": "Vanuatu",
"VE": "Venezuela (Bolivarian Republic of)",
"VN": "Viet Nam",
"VI": "Virgin Islands, US",
"WF": "Wallis and Futuna Islands",
"EH": "Western Sahara",
"YE": "Yemen",
"ZM": "Zambia",
"ZW": "Zimbabwe"
}
ISO3166_2_A2_TO_ISO3166_2_A3 = {
"AF": "AFG",
"AX": "ALA",
"AL": "ALB",
"DZ": "DZA",
"AS": "ASM",
"AD": "AND",
"AO": "AGO",
"AI": "AIA",
"AQ": "ATA",
"AG": "ATG",
"AR": "ARG",
"AM": "ARM",
"AW": "ABW",
"AU": "AUS",
"AT": "AUT",
"AZ": "AZE",
"BS": "BHS",
"BH": "BHR",
"BD": "BGD",
"BB": "BRB",
"BY": "BLR",
"BE": "BEL",
"BZ": "BLZ",
"BJ": "BEN",
"BM": "BMU",
"BT": "BTN",
"BO": "BOL",
"BA": "BIH",
"BW": "BWA",
"BV": "BVT",
"BR": "BRA",
"VG": "VGB",
"IO": "IOT",
"BN": "BRN",
"BG": "BGR",
"BF": "BFA",
"BI": "BDI",
"KH": "KHM",
"CM": "CMR",
"CA": "CAN",
"CV": "CPV",
"KY": "CYM",
"CF": "CAF",
"TD": "TCD",
"CL": "CHL",
"CN": "CHN",
"HK": "HKG",
"MO": "MAC",
"CX": "CXR",
"CC": "CCK",
"CO": "COL",
"KM": "COM",
"CG": "COG",
"CD": "COD",
"CK": "COK",
"CR": "CRI",
"CI": "CIV",
"HR": "HRV",
"CU": "CUB",
"CY": "CYP",
"CZ": "CZE",
"DK": "DNK",
"DJ": "DJI",
"DM": "DMA",
"DO": "DOM",
"EC": "ECU",
"EG": "EGY",
"SV": "SLV",
"GQ": "GNQ",
"ER": "ERI",
"EE": "EST",
"ET": "ETH",
"FK": "FLK",
"FO": "FRO",
"FJ": "FJI",
"FI": "FIN",
"FR": "FRA",
"GF": "GUF",
"PF": "PYF",
"TF": "ATF",
"GA": "GAB",
"GM": "GMB",
"GE": "GEO",
"DE": "DEU",
"GH": "GHA",
"GI": "GIB",
"GR": "GRC",
"GL": "GRL",
"GD": "GRD",
"GP": "GLP",
"GU": "GUM",
"GT": "GTM",
"GG": "GGY",
"GN": "GIN",
"GW": "GNB",
"GY": "GUY",
"HT": "HTI",
"HM": "HMD",
"VA": "VAT",
"HN": "HND",
"HU": "HUN",
"IS": "ISL",
"IN": "IND",
"ID": "IDN",
"IR": "IRN",
"IQ": "IRQ",
"IE": "IRL",
"IM": "IMN",
"IL": "ISR",
"IT": "ITA",
"JM": "JAM",
"JP": "JPN",
"JE": "JEY",
"JO": "JOR",
"KZ": "KAZ",
"KE": "KEN",
"KI": "KIR",
"KP": "PRK",
"KR": "KOR",
"KW": "KWT",
"KG": "KGZ",
"LA": "LAO",
"LV": "LVA",
"LB": "LBN",
"LS": "LSO",
"LR": "LBR",
"LY": "LBY",
"LI": "LIE",
"LT": "LTU",
"LU": "LUX",
"MK": "MKD",
"MG": "MDG",
"MW": "MWI",
"MY": "MYS",
"MV": "MDV",
"ML": "MLI",
"MT": "MLT",
"MH": "MHL",
"MQ": "MTQ",
"MR": "MRT",
"MU": "MUS",
"YT": "MYT",
"MX": "MEX",
"FM": "FSM",
"MD": "MDA",
"MC": "MCO",
"MN": "MNG",
"ME": "MNE",
"MS": "MSR",
"MA": "MAR",
"MZ": "MOZ",
"MM": "MMR",
"NA": "NAM",
"NR": "NRU",
"NP": "NPL",
"NL": "NLD",
"AN": "ANT",
"NC": "NCL",
"NZ": "NZL",
"NI": "NIC",
"NE": "NER",
"NG": "NGA",
"NU": "NIU",
"NF": "NFK",
"MP": "MNP",
"NO": "NOR",
"OM": "OMN",
"PK": "PAK",
"PW": "PLW",
"PS": "PSE",
"PA": "PAN",
"PG": "PNG",
"PY": "PRY",
"PE": "PER",
"PH": "PHL",
"PN": "PCN",
"PL": "POL",
"PT": "PRT",
"PR": "PRI",
"QA": "QAT",
"RE": "REU",
"RO": "ROU",
"RU": "RUS",
"RW": "RWA",
"BL": "BLM",
"SH": "SHN",
"KN": "KNA",
"LC": "LCA",
"MF": "MAF",
"PM": "SPM",
"VC": "VCT",
"WS": "WSM",
"SM": "SMR",
"ST": "STP",
"SA": "SAU",
"SN": "SEN",
"RS": "SRB",
"SC": "SYC",
"SL": "SLE",
"SG": "SGP",
"SK": "SVK",
"SI": "SVN",
"SB": "SLB",
"SO": "SOM",
"ZA": "ZAF",
"GS": "SGS",
"SS": "SSD",
"ES": "ESP",
"LK": "LKA",
"SD": "SDN",
<|fim▁hole|> "SR": "SUR",
"SJ": "SJM",
"SZ": "SWZ",
"SE": "SWE",
"CH": "CHE",
"SY": "SYR",
"TW": "TWN",
"TJ": "TJK",
"TZ": "TZA",
"TH": "THA",
"TL": "TLS",
"TG": "TGO",
"TK": "TKL",
"TO": "TON",
"TT": "TTO",
"TN": "TUN",
"TR": "TUR",
"TM": "TKM",
"TC": "TCA",
"TV": "TUV",
"UG": "UGA",
"UA": "UKR",
"AE": "ARE",
"GB": "GBR",
"US": "USA",
"UM": "UMI",
"UY": "URY",
"UZ": "UZB",
"VU": "VUT",
"VE": "VEN",
"VN": "VNM",
"VI": "VIR",
"WF": "WLF",
"EH": "ESH",
"YE": "YEM",
"ZM": "ZMB",
"ZW": "ZWE",
}<|fim▁end|> | |
<|file_name|>product_product.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from odoo import api, fields, models
class ProductProduct(models.Model):
_inherit = "product.product"
date_from = fields.Date(compute='_compute_product_margin_fields_values', string='Margin Date From')
date_to = fields.Date(compute='_compute_product_margin_fields_values', string='Margin Date To')
invoice_state = fields.Selection(compute='_compute_product_margin_fields_values',
selection=[
('paid', 'Paid'),
('open_paid', 'Open and Paid'),
('draft_open_paid', 'Draft, Open and Paid')
], string='Invoice State', readonly=True)
sale_avg_price = fields.Float(compute='_compute_product_margin_fields_values', string='Avg. Sale Unit Price',
help="Avg. Price in Customer Invoices.")
purchase_avg_price = fields.Float(compute='_compute_product_margin_fields_values', string='Avg. Purchase Unit Price',
help="Avg. Price in Vendor Bills ")
sale_num_invoiced = fields.Float(compute='_compute_product_margin_fields_values', string='# Invoiced in Sale',
help="Sum of Quantity in Customer Invoices")
purchase_num_invoiced = fields.Float(compute='_compute_product_margin_fields_values', string='# Invoiced in Purchase',
help="Sum of Quantity in Vendor Bills")
sales_gap = fields.Float(compute='_compute_product_margin_fields_values', string='Sales Gap',
help="Expected Sale - Turn Over")
purchase_gap = fields.Float(compute='_compute_product_margin_fields_values', string='Purchase Gap',
help="Normal Cost - Total Cost")
turnover = fields.Float(compute='_compute_product_margin_fields_values', string='Turnover',
help="Sum of Multiplication of Invoice price and quantity of Customer Invoices")
total_cost = fields.Float(compute='_compute_product_margin_fields_values', string='Total Cost',
help="Sum of Multiplication of Invoice price and quantity of Vendor Bills ")
sale_expected = fields.Float(compute='_compute_product_margin_fields_values', string='Expected Sale',
help="Sum of Multiplication of Sale Catalog price and quantity of Customer Invoices")
normal_cost = fields.Float(compute='_compute_product_margin_fields_values', string='Normal Cost',
help="Sum of Multiplication of Cost price and quantity of Vendor Bills")
total_margin = fields.Float(compute='_compute_product_margin_fields_values', string='Total Margin',
help="Turnover - Standard price")
expected_margin = fields.Float(compute='_compute_product_margin_fields_values', string='Expected Margin',
help="Expected Sale - Normal Cost")
total_margin_rate = fields.Float(compute='_compute_product_margin_fields_values', string='Total Margin Rate(%)',
help="Total margin * 100 / Turnover")
expected_margin_rate = fields.Float(compute='_compute_product_margin_fields_values', string='Expected Margin (%)',
help="Expected margin * 100 / Expected Sale")
@api.model
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
"""
Inherit read_group to calculate the sum of the non-stored fields, as it is not automatically done anymore through the XML.
"""
res = super(ProductProduct, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
        fields_list = ['turnover', 'sale_avg_price', 'purchase_avg_price', 'sale_num_invoiced', 'purchase_num_invoiced',
'sales_gap', 'purchase_gap', 'total_cost', 'sale_expected', 'normal_cost', 'total_margin',
'expected_margin', 'total_margin_rate', 'expected_margin_rate']
if any(x in fields for x in fields_list):
# Calculate first for every product in which line it needs to be applied
re_ind = 0
prod_re = {}
tot_products = self.browse([])
for re in res:
if re.get('__domain'):
products = self.search(re['__domain'])
tot_products |= products
for prod in products:
prod_re[prod.id] = re_ind
re_ind += 1
res_val = tot_products._compute_product_margin_fields_values(field_names=[x for x in fields if fields in fields_list])
for key in res_val:
for l in res_val[key]:
re = res[prod_re[key]]
if re.get(l):
re[l] += res_val[key][l]
else:
re[l] = res_val[key][l]
return res
def _compute_product_margin_fields_values(self, field_names=None):
res = {}
if field_names is None:
field_names = []
for val in self:
res[val.id] = {}
date_from = self.env.context.get('date_from', time.strftime('%Y-01-01'))
date_to = self.env.context.get('date_to', time.strftime('%Y-12-31'))
invoice_state = self.env.context.get('invoice_state', 'open_paid')
res[val.id]['date_from'] = date_from
res[val.id]['date_to'] = date_to
res[val.id]['invoice_state'] = invoice_state
states = ()
payment_states = ()
if invoice_state == 'paid':
states = ('posted',)
payment_states = ('paid',)
elif invoice_state == 'open_paid':
states = ('posted',)
payment_states = ('not_paid', 'paid')
elif invoice_state == 'draft_open_paid':
states = ('posted', 'draft')
payment_states = ('not_paid', 'paid')
company_id = self.env.company.id
#Cost price is calculated afterwards as it is a property
self.env['account.move.line'].flush(['price_unit', 'quantity', 'balance', 'product_id', 'display_type'])
self.env['account.move'].flush(['state', 'payment_state', 'move_type', 'invoice_date', 'company_id'])
self.env['product.template'].flush(['list_price'])
sqlstr = """
WITH currency_rate AS ({})
SELECT
SUM(l.price_unit / (CASE COALESCE(cr.rate, 0) WHEN 0 THEN 1.0 ELSE cr.rate END) * l.quantity) / NULLIF(SUM(l.quantity),0) AS avg_unit_price,
SUM(l.quantity * (CASE WHEN i.move_type IN ('out_invoice', 'in_invoice') THEN 1 ELSE -1 END)) AS num_qty,
SUM(ABS(l.balance) * (CASE WHEN i.move_type IN ('out_invoice', 'in_invoice') THEN 1 ELSE -1 END)) AS total,
SUM(l.quantity * pt.list_price * (CASE WHEN i.move_type IN ('out_invoice', 'in_invoice') THEN 1 ELSE -1 END)) AS sale_expected
FROM account_move_line l
LEFT JOIN account_move i ON (l.move_id = i.id)
LEFT JOIN product_product product ON (product.id=l.product_id)
LEFT JOIN product_template pt ON (pt.id = product.product_tmpl_id)
left join currency_rate cr on
(cr.currency_id = i.currency_id and
cr.company_id = i.company_id and
cr.date_start <= COALESCE(i.invoice_date, NOW()) and
(cr.date_end IS NULL OR cr.date_end > COALESCE(i.invoice_date, NOW())))
WHERE l.product_id = %s
AND i.state IN %s
AND i.payment_state IN %s
AND i.move_type IN %s
AND i.invoice_date BETWEEN %s AND %s
AND i.company_id = %s
AND l.display_type IS NULL
AND l.exclude_from_invoice_tab = false
""".format(self.env['res.currency']._select_companies_rates())
invoice_types = ('out_invoice', 'out_refund')
self.env.cr.execute(sqlstr, (val.id, states, payment_states, invoice_types, date_from, date_to, company_id))
result = self.env.cr.fetchall()[0]
res[val.id]['sale_avg_price'] = result[0] and result[0] or 0.0
res[val.id]['sale_num_invoiced'] = result[1] and result[1] or 0.0
res[val.id]['turnover'] = result[2] and result[2] or 0.0
res[val.id]['sale_expected'] = result[3] and result[3] or 0.0
res[val.id]['sales_gap'] = res[val.id]['sale_expected'] - res[val.id]['turnover']
invoice_types = ('in_invoice', 'in_refund')
self.env.cr.execute(sqlstr, (val.id, states, payment_states, invoice_types, date_from, date_to, company_id))<|fim▁hole|> res[val.id]['normal_cost'] = val.standard_price * res[val.id]['purchase_num_invoiced']
res[val.id]['purchase_gap'] = res[val.id]['normal_cost'] - res[val.id]['total_cost']
res[val.id]['total_margin'] = res[val.id]['turnover'] - res[val.id]['total_cost']
res[val.id]['expected_margin'] = res[val.id]['sale_expected'] - res[val.id]['normal_cost']
res[val.id]['total_margin_rate'] = res[val.id]['turnover'] and res[val.id]['total_margin'] * 100 / res[val.id]['turnover'] or 0.0
res[val.id]['expected_margin_rate'] = res[val.id]['sale_expected'] and res[val.id]['expected_margin'] * 100 / res[val.id]['sale_expected'] or 0.0
for k, v in res[val.id].items():
setattr(val, k, v)
return res<|fim▁end|> | result = self.env.cr.fetchall()[0]
res[val.id]['purchase_avg_price'] = result[0] and result[0] or 0.0
res[val.id]['purchase_num_invoiced'] = result[1] and result[1] or 0.0
res[val.id]['total_cost'] = result[2] and result[2] or 0.0 |
<|file_name|>hash.rs<|end_file_name|><|fim▁begin|>#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::hash::Hash;
use core::hash::Hasher;
use core::hash::SipHasher;
// pub struct SipHasher {
// k0: u64,
// k1: u64,
// length: usize, // how many bytes we've processed
// v0: u64, // hash state
// v1: u64,
// v2: u64,
// v3: u64,
// tail: u64, // unprocessed bytes le
// ntail: usize, // how many bytes in tail are valid
// }
// macro_rules! u8to64_le {
// ($buf:expr, $i:expr) =>
// ($buf[0+$i] as u64 |
// ($buf[1+$i] as u64) << 8 |
// ($buf[2+$i] as u64) << 16 |
// ($buf[3+$i] as u64) << 24 |
// ($buf[4+$i] as u64) << 32 |
// ($buf[5+$i] as u64) << 40 |
// ($buf[6+$i] as u64) << 48 |
// ($buf[7+$i] as u64) << 56);
// ($buf:expr, $i:expr, $len:expr) =>
// ({
// let mut t = 0;
// let mut out = 0;
// while t < $len {
// out |= ($buf[t+$i] as u64) << t*8;
// t += 1;
// }
// out
// });
// }
// macro_rules! rotl {
// ($x:expr, $b:expr) =>
// (($x << $b) | ($x >> (64_i32.wrapping_sub($b))))
// }
// macro_rules! compress {
// ($v0:expr, $v1:expr, $v2:expr, $v3:expr) =>
// ({
// $v0 = $v0.wrapping_add($v1); $v1 = rotl!($v1, 13); $v1 ^= $v0;
// $v0 = rotl!($v0, 32);
// $v2 = $v2.wrapping_add($v3); $v3 = rotl!($v3, 16); $v3 ^= $v2;
// $v0 = $v0.wrapping_add($v3); $v3 = rotl!($v3, 21); $v3 ^= $v0;
// $v2 = $v2.wrapping_add($v1); $v1 = rotl!($v1, 17); $v1 ^= $v2;
// $v2 = rotl!($v2, 32);
// })
// }
// impl SipHasher {
// /// Creates a new `SipHasher` with the two initial keys set to 0.
// #[inline]
// #[stable(feature = "rust1", since = "1.0.0")]
// pub fn new() -> SipHasher {
// SipHasher::new_with_keys(0, 0)
// }
//
// /// Creates a `SipHasher` that is keyed off the provided keys.
// #[inline]
// #[stable(feature = "rust1", since = "1.0.0")]
// pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
// let mut state = SipHasher {
// k0: key0,
// k1: key1,
// length: 0,
// v0: 0,
// v1: 0,
// v2: 0,
// v3: 0,
// tail: 0,
// ntail: 0,
// };
// state.reset();
// state
// }
//
// #[inline]
// fn reset(&mut self) {
// self.length = 0;
// self.v0 = self.k0 ^ 0x736f6d6570736575;
// self.v1 = self.k1 ^ 0x646f72616e646f6d;
// self.v2 = self.k0 ^ 0x6c7967656e657261;
// self.v3 = self.k1 ^ 0x7465646279746573;
// self.ntail = 0;
// }
//
// #[inline]
// fn write(&mut self, msg: &[u8]) {
// let length = msg.len();
// self.length += length;
//
// let mut needed = 0;
//
// if self.ntail != 0 {
// needed = 8 - self.ntail;
// if length < needed {
// self.tail |= u8to64_le!(msg, 0, length) << 8*self.ntail;
// self.ntail += length;
// return
// }
//
// let m = self.tail | u8to64_le!(msg, 0, needed) << 8*self.ntail;
//
// self.v3 ^= m;
// compress!(self.v0, self.v1, self.v2, self.v3);
// compress!(self.v0, self.v1, self.v2, self.v3);
// self.v0 ^= m;
//
// self.ntail = 0;
// }
//
// // Buffered tail is now flushed, process new input.
// let len = length - needed;
// let end = len & (!0x7);
// let left = len & 0x7;
//
// let mut i = needed;
// while i < end {
// let mi = u8to64_le!(msg, i);
//
// self.v3 ^= mi;
// compress!(self.v0, self.v1, self.v2, self.v3);
// compress!(self.v0, self.v1, self.v2, self.v3);
// self.v0 ^= mi;
//
// i += 8;
// }
//
// self.tail = u8to64_le!(msg, i, left);
// self.ntail = left;
// }
// }
// impl Hasher for SipHasher {
// #[inline]
// fn write(&mut self, msg: &[u8]) {
// self.write(msg)
// }
//
// #[inline]
// fn finish(&self) -> u64 {
// let mut v0 = self.v0;
// let mut v1 = self.v1;
// let mut v2 = self.v2;
// let mut v3 = self.v3;
//
// let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;
//
// v3 ^= b;
// compress!(v0, v1, v2, v3);
// compress!(v0, v1, v2, v3);<|fim▁hole|> // compress!(v0, v1, v2, v3);
// compress!(v0, v1, v2, v3);
// compress!(v0, v1, v2, v3);
//
// v0 ^ v1 ^ v2 ^ v3
// }
// }
// pub trait Hash {
// /// Feeds this value into the state given, updating the hasher as necessary.
// #[stable(feature = "rust1", since = "1.0.0")]
// fn hash<H: Hasher>(&self, state: &mut H);
//
// /// Feeds a slice of this type into the state provided.
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn hash_slice<H: Hasher>(data: &[Self], state: &mut H) where Self: Sized {
// for piece in data {
// piece.hash(state);
// }
// }
// }
// pub trait Hasher {
// /// Completes a round of hashing, producing the output hash generated.
// #[stable(feature = "rust1", since = "1.0.0")]
// fn finish(&self) -> u64;
//
// /// Writes some data into this `Hasher`
// #[stable(feature = "rust1", since = "1.0.0")]
// fn write(&mut self, bytes: &[u8]);
//
// /// Write a single `u8` into this hasher
// #[inline]
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn write_u8(&mut self, i: u8) { self.write(&[i]) }
// /// Write a single `u16` into this hasher.
// #[inline]
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn write_u16(&mut self, i: u16) {
// self.write(&unsafe { mem::transmute::<_, [u8; 2]>(i) })
// }
// /// Write a single `u32` into this hasher.
// #[inline]
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn write_u32(&mut self, i: u32) {
// self.write(&unsafe { mem::transmute::<_, [u8; 4]>(i) })
// }
// /// Write a single `u64` into this hasher.
// #[inline]
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn write_u64(&mut self, i: u64) {
// self.write(&unsafe { mem::transmute::<_, [u8; 8]>(i) })
// }
// /// Write a single `usize` into this hasher.
// #[inline]
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn write_usize(&mut self, i: usize) {
// if cfg!(target_pointer_width = "32") {
// self.write_u32(i as u32)
// } else {
// self.write_u64(i as u64)
// }
// }
//
// /// Write a single `i8` into this hasher.
// #[inline]
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn write_i8(&mut self, i: i8) { self.write_u8(i as u8) }
// /// Write a single `i16` into this hasher.
// #[inline]
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn write_i16(&mut self, i: i16) { self.write_u16(i as u16) }
// /// Write a single `i32` into this hasher.
// #[inline]
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn write_i32(&mut self, i: i32) { self.write_u32(i as u32) }
// /// Write a single `i64` into this hasher.
// #[inline]
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn write_i64(&mut self, i: i64) { self.write_u64(i as u64) }
// /// Write a single `isize` into this hasher.
// #[inline]
// #[unstable(feature = "hash", reason = "module was recently redesigned")]
// fn write_isize(&mut self, i: isize) { self.write_usize(i as usize) }
// }
// macro_rules! impl_write {
// ($(($ty:ident, $meth:ident),)*) => {$(
// #[stable(feature = "rust1", since = "1.0.0")]
// impl Hash for $ty {
// fn hash<H: Hasher>(&self, state: &mut H) {
// state.$meth(*self)
// }
//
// fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
// // FIXME(#23542) Replace with type ascription.
// #![allow(trivial_casts)]
// let newlen = data.len() * ::$ty::BYTES;
// let ptr = data.as_ptr() as *const u8;
// state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
// }
// }
// )*}
// }
// impl_write! {
// (u8, write_u8),
// (u16, write_u16),
// (u32, write_u32),
// (u64, write_u64),
// (usize, write_usize),
// (i8, write_i8),
// (i16, write_i16),
// (i32, write_i32),
// (i64, write_i64),
// (isize, write_isize),
// }
type H = SipHasher; // H: Hasher
#[test]
fn hash_test1() {
let mut state: H = <H>::new();
let finish: u64 = state.finish();
assert_eq!(finish, 0x1e924b9d737700d7);
let data: i64 = 0x0102030405060708;
data.hash::<H>(&mut state);
let finish: u64 = state.finish();
assert_eq!(finish, 0x6e30be5baa0e07fe);
}
}<|fim▁end|> | // v0 ^= b;
//
// v2 ^= 0xff;
// compress!(v0, v1, v2, v3); |
<|file_name|>StandardCountCalculatingTransformer.java<|end_file_name|><|fim▁begin|>package org.seasar.doma.internal.jdbc.dialect;
import org.seasar.doma.internal.jdbc.sql.SimpleSqlNodeVisitor;
import org.seasar.doma.internal.jdbc.sql.node.AnonymousNode;
import org.seasar.doma.jdbc.SqlNode;
public class StandardCountCalculatingTransformer extends SimpleSqlNodeVisitor<SqlNode, Void> {
protected boolean processed;
public SqlNode transform(SqlNode sqlNode) {
AnonymousNode result = new AnonymousNode();
for (SqlNode child : sqlNode.getChildren()) {
result.appendNode(child.accept(this, null));
}
return result;
}
@Override<|fim▁hole|> return node;
}
}<|fim▁end|> | protected SqlNode defaultAction(SqlNode node, Void p) { |
<|file_name|>labels.py<|end_file_name|><|fim▁begin|>import hashlib
import requests
import threading
import json
import sys
import traceback
import base64
import electrum_vtc as electrum
from electrum_vtc.plugins import BasePlugin, hook
from electrum_vtc.i18n import _
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.bauerj.eu'
self.wallets = {}
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,
msg.encode('utf8'))
return base64.b64encode(encrypted)
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]<|fim▁hole|>
def get_nonce(self, wallet):
# nonce is the nonce to be used with the next change
nonce = wallet.storage.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.print_error("set", wallet.basename(), "nonce to", nonce)
wallet.storage.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if not wallet in self.wallets:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
t = threading.Thread(target=self.do_request,
args=["POST", "/label", False, bundle])
t.setDaemon(True)
t.start()
# Caller will write the wallet
self.set_nonce(wallet, nonce + 1)
def do_request(self, method, url = "/labels", is_batch=False, data=None):
url = 'https://' + self.target_host + url
kwargs = {'headers': {}}
if method == 'GET' and data:
kwargs['params'] = data
elif method == 'POST' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['Content-Type'] = 'application/json'
response = requests.request(method, url, **kwargs)
if response.status_code != 200:
raise BaseException(response.status_code, response.text)
response = response.json()
if "error" in response:
raise BaseException(response["error"])
return response
def push_thread(self, wallet):
wallet_id = self.wallets[wallet][2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.iteritems():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.print_error('cannot encode', repr(key), repr(value))
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
self.do_request("POST", "/labels", True, bundle)
def pull_thread(self, wallet, force):
wallet_id = self.wallets[wallet][2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.print_error("asking for labels since nonce", nonce)
try:
response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
if response["labels"] is None:
self.print_error('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error('error: no json', key)
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.print_error("received %d labels" % len(response))
# do not write to disk because we're in a daemon thread
wallet.storage.put('labels', wallet.labels)
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.print_error("could not retrieve labels")
def start_wallet(self, wallet):
nonce = self.get_nonce(wallet)
self.print_error("wallet", wallet.basename(), "nonce is", nonce)
mpk = wallet.get_fingerprint()
if not mpk:
return
password = hashlib.sha1(mpk).digest().encode('hex')[:32]
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).digest().encode('hex')
self.wallets[wallet] = (password, iv, wallet_id)
# If there is an auth token we can try to actually start syncing
t = threading.Thread(target=self.pull_thread, args=(wallet, False))
t.setDaemon(True)
t.start()
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)<|fim▁end|> | decoded = base64.b64decode(message)
decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
return decrypted.decode('utf8') |
<|file_name|>device-information-service.js<|end_file_name|><|fim▁begin|>var util = require('util');
var bleno = require('bleno');
var BlenoPrimaryService = bleno.PrimaryService;
<|fim▁hole|>var ParkCharacteristic = require('./parkCharacteristic');
function DeviceInformationService() {
DeviceInformationService.super_.call(this, {
uuid: 'ec00',
characteristics: [
new ParkCharacteristic()
]
});
}
util.inherits(DeviceInformationService, BlenoPrimaryService);
module.exports = DeviceInformationService;<|fim▁end|> | |
<|file_name|>performance_test.py<|end_file_name|><|fim▁begin|>import copy
from nive.utils.dataPool2.mysql.tests import test_MySql
try:
from nive.utils.dataPool2.mysql.mySqlPool import *
except:
pass
from . import test_db
from nive.utils.dataPool2.sqlite.sqlite3Pool import *
mode = "mysql"
printed = [""]
def print_(*kw):
if type(kw)!=type(""):
v = ""
for a in kw:
v += " "+str(a)
else:
v = kw
if v == "":
print(".",)
printed.append("")
else:
printed[-1] += v
def getConnection():
if mode == "mysql":
c = MySqlConn(test_MySql.conn, 0)
print_( "MySQL -")
elif mode == "mysqlinno":
c = test_MySql.conn
c["dbName"] = "ut_dataPool2inno"
c = MySqlConn(c, 0)
print_( "MySQL InnoDB -")
else:
c = Sqlite3Conn(test_db.conn, 0)
print_( "Sqlite 3 -")
return c
def getPool():
if mode == "mysql":
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
pool.CreateConnection(test_MySql.conn)
print_( "MySQL -")
elif mode == "mysqlinno":
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
c = test_MySql.conn
c["dbName"] = "ut_dataPool2inno"
pool.CreateConnection(c)
print_( "MySQL InnoDB -")
else:
pool = Sqlite3(test_db.conf)
pool.SetStdMeta(copy.copy(test_db.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_db.struct)
pool.CreateConnection(test_db.conn)
print_( "Sqlite 3 -")
return pool
def empty():
#if mode == "mysql":
# test_MySql.emptypool()
#elif mode == "mysqlinno":
# test_MySql.emptypool()
#else:
# t_db.emptypool()
pass
def connects(n):
c = getConnection()
print_( "Connection: ")
t = time.time()
for i in range(0,n):
c.connect()
c.Close()
t2 = time.time()
print_( n, " connects in ", t2-t, "secs. ", (t2-t)/n, " per connect")
print_()
def cursors(n):
c = getConnection()
c.connect()
print_( "Cursor: ")
t = time.time()
for i in range(0,n):
cu = c.cursor()
cu.close()
t2 = time.time()
c.Close()
print_( n, " cursors in ", t2-t, "secs. ", (t2-t)/n, " per cursor")
print_()
def createsql(n):
pool = getPool()
print_( "Create SQL: ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"pool_type": "data1", "ftext": "", "fnumber": 3},
sort = "title, id, fnumber",
ascending = 0,
dataTable = "data1",
operators={"pool_type":"=", "ftext": "<>", "fnumber": ">"},
start=1,
max=123)
t2 = time.time()
pool.Close()
print_( n, " sql statements in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery1(n, start):
pool = getPool()
print_( "SQL Query data+meta (join no index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"pool_type": "data1", "ftext": "123", "fnumber": i+start},
sort = "title, id, fnumber",
ascending = 0,
dataTable = "data1",
operators={"pool_type":"=", "ftext": "LIKE", "fnumber": "!="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery2(n, start):
pool = getPool()
print_( "SQL Query data+meta=id (join index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"id": i+start},
sort = "title",
ascending = 0,
dataTable = "data1",
operators={},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery3(n, start):
pool = getPool()
print_( "SQL Query meta=id (index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i},
sort = "id",
ascending = 0,
dataTable = "pool_meta",
singleTable = 1,
operators={},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery4(n, start):
pool = getPool()
print_( "SQL Query meta=id+pool_type=data1 (index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i, "pool_type": "data1"},
sort = "id",
ascending = 0,
dataTable = "pool_meta",
singleTable = 1,
operators={"pool_type": "="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery5(n, start):
pool = getPool()
print_( "SQL Query meta=id+pool_type=data1+data.funit (join index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i, "pool_type": "data1", "funit": 35},
sort = "id",
ascending = 0,
dataTable = "data1",
operators={"pool_type": "="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery6(n):
pool = getPool()
print_( "SQL Query filename (text index): ")
t = time.time()
for i in range(0,n):
files = pool.SearchFilename("file1xxx.txt")
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def createentries(n):
pool = getPool()
print_( "Create entries (nodb): ")
t = time.time()
for i in range(0,n):
e=pool._GetPoolEntry(i, version=None, datatbl="data1", preload="skip", virtual=True)
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def checkentries(n):
pool = getPool()
print_( "Create entries (nodb) and check exists: ")
t = time.time()
for i in range(0,n):
e=pool._GetPoolEntry(i, version=None, datatbl="data1", preload="skip", virtual=True)
e.Exists()
t2 = time.time()
pool.Close()
print_( n, " checks in ", t2-t, "secs. ", (t2-t)/n, " per check")
print_()
<|fim▁hole|> pool = getPool()
print_( "Create entries (nodata): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
#e.data.update(data1_1)
#e.meta.update(meta1)
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def createentries3(n):
pool = getPool()
print_( "Create entries (data+meta): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
if i==0: id = e.GetID()
e.data.update(test_MySql.data1_1)
e.meta.update(test_MySql.meta1)
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
return id
def createentries4(n):
pool = getPool()
print_( "Create entries (data+meta+file): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
if i==0: id = e.GetID()
e.data.update(test_MySql.data1_1)
e.meta.update(test_MySql.meta1)
e.CommitFile("file1", {"file":test_MySql.file1_1, "filename": "file1.txt"})
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
return id
def getentries1(n, start):
pool = getPool()
print_( "Get entries (all): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start)
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getentries2(n, start):
pool = getPool()
print_( "Get entries (all+file): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start, preload="all")
f=e.GetFile("file1")
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getentries5(n, start):
pool = getPool()
print_( "Get entries (all+filestream): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start, preload="all")
#f=e.GetFile("file1")
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getentries4(n, start):
pool = getPool()
print_( "Get entries (meta): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start, preload="meta")
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getbatch1(n, start):
pool = getPool()
print_( "Get batch (no preload): ")
t = time.time()
ids = []
for i in range(0,n):
ids.append(i+start)
e=pool.GetBatch(ids, preload="skip")
t2 = time.time()
del e
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getbatch2(n, start):
pool = getPool()
print_( "Get batch (meta): ")
t = time.time()
ids = []
for i in range(0,n):
ids.append(i+start)
e=pool.GetBatch(ids, preload="meta")
t2 = time.time()
del e
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getbatch3(n, start):
pool = getPool()
print_( "Get batch (all): ")
t = time.time()
ids = []
for i in range(0,n):
ids.append(i+start)
e=pool.GetBatch(ids, preload="all")
t2 = time.time()
del e
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def delentries(n, start):
pool = getPool()
print_( "Delete entries (meta+data): ")
t = time.time()
for i in range(0,n):
pool.DeleteEntry(i+start)
pool.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def delentries2(n, start):
pool = getPool()
print_( "Delete entries (meta+data+file): ")
t = time.time()
for i in range(0,n):
pool.DeleteEntry(i+start)
pool.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def report(modes, printed):
rep=[]
c = len(printed)/len(modes)
for n in range(0, c):
p = 0
for m in modes:
rep.append(printed[p*c+n])
p+=1
print()
print()
i=0
for p in rep:
print(p)
i+=1
if i==len(modes):
print()
i=0
def run(modes):
global mode , printed
n = 1000
printed = [""]
for m in modes:
mode = m
print()
print(mode,)
empty()
connects(n)
cursors(n)
createsql(n)
createentries(n)
checkentries(n)
createentries2(n)
id = createentries3(n)
id2 = createentries4(n)
getentries1(n, id2)
getentries2(n, id2)
getentries5(n, id2)
getentries4(n, id2)
getbatch1(n, id2)
getbatch2(n, id2)
getbatch3(n, id2)
sqlquery1(n, id2)
sqlquery2(n, id)
sqlquery3(n, id2)
sqlquery4(n, id)
sqlquery5(n, id2)
sqlquery6(n)
delentries(n, id)
delentries2(n, id2)
report(modes, printed)
if __name__ == '__main__':
#run(("sqlite3",))
run(("sqlite3","mysql","mysqlinno"))<|fim▁end|> | def createentries2(n): |
<|file_name|>bs_set.hpp<|end_file_name|><|fim▁begin|>/////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2013-2013
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/intrusive for documentation.
//
/////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_INTRUSIVE_BS_SET_HPP
#define BOOST_INTRUSIVE_BS_SET_HPP
#include <boost/intrusive/detail/config_begin.hpp>
#include <boost/intrusive/intrusive_fwd.hpp>
#include <boost/intrusive/detail/mpl.hpp>
#include <boost/intrusive/bstree.hpp>
#include <iterator>
#include <boost/move/move.hpp>
namespace boost {
namespace intrusive {
//! The class template bs_set is an intrusive container, that mimics most of
//! the interface of std::set as described in the C++ standard.
//!
//! The template parameter \c T is the type to be managed by the container.
//! The user can specify additional options and if no options are provided
//! default options are used.
//!
//! The container supports the following options:
//! \c base_hook<>/member_hook<>/value_traits<>,
//! \c constant_time_size<>, \c size_type<> and
//! \c compare<>.
#if defined(BOOST_INTRUSIVE_DOXYGEN_INVOKED)
template<class T, class ...Options>
#else
template<class ValueTraits, class Compare, class SizeType, bool ConstantTimeSize, typename HeaderHolder>
#endif
class bs_set_impl
#ifndef BOOST_INTRUSIVE_DOXYGEN_INVOKED
: public bstree_impl<ValueTraits, Compare, SizeType, ConstantTimeSize, BsTreeAlgorithms, HeaderHolder>
#endif
{
/// @cond
typedef bstree_impl<ValueTraits, Compare, SizeType, ConstantTimeSize, BsTreeAlgorithms, HeaderHolder> tree_type;
BOOST_MOVABLE_BUT_NOT_COPYABLE(bs_set_impl)
typedef tree_type implementation_defined;
/// @endcond
public:
typedef typename implementation_defined::value_type value_type;
typedef typename implementation_defined::value_traits value_traits;
typedef typename implementation_defined::pointer pointer;
typedef typename implementation_defined::const_pointer const_pointer;
typedef typename implementation_defined::reference reference;
typedef typename implementation_defined::const_reference const_reference;
typedef typename implementation_defined::difference_type difference_type;
typedef typename implementation_defined::size_type size_type;
typedef typename implementation_defined::value_compare value_compare;
typedef typename implementation_defined::key_compare key_compare;
typedef typename implementation_defined::iterator iterator;
typedef typename implementation_defined::const_iterator const_iterator;
typedef typename implementation_defined::reverse_iterator reverse_iterator;
typedef typename implementation_defined::const_reverse_iterator const_reverse_iterator;
typedef typename implementation_defined::insert_commit_data insert_commit_data;
typedef typename implementation_defined::node_traits node_traits;
typedef typename implementation_defined::node node;
typedef typename implementation_defined::node_ptr node_ptr;
typedef typename implementation_defined::const_node_ptr const_node_ptr;
typedef typename implementation_defined::node_algorithms node_algorithms;
static const bool constant_time_size = tree_type::constant_time_size;
public:
//! @copydoc ::boost::intrusive::bstree::bstree(const value_compare &,const value_traits &)
explicit bs_set_impl( const value_compare &cmp = value_compare()
, const value_traits &v_traits = value_traits())
: tree_type(cmp, v_traits)
{}
//! @copydoc ::boost::intrusive::bstree::bstree(bool,Iterator,Iterator,const value_compare &,const value_traits &)
template<class Iterator>
bs_set_impl( Iterator b, Iterator e
, const value_compare &cmp = value_compare()
, const value_traits &v_traits = value_traits())
: tree_type(true, b, e, cmp, v_traits)
{}
//! @copydoc ::boost::intrusive::bstree::bstree(bstree &&)
bs_set_impl(BOOST_RV_REF(bs_set_impl) x)
: tree_type(::boost::move(static_cast<tree_type&>(x)))
{}
//! @copydoc ::boost::intrusive::bstree::operator=(bstree &&)
bs_set_impl& operator=(BOOST_RV_REF(bs_set_impl) x)
{ return static_cast<bs_set_impl&>(tree_type::operator=(::boost::move(static_cast<tree_type&>(x)))); }
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree::~bstree()
~bs_set_impl();
//! @copydoc ::boost::intrusive::bstree::begin()
iterator begin();
//! @copydoc ::boost::intrusive::bstree::begin()const
const_iterator begin() const;
//! @copydoc ::boost::intrusive::bstree::cbegin()const
const_iterator cbegin() const;
//! @copydoc ::boost::intrusive::bstree::end()
iterator end();
//! @copydoc ::boost::intrusive::bstree::end()const
const_iterator end() const;
//! @copydoc ::boost::intrusive::bstree::cend()const
const_iterator cend() const;
//! @copydoc ::boost::intrusive::bstree::rbegin()
reverse_iterator rbegin();
//! @copydoc ::boost::intrusive::bstree::rbegin()const
const_reverse_iterator rbegin() const;
//! @copydoc ::boost::intrusive::bstree::crbegin()const
const_reverse_iterator crbegin() const;
//! @copydoc ::boost::intrusive::bstree::rend()
reverse_iterator rend();
//! @copydoc ::boost::intrusive::bstree::rend()const
const_reverse_iterator rend() const;
//! @copydoc ::boost::intrusive::bstree::crend()const
const_reverse_iterator crend() const;
//! @copydoc ::boost::intrusive::bstree::container_from_end_iterator(iterator)
static bs_set_impl &container_from_end_iterator(iterator end_iterator);
//! @copydoc ::boost::intrusive::bstree::container_from_end_iterator(const_iterator)
static const bs_set_impl &container_from_end_iterator(const_iterator end_iterator);
//! @copydoc ::boost::intrusive::bstree::container_from_iterator(iterator)
static bs_set_impl &container_from_iterator(iterator it);
//! @copydoc ::boost::intrusive::bstree::container_from_iterator(const_iterator)
static const bs_set_impl &container_from_iterator(const_iterator it);
//! @copydoc ::boost::intrusive::bstree::key_comp()const
key_compare key_comp() const;
//! @copydoc ::boost::intrusive::bstree::value_comp()const
value_compare value_comp() const;
//! @copydoc ::boost::intrusive::bstree::empty()const
bool empty() const;
//! @copydoc ::boost::intrusive::bstree::size()const
size_type size() const;
//! @copydoc ::boost::intrusive::bstree::swap
void swap(bs_set_impl& other);
//! @copydoc ::boost::intrusive::bstree::clone_from
template <class Cloner, class Disposer>
void clone_from(const bs_set_impl &src, Cloner cloner, Disposer disposer);
#endif //#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree::insert_unique(reference)
std::pair<iterator, bool> insert(reference value)
{ return tree_type::insert_unique(value); }
//! @copydoc ::boost::intrusive::bstree::insert_unique(const_iterator,reference)
iterator insert(const_iterator hint, reference value)
{ return tree_type::insert_unique(hint, value); }
//! @copydoc ::boost::intrusive::bstree::insert_unique_check(const KeyType&,KeyValueCompare,insert_commit_data&)
template<class KeyType, class KeyValueCompare>
std::pair<iterator, bool> insert_check
(const KeyType &key, KeyValueCompare key_value_comp, insert_commit_data &commit_data)
{ return tree_type::insert_unique_check(key, key_value_comp, commit_data); }
//! @copydoc ::boost::intrusive::bstree::insert_unique_check(const_iterator,const KeyType&,KeyValueCompare,insert_commit_data&)
template<class KeyType, class KeyValueCompare>
std::pair<iterator, bool> insert_check
(const_iterator hint, const KeyType &key
,KeyValueCompare key_value_comp, insert_commit_data &commit_data)
{ return tree_type::insert_unique_check(hint, key, key_value_comp, commit_data); }
//! @copydoc ::boost::intrusive::bstree::insert_unique(Iterator,Iterator)
template<class Iterator>
void insert(Iterator b, Iterator e)
{ tree_type::insert_unique(b, e); }
//! @copydoc ::boost::intrusive::bstree::insert_unique_commit
iterator insert_commit(reference value, const insert_commit_data &commit_data)
{ return tree_type::insert_unique_commit(value, commit_data); }
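// A hedged sketch of the check/commit protocol formed by insert_check and
// insert_commit above (the key, comparator and MyValue type are illustrative,
// and ownership/disposal of the allocated value is not shown):
//
//    insert_commit_data commit_data;
//    std::pair<iterator, bool> r = s.insert_check(key, key_cmp, commit_data);
//    if(r.second){
//       //no equivalent value found: construct the expensive value only now
//       s.insert_commit(*(new MyValue(key)), commit_data);
//    }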
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree::insert_before
iterator insert_before(const_iterator pos, reference value);
//! @copydoc ::boost::intrusive::bstree::push_back
void push_back(reference value);
//! @copydoc ::boost::intrusive::bstree::push_front
void push_front(reference value);
//! @copydoc ::boost::intrusive::bstree::erase(const_iterator)
iterator erase(const_iterator i);
//! @copydoc ::boost::intrusive::bstree::erase(const_iterator,const_iterator)
iterator erase(const_iterator b, const_iterator e);
//! @copydoc ::boost::intrusive::bstree::erase(const_reference)
size_type erase(const_reference value);
//! @copydoc ::boost::intrusive::bstree::erase(const KeyType&,KeyValueCompare)
template<class KeyType, class KeyValueCompare>
size_type erase(const KeyType& key, KeyValueCompare comp);
//! @copydoc ::boost::intrusive::bstree::erase_and_dispose(const_iterator,Disposer)
template<class Disposer>
iterator erase_and_dispose(const_iterator i, Disposer disposer);
//! @copydoc ::boost::intrusive::bstree::erase_and_dispose(const_iterator,const_iterator,Disposer)
template<class Disposer>
iterator erase_and_dispose(const_iterator b, const_iterator e, Disposer disposer);
//! @copydoc ::boost::intrusive::bstree::erase_and_dispose(const_reference, Disposer)
template<class Disposer>
size_type erase_and_dispose(const_reference value, Disposer disposer);
//! @copydoc ::boost::intrusive::bstree::erase_and_dispose(const KeyType&,KeyValueCompare,Disposer)
template<class KeyType, class KeyValueCompare, class Disposer>
size_type erase_and_dispose(const KeyType& key, KeyValueCompare comp, Disposer disposer);
//! @copydoc ::boost::intrusive::bstree::clear
void clear();
//! @copydoc ::boost::intrusive::bstree::clear_and_dispose
template<class Disposer>
void clear_and_dispose(Disposer disposer);
#endif // #ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree::count(const_reference)const
size_type count(const_reference value) const
{ return static_cast<size_type>(this->tree_type::find(value) != this->tree_type::cend()); }
//! @copydoc ::boost::intrusive::bstree::count(const KeyType&,KeyValueCompare)const
template<class KeyType, class KeyValueCompare>
size_type count(const KeyType& key, KeyValueCompare comp) const
{ return static_cast<size_type>(this->tree_type::find(key, comp) != this->tree_type::cend()); }
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree::lower_bound(const_reference)
iterator lower_bound(const_reference value);
//! @copydoc ::boost::intrusive::bstree::lower_bound(const KeyType&,KeyValueCompare)
template<class KeyType, class KeyValueCompare>
iterator lower_bound(const KeyType& key, KeyValueCompare comp);
//! @copydoc ::boost::intrusive::bstree::lower_bound(const_reference)const
const_iterator lower_bound(const_reference value) const;
//! @copydoc ::boost::intrusive::bstree::lower_bound(const KeyType&,KeyValueCompare)const
template<class KeyType, class KeyValueCompare>
const_iterator lower_bound(const KeyType& key, KeyValueCompare comp) const;
//! @copydoc ::boost::intrusive::bstree::upper_bound(const_reference)
iterator upper_bound(const_reference value);
//! @copydoc ::boost::intrusive::bstree::upper_bound(const KeyType&,KeyValueCompare)
template<class KeyType, class KeyValueCompare>
iterator upper_bound(const KeyType& key, KeyValueCompare comp);
//! @copydoc ::boost::intrusive::bstree::upper_bound(const_reference)const
const_iterator upper_bound(const_reference value) const;
//! @copydoc ::boost::intrusive::bstree::upper_bound(const KeyType&,KeyValueCompare)const
template<class KeyType, class KeyValueCompare>
const_iterator upper_bound(const KeyType& key, KeyValueCompare comp) const;
//! @copydoc ::boost::intrusive::bstree::find(const_reference)
iterator find(const_reference value);
//! @copydoc ::boost::intrusive::bstree::find(const KeyType&,KeyValueCompare)
template<class KeyType, class KeyValueCompare>
iterator find(const KeyType& key, KeyValueCompare comp);
//! @copydoc ::boost::intrusive::bstree::find(const_reference)const
const_iterator find(const_reference value) const;
//! @copydoc ::boost::intrusive::bstree::find(const KeyType&,KeyValueCompare)const
template<class KeyType, class KeyValueCompare>
const_iterator find(const KeyType& key, KeyValueCompare comp) const;
#endif // #ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree::equal_range(const_reference)
std::pair<iterator,iterator> equal_range(const_reference value)
{ return this->tree_type::lower_bound_range(value); }
//! @copydoc ::boost::intrusive::bstree::equal_range(const KeyType&,KeyValueCompare)
template<class KeyType, class KeyValueCompare>
std::pair<iterator,iterator> equal_range(const KeyType& key, KeyValueCompare comp)
{ return this->tree_type::lower_bound_range(key, comp); }
//! @copydoc ::boost::intrusive::bstree::equal_range(const_reference)const
std::pair<const_iterator, const_iterator>
equal_range(const_reference value) const
{ return this->tree_type::lower_bound_range(value); }
//! @copydoc ::boost::intrusive::bstree::equal_range(const KeyType&,KeyValueCompare)const
template<class KeyType, class KeyValueCompare>
std::pair<const_iterator, const_iterator>
equal_range(const KeyType& key, KeyValueCompare comp) const
{ return this->tree_type::lower_bound_range(key, comp); }
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree::bounded_range(const_reference,const_reference,bool,bool)
std::pair<iterator,iterator> bounded_range
(const_reference lower_value, const_reference upper_value, bool left_closed, bool right_closed);
//! @copydoc ::boost::intrusive::bstree::bounded_range(const KeyType&,const KeyType&,KeyValueCompare,bool,bool)
template<class KeyType, class KeyValueCompare>
std::pair<iterator,iterator> bounded_range
(const KeyType& lower_key, const KeyType& upper_key, KeyValueCompare comp, bool left_closed, bool right_closed);
//! @copydoc ::boost::intrusive::bstree::bounded_range(const_reference,const_reference,bool,bool)const
std::pair<const_iterator, const_iterator>
bounded_range(const_reference lower_value, const_reference upper_value, bool left_closed, bool right_closed) const;
//! @copydoc ::boost::intrusive::bstree::bounded_range(const KeyType&,const KeyType&,KeyValueCompare,bool,bool)const
template<class KeyType, class KeyValueCompare>
std::pair<const_iterator, const_iterator> bounded_range
(const KeyType& lower_key, const KeyType& upper_key, KeyValueCompare comp, bool left_closed, bool right_closed) const;
//! @copydoc ::boost::intrusive::bstree::s_iterator_to(reference)
static iterator s_iterator_to(reference value);
//! @copydoc ::boost::intrusive::bstree::s_iterator_to(const_reference)
static const_iterator s_iterator_to(const_reference value);
//! @copydoc ::boost::intrusive::bstree::iterator_to(reference)
iterator iterator_to(reference value);
//! @copydoc ::boost::intrusive::bstree::iterator_to(const_reference)const
const_iterator iterator_to(const_reference value) const;
//! @copydoc ::boost::intrusive::bstree::init_node(reference)
static void init_node(reference value);
//! @copydoc ::boost::intrusive::bstree::unlink_leftmost_without_rebalance
pointer unlink_leftmost_without_rebalance();
//! @copydoc ::boost::intrusive::bstree::replace_node
void replace_node(iterator replace_this, reference with_this);
//! @copydoc ::boost::intrusive::bstree::remove_node
void remove_node(reference value);
#endif //#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
};
#if defined(BOOST_INTRUSIVE_DOXYGEN_INVOKED)
template<class T, class ...Options>
bool operator!= (const bs_set_impl<T, Options...> &x, const bs_set_impl<T, Options...> &y);
template<class T, class ...Options>
bool operator>(const bs_set_impl<T, Options...> &x, const bs_set_impl<T, Options...> &y);
template<class T, class ...Options>
bool operator<=(const bs_set_impl<T, Options...> &x, const bs_set_impl<T, Options...> &y);
template<class T, class ...Options>
bool operator>=(const bs_set_impl<T, Options...> &x, const bs_set_impl<T, Options...> &y);
template<class T, class ...Options>
void swap(bs_set_impl<T, Options...> &x, bs_set_impl<T, Options...> &y);
#endif //#if defined(BOOST_INTRUSIVE_DOXYGEN_INVOKED)
//! Helper metafunction to define a \c bs_set that yields the same type when the
//! same options (either explicitly or implicitly) are used.
#if defined(BOOST_INTRUSIVE_DOXYGEN_INVOKED) || defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES)
template<class T, class ...Options>
#else
template<class T, class O1 = void, class O2 = void
, class O3 = void, class O4 = void
, class O5 = void>
#endif
struct make_bs_set
{
/// @cond
typedef typename pack_options
< bstree_defaults,
#if !defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES)
O1, O2, O3, O4, O5
#else
Options...
#endif
>::type packed_options;
typedef typename detail::get_value_traits
<T, typename packed_options::proto_value_traits>::type value_traits;
typedef typename detail::get_header_holder_type
< value_traits, typename packed_options::header_holder_type >::type header_holder_type;
typedef bs_set_impl
< value_traits
, typename packed_options::compare
, typename packed_options::size_type
, packed_options::constant_time_size
, header_holder_type
> implementation_defined;
/// @endcond
typedef implementation_defined type;
};
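// A hedged example of using the metafunction above (the option shown is
// illustrative):
//
//    typedef make_bs_set< MyValue, constant_time_size<false> >::type MySetType;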
#ifndef BOOST_INTRUSIVE_DOXYGEN_INVOKED
#if !defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES)
template<class T, class O1, class O2, class O3, class O4, class O5>
#else
template<class T, class ...Options>
#endif
class bs_set
: public make_bs_set<T,
#if !defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES)
O1, O2, O3, O4, O5
#else
Options...
#endif
>::type
{
typedef typename make_bs_set
<T,
#if !defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES)
O1, O2, O3, O4, O5
#else
Options...
#endif
>::type Base;
BOOST_MOVABLE_BUT_NOT_COPYABLE(bs_set)
public:
typedef typename Base::value_compare value_compare;
typedef typename Base::value_traits value_traits;
typedef typename Base::iterator iterator;
typedef typename Base::const_iterator const_iterator;
//Assert that the passed value traits are compatible with the value type
BOOST_STATIC_ASSERT((detail::is_same<typename value_traits::value_type, T>::value));
explicit bs_set( const value_compare &cmp = value_compare()
, const value_traits &v_traits = value_traits())
: Base(cmp, v_traits)
{}
template<class Iterator>
bs_set( Iterator b, Iterator e
, const value_compare &cmp = value_compare()
, const value_traits &v_traits = value_traits())
: Base(b, e, cmp, v_traits)
{}
bs_set(BOOST_RV_REF(bs_set) x)
: Base(::boost::move(static_cast<Base&>(x)))
{}
bs_set& operator=(BOOST_RV_REF(bs_set) x)
{ return static_cast<bs_set &>(this->Base::operator=(::boost::move(static_cast<Base&>(x)))); }
static bs_set &container_from_end_iterator(iterator end_iterator)
{ return static_cast<bs_set &>(Base::container_from_end_iterator(end_iterator)); }
static const bs_set &container_from_end_iterator(const_iterator end_iterator)
{ return static_cast<const bs_set &>(Base::container_from_end_iterator(end_iterator)); }
static bs_set &container_from_iterator(iterator it)
{ return static_cast<bs_set &>(Base::container_from_iterator(it)); }
static const bs_set &container_from_iterator(const_iterator it)
{ return static_cast<const bs_set &>(Base::container_from_iterator(it)); }
};
#endif
//! The class template bs_multiset is an intrusive container, that mimics most of
//! the interface of std::multiset as described in the C++ standard.
//!
//! The template parameter \c T is the type to be managed by the container.
//! The user can specify additional options and if no options are provided
//! default options are used.
//!
//! The container supports the following options:
//! \c base_hook<>/member_hook<>/value_traits<>,
//! \c constant_time_size<>, \c size_type<> and
//! \c compare<>.
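//!
//! Unlike \c bs_set, equivalent keys are allowed; a hedged sketch reusing the
//! hypothetical \c MyValue type from the \c bs_set example above:
//! \code
//! bs_multiset<MyValue> ms;
//! MyValue a(1), b(1);
//! ms.insert(a);
//! ms.insert(b);   //both elements with key 1 are kept
//! \endcode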
#if defined(BOOST_INTRUSIVE_DOXYGEN_INVOKED)
template<class T, class ...Options>
#else
template<class ValueTraits, class Compare, class SizeType, bool ConstantTimeSize, typename HeaderHolder>
#endif
class bs_multiset_impl
#ifndef BOOST_INTRUSIVE_DOXYGEN_INVOKED
: public bstree_impl<ValueTraits, Compare, SizeType, ConstantTimeSize, BsTreeAlgorithms, HeaderHolder>
#endif
{
/// @cond
typedef bstree_impl<ValueTraits, Compare, SizeType, ConstantTimeSize, BsTreeAlgorithms, HeaderHolder> tree_type;
BOOST_MOVABLE_BUT_NOT_COPYABLE(bs_multiset_impl)
typedef tree_type implementation_defined;
/// @endcond
public:
typedef typename implementation_defined::value_type value_type;
typedef typename implementation_defined::value_traits value_traits;
typedef typename implementation_defined::pointer pointer;
typedef typename implementation_defined::const_pointer const_pointer;
typedef typename implementation_defined::reference reference;
typedef typename implementation_defined::const_reference const_reference;
typedef typename implementation_defined::difference_type difference_type;
typedef typename implementation_defined::size_type size_type;
typedef typename implementation_defined::value_compare value_compare;
typedef typename implementation_defined::key_compare key_compare;
typedef typename implementation_defined::iterator iterator;
typedef typename implementation_defined::const_iterator const_iterator;
typedef typename implementation_defined::reverse_iterator reverse_iterator;
typedef typename implementation_defined::const_reverse_iterator const_reverse_iterator;
typedef typename implementation_defined::insert_commit_data insert_commit_data;
typedef typename implementation_defined::node_traits node_traits;
typedef typename implementation_defined::node node;
typedef typename implementation_defined::node_ptr node_ptr;
typedef typename implementation_defined::const_node_ptr const_node_ptr;
typedef typename implementation_defined::node_algorithms node_algorithms;
static const bool constant_time_size = tree_type::constant_time_size;
public:
//! @copydoc ::boost::intrusive::bstree::bstree(const value_compare &,const value_traits &)
explicit bs_multiset_impl( const value_compare &cmp = value_compare()
, const value_traits &v_traits = value_traits())
: tree_type(cmp, v_traits)
{}
//! @copydoc ::boost::intrusive::bstree::bstree(bool,Iterator,Iterator,const value_compare &,const value_traits &)
template<class Iterator>
bs_multiset_impl( Iterator b, Iterator e
, const value_compare &cmp = value_compare()
, const value_traits &v_traits = value_traits())
: tree_type(false, b, e, cmp, v_traits)
{}
//! @copydoc ::boost::intrusive::bstree::bstree(bstree &&)
bs_multiset_impl(BOOST_RV_REF(bs_multiset_impl) x)
: tree_type(::boost::move(static_cast<tree_type&>(x)))
{}
//! @copydoc ::boost::intrusive::bstree::operator=(bstree &&)
bs_multiset_impl& operator=(BOOST_RV_REF(bs_multiset_impl) x)
{ return static_cast<bs_multiset_impl&>(tree_type::operator=(::boost::move(static_cast<tree_type&>(x)))); }
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree::~bstree()
~bs_multiset_impl();
//! @copydoc ::boost::intrusive::bstree::begin()
iterator begin();
//! @copydoc ::boost::intrusive::bstree::begin()const
const_iterator begin() const;
//! @copydoc ::boost::intrusive::bstree::cbegin()const
const_iterator cbegin() const;
//! @copydoc ::boost::intrusive::bstree::end()
iterator end();
//! @copydoc ::boost::intrusive::bstree::end()const
const_iterator end() const;
//! @copydoc ::boost::intrusive::bstree::cend()const
const_iterator cend() const;
//! @copydoc ::boost::intrusive::bstree::rbegin()
reverse_iterator rbegin();
//! @copydoc ::boost::intrusive::bstree::rbegin()const
const_reverse_iterator rbegin() const;
//! @copydoc ::boost::intrusive::bstree::crbegin()const
const_reverse_iterator crbegin() const;
//! @copydoc ::boost::intrusive::bstree::rend()
reverse_iterator rend();
//! @copydoc ::boost::intrusive::bstree::rend()const
const_reverse_iterator rend() const;
//! @copydoc ::boost::intrusive::bstree::crend()const
const_reverse_iterator crend() const;
//! @copydoc ::boost::intrusive::bstree::container_from_end_iterator(iterator)
static bs_multiset_impl &container_from_end_iterator(iterator end_iterator);
//! @copydoc ::boost::intrusive::bstree::container_from_end_iterator(const_iterator)
static const bs_multiset_impl &container_from_end_iterator(const_iterator end_iterator);
//! @copydoc ::boost::intrusive::bstree::container_from_iterator(iterator)
static bs_multiset_impl &container_from_iterator(iterator it);
//! @copydoc ::boost::intrusive::bstree::container_from_iterator(const_iterator)
static const bs_multiset_impl &container_from_iterator(const_iterator it);
//! @copydoc ::boost::intrusive::bstree::key_comp()const
key_compare key_comp() const;
//! @copydoc ::boost::intrusive::bstree::value_comp()const
value_compare value_comp() const;
//! @copydoc ::boost::intrusive::bstree::empty()const
bool empty() const;
//! @copydoc ::boost::intrusive::bstree::size()const
size_type size() const;
//! @copydoc ::boost::intrusive::bstree::swap
void swap(bs_multiset_impl& other);
//! @copydoc ::boost::intrusive::bstree::clone_from
template <class Cloner, class Disposer>
void clone_from(const bs_multiset_impl &src, Cloner cloner, Disposer disposer);
#endif //#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree::insert_equal(reference)
iterator insert(reference value)
{ return tree_type::insert_equal(value); }
//! @copydoc ::boost::intrusive::bstree::insert_equal(const_iterator,reference)
iterator insert(const_iterator hint, reference value)
{ return tree_type::insert_equal(hint, value); }
//! @copydoc ::boost::intrusive::bstree::insert_equal(Iterator,Iterator)
template<class Iterator>
void insert(Iterator b, Iterator e)
{ tree_type::insert_equal(b, e); }
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree::insert_before
iterator insert_before(const_iterator pos, reference value);
//! @copydoc ::boost::intrusive::bstree::push_back
void push_back(reference value);
//! @copydoc ::boost::intrusive::bstree::push_front
void push_front(reference value);<|fim▁hole|> //! @copydoc ::boost::intrusive::bstree::erase(const_iterator)
iterator erase(const_iterator i);
//! @copydoc ::boost::intrusive::bstree::erase(const_iterator,const_iterator)
iterator erase(const_iterator b, const_iterator e);
//! @copydoc ::boost::intrusive::bstree::erase(const_reference)
size_type erase(const_reference value);
//! @copydoc ::boost::intrusive::bstree::erase(const KeyType&,KeyValueCompare)
template<class KeyType, class KeyValueCompare>
size_type erase(const KeyType& key, KeyValueCompare comp);
//! @copydoc ::boost::intrusive::bstree::erase_and_dispose(const_iterator,Disposer)
template<class Disposer>
iterator erase_and_dispose(const_iterator i, Disposer disposer);
//! @copydoc ::boost::intrusive::bstree::erase_and_dispose(const_iterator,const_iterator,Disposer)
template<class Disposer>
iterator erase_and_dispose(const_iterator b, const_iterator e, Disposer disposer);
//! @copydoc ::boost::intrusive::bstree::erase_and_dispose(const_reference, Disposer)
template<class Disposer>
size_type erase_and_dispose(const_reference value, Disposer disposer);
//! @copydoc ::boost::intrusive::bstree::erase_and_dispose(const KeyType&,KeyValueCompare,Disposer)
template<class KeyType, class KeyValueCompare, class Disposer>
size_type erase_and_dispose(const KeyType& key, KeyValueCompare comp, Disposer disposer);
//! @copydoc ::boost::intrusive::bstree::clear
void clear();
//! @copydoc ::boost::intrusive::bstree::clear_and_dispose
template<class Disposer>
void clear_and_dispose(Disposer disposer);
//! @copydoc ::boost::intrusive::bstree::count(const_reference)const
size_type count(const_reference value) const;
//! @copydoc ::boost::intrusive::bstree::count(const KeyType&,KeyValueCompare)const
template<class KeyType, class KeyValueCompare>
size_type count(const KeyType& key, KeyValueCompare comp) const;
//! @copydoc ::boost::intrusive::bstree::lower_bound(const_reference)
iterator lower_bound(const_reference value);
//! @copydoc ::boost::intrusive::bstree::lower_bound(const KeyType&,KeyValueCompare)
template<class KeyType, class KeyValueCompare>
iterator lower_bound(const KeyType& key, KeyValueCompare comp);
//! @copydoc ::boost::intrusive::bstree::lower_bound(const_reference)const
const_iterator lower_bound(const_reference value) const;
//! @copydoc ::boost::intrusive::bstree::lower_bound(const KeyType&,KeyValueCompare)const
template<class KeyType, class KeyValueCompare>
const_iterator lower_bound(const KeyType& key, KeyValueCompare comp) const;
//! @copydoc ::boost::intrusive::bstree::upper_bound(const_reference)
iterator upper_bound(const_reference value);
//! @copydoc ::boost::intrusive::bstree::upper_bound(const KeyType&,KeyValueCompare)
template<class KeyType, class KeyValueCompare>
iterator upper_bound(const KeyType& key, KeyValueCompare comp);
//! @copydoc ::boost::intrusive::bstree::upper_bound(const_reference)const
const_iterator upper_bound(const_reference value) const;
//! @copydoc ::boost::intrusive::bstree::upper_bound(const KeyType&,KeyValueCompare)const
template<class KeyType, class KeyValueCompare>
const_iterator upper_bound(const KeyType& key, KeyValueCompare comp) const;
//! @copydoc ::boost::intrusive::bstree::find(const_reference)
iterator find(const_reference value);
//! @copydoc ::boost::intrusive::bstree::find(const KeyType&,KeyValueCompare)
template<class KeyType, class KeyValueCompare>
iterator find(const KeyType& key, KeyValueCompare comp);
//! @copydoc ::boost::intrusive::bstree::find(const_reference)const
const_iterator find(const_reference value) const;
//! @copydoc ::boost::intrusive::bstree::find(const KeyType&,KeyValueCompare)const
template<class KeyType, class KeyValueCompare>
const_iterator find(const KeyType& key, KeyValueCompare comp) const;
//! @copydoc ::boost::intrusive::bstree::equal_range(const_reference)
std::pair<iterator,iterator> equal_range(const_reference value);
//! @copydoc ::boost::intrusive::bstree::equal_range(const KeyType&,KeyValueCompare)
template<class KeyType, class KeyValueCompare>
std::pair<iterator,iterator> equal_range(const KeyType& key, KeyValueCompare comp);
//! @copydoc ::boost::intrusive::bstree::equal_range(const_reference)const
std::pair<const_iterator, const_iterator>
equal_range(const_reference value) const;
//! @copydoc ::boost::intrusive::bstree::equal_range(const KeyType&,KeyValueCompare)const
template<class KeyType, class KeyValueCompare>
std::pair<const_iterator, const_iterator>
equal_range(const KeyType& key, KeyValueCompare comp) const;
//! @copydoc ::boost::intrusive::bstree::bounded_range(const_reference,const_reference,bool,bool)
std::pair<iterator,iterator> bounded_range
(const_reference lower_value, const_reference upper_value, bool left_closed, bool right_closed);
//! @copydoc ::boost::intrusive::bstree::bounded_range(const KeyType&,const KeyType&,KeyValueCompare,bool,bool)
template<class KeyType, class KeyValueCompare>
std::pair<iterator,iterator> bounded_range
(const KeyType& lower_key, const KeyType& upper_key, KeyValueCompare comp, bool left_closed, bool right_closed);
//! @copydoc ::boost::intrusive::bstree::bounded_range(const_reference,const_reference,bool,bool)const
std::pair<const_iterator, const_iterator>
bounded_range(const_reference lower_value, const_reference upper_value, bool left_closed, bool right_closed) const;
//! @copydoc ::boost::intrusive::bstree::bounded_range(const KeyType&,const KeyType&,KeyValueCompare,bool,bool)const
template<class KeyType, class KeyValueCompare>
std::pair<const_iterator, const_iterator> bounded_range
(const KeyType& lower_key, const KeyType& upper_key, KeyValueCompare comp, bool left_closed, bool right_closed) const;
//! @copydoc ::boost::intrusive::bstree::s_iterator_to(reference)
static iterator s_iterator_to(reference value);
//! @copydoc ::boost::intrusive::bstree::s_iterator_to(const_reference)
static const_iterator s_iterator_to(const_reference value);
//! @copydoc ::boost::intrusive::bstree::iterator_to(reference)
iterator iterator_to(reference value);
//! @copydoc ::boost::intrusive::bstree::iterator_to(const_reference)const
const_iterator iterator_to(const_reference value) const;
//! @copydoc ::boost::intrusive::bstree::init_node(reference)
static void init_node(reference value);
//! @copydoc ::boost::intrusive::bstree::unlink_leftmost_without_rebalance
pointer unlink_leftmost_without_rebalance();
//! @copydoc ::boost::intrusive::bstree::replace_node
void replace_node(iterator replace_this, reference with_this);
//! @copydoc ::boost::intrusive::bstree::remove_node
void remove_node(reference value);
#endif //#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
};
#if defined(BOOST_INTRUSIVE_DOXYGEN_INVOKED)
template<class T, class ...Options>
bool operator!= (const bs_multiset_impl<T, Options...> &x, const bs_multiset_impl<T, Options...> &y);
template<class T, class ...Options>
bool operator>(const bs_multiset_impl<T, Options...> &x, const bs_multiset_impl<T, Options...> &y);
template<class T, class ...Options>
bool operator<=(const bs_multiset_impl<T, Options...> &x, const bs_multiset_impl<T, Options...> &y);
template<class T, class ...Options>
bool operator>=(const bs_multiset_impl<T, Options...> &x, const bs_multiset_impl<T, Options...> &y);
template<class T, class ...Options>
void swap(bs_multiset_impl<T, Options...> &x, bs_multiset_impl<T, Options...> &y);
#endif //#if defined(BOOST_INTRUSIVE_DOXYGEN_INVOKED)
//! Helper metafunction to define a \c bs_multiset that yields the same type when the
//! same options (either explicitly or implicitly) are used.
#if defined(BOOST_INTRUSIVE_DOXYGEN_INVOKED) || defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES)
template<class T, class ...Options>
#else
template<class T, class O1 = void, class O2 = void
, class O3 = void, class O4 = void
, class O5 = void>
#endif
struct make_bs_multiset
{
/// @cond
typedef typename pack_options
< bstree_defaults,
#if !defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES)
O1, O2, O3, O4, O5
#else
Options...
#endif
>::type packed_options;
typedef typename detail::get_value_traits
<T, typename packed_options::proto_value_traits>::type value_traits;
typedef typename detail::get_header_holder_type
< value_traits, typename packed_options::header_holder_type >::type header_holder_type;
typedef bs_multiset_impl
< value_traits
, typename packed_options::compare
, typename packed_options::size_type
, packed_options::constant_time_size
, header_holder_type
> implementation_defined;
/// @endcond
typedef implementation_defined type;
};
#ifndef BOOST_INTRUSIVE_DOXYGEN_INVOKED
#if !defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES)
template<class T, class O1, class O2, class O3, class O4, class O5>
#else
template<class T, class ...Options>
#endif
class bs_multiset
: public make_bs_multiset<T,
#if !defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES)
O1, O2, O3, O4, O5
#else
Options...
#endif
>::type
{
typedef typename make_bs_multiset<T,
#if !defined(BOOST_INTRUSIVE_VARIADIC_TEMPLATES)
O1, O2, O3, O4, O5
#else
Options...
#endif
>::type Base;
BOOST_MOVABLE_BUT_NOT_COPYABLE(bs_multiset)
public:
typedef typename Base::value_compare value_compare;
typedef typename Base::value_traits value_traits;
typedef typename Base::iterator iterator;
typedef typename Base::const_iterator const_iterator;
//Assert that the passed value traits are compatible with the value type
BOOST_STATIC_ASSERT((detail::is_same<typename value_traits::value_type, T>::value));
explicit bs_multiset( const value_compare &cmp = value_compare()
, const value_traits &v_traits = value_traits())
: Base(cmp, v_traits)
{}
template<class Iterator>
bs_multiset( Iterator b, Iterator e
, const value_compare &cmp = value_compare()
, const value_traits &v_traits = value_traits())
: Base(b, e, cmp, v_traits)
{}
bs_multiset(BOOST_RV_REF(bs_multiset) x)
: Base(::boost::move(static_cast<Base&>(x)))
{}
bs_multiset& operator=(BOOST_RV_REF(bs_multiset) x)
{ return static_cast<bs_multiset &>(this->Base::operator=(::boost::move(static_cast<Base&>(x)))); }
static bs_multiset &container_from_end_iterator(iterator end_iterator)
{ return static_cast<bs_multiset &>(Base::container_from_end_iterator(end_iterator)); }
static const bs_multiset &container_from_end_iterator(const_iterator end_iterator)
{ return static_cast<const bs_multiset &>(Base::container_from_end_iterator(end_iterator)); }
static bs_multiset &container_from_iterator(iterator it)
{ return static_cast<bs_multiset &>(Base::container_from_iterator(it)); }
static const bs_multiset &container_from_iterator(const_iterator it)
{ return static_cast<const bs_multiset &>(Base::container_from_iterator(it)); }
};
#endif
} //namespace intrusive
} //namespace boost
#include <boost/intrusive/detail/config_end.hpp>
#endif //BOOST_INTRUSIVE_BS_SET_HPP<|fim▁end|> | |
<|file_name|>RenderPassManager.js<|end_file_name|><|fim▁begin|>function RenderPassManager(renderer)
{
// not implemented yet; fail loudly until the manager is finished
throw new Error("Not implemented");
// NOTE: the statements below are unreachable until the throw is removed, and
// mRenderPasses is local to this constructor, so the prototype methods cannot
// see it as written
var mRenderPasses = [];
return this;
}
<|fim▁hole|> mRenderPasses.push(renderPass);
};
RenderPassManager.prototype.render = function()
{
for (var i = 0; i < mRenderPasses.length; i++) {
// for...in would iterate indices (strings), not the passes themselves
var renderPass = mRenderPasses[i];
renderPass.sort();
renderPass.render(renderer);
renderPass.clear();
}
};<|fim▁end|> | RenderPassManager.prototype.addRenderPass = function(renderPass)
{ |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>extern crate time;
extern crate gl;
extern crate glfw;
// include the OpenGL type aliases
use gl::types::*;
use glfw::{Action, Context, Key};
use state::State;
use renderer::Renderer;
mod renderer;
mod state;
fn main() {
let glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
// glfw's create_window takes (width, height, ...); name the values to match,
// and the window must be mutable for set_key_polling/make_current below
let width = 800;
let height = 600;
let (mut window, events) = glfw.create_window(width, height, "Rusted Space", glfw::WindowMode::Windowed)
.expect("Failed to create GLFW window.");
window.set_key_polling(true);
window.make_current();
let mut state = State::new();
let mut renderer = Renderer::new(glfw, window, events, state);
while renderer.running() {
renderer.tick()
}<|fim▁hole|> println!("Rusted Space ended not with a bang, but with whimper");
}<|fim▁end|> | |
<|file_name|>movie.component.ts<|end_file_name|><|fim▁begin|>import {Component} from 'angular2/core';
import {RouteConfig, RouterOutlet} from 'angular2/router';
import {MovieList} from './movie-list.component';
import {MovieDetail} from './movie-detail.component';
import {MovieService} from './movie.service';
@Component({
template: `
<h2>Movie List</h2>
<router-outlet></router-outlet>
`,
directives: [RouterOutlet],
providers: [MovieService]
})<|fim▁hole|>@RouteConfig([
{ path: '/', name: 'Movies', component: MovieList, useAsDefault: true },
{ path: '/:id', name: 'MovieDetail', component: MovieDetail },
{ path: '/new', name: 'AddMovie', component: MovieDetail }
])
export class MovieComponent { }<|fim▁end|> | |
<|file_name|>projections.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from abcmodels import AModel
from mplotlab.utils.abctypes import FLOAT,LIST,STRING,BOOL,RegisterType
class AProjection(AModel):
<|fim▁hole|> ("title", STRING,lambda:"title","axes title"),
("xlabel", STRING,lambda:"","axes xlabel"),
("ylabel", STRING,lambda:"","axes ylabel"),
])
class Projection2D(AProjection):
parametersInfo = list(AProjection.parametersInfo)
parametersInfo.extend([
("autolim",BOOL,lambda:True,"Auto lim axis. Won't use x/y min/max"),
("xmin", FLOAT,lambda:0.0,"axes xmin"),
("xmax", FLOAT,lambda:1.0,"axes xmax"),
("ymin", FLOAT,lambda:0.0,"axes ymin"),
("ymax", FLOAT,lambda:1.0,"axes ymax"),
])
RegisterType(AProjection)
RegisterType(Projection2D)<|fim▁end|> | parametersInfo = list(AModel.parametersInfo)
parametersInfo.extend([
("plotmodels",LIST,lambda:[],"plotModels"),
|
<|file_name|>en-ie.js<|end_file_name|><|fim▁begin|>//! moment.js locale configuration<|fim▁hole|> typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['moment'], factory) :
factory(global.moment)
}(this, function (moment) { 'use strict';
var en_ie = moment.defineLocale('en-ie', {
months : 'January_February_March_April_May_June_July_August_September_October_November_December'.split('_'),
monthsShort : 'Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec'.split('_'),
weekdays : 'Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday'.split('_'),
weekdaysShort : 'Sun_Mon_Tue_Wed_Thu_Fri_Sat'.split('_'),
weekdaysMin : 'Su_Mo_Tu_We_Th_Fr_Sa'.split('_'),
longDateFormat : {
LT : 'HH:mm',
LTS : 'HH:mm:ss',
L : 'DD-MM-YYYY',
LL : 'D MMMM YYYY',
LLL : 'D MMMM YYYY HH:mm',
LLLL : 'dddd D MMMM YYYY HH:mm'
},
calendar : {
sameDay : '[Today at] LT',
nextDay : '[Tomorrow at] LT',
nextWeek : 'dddd [at] LT',
lastDay : '[Yesterday at] LT',
lastWeek : '[Last] dddd [at] LT',
sameElse : 'L'
},
relativeTime : {
future : 'in %s',
past : '%s ago',
s : 'a few seconds',
m : 'a minute',
mm : '%d minutes',
h : 'an hour',
hh : '%d hours',
d : 'a day',
dd : '%d days',
M : 'a month',
MM : '%d months',
y : 'a year',
yy : '%d years'
},
ordinalParse: /\d{1,2}(st|nd|rd|th)/,
ordinal : function (number) {
var b = number % 10,
output = (~~(number % 100 / 10) === 1) ? 'th' :
(b === 1) ? 'st' :
(b === 2) ? 'nd' :
(b === 3) ? 'rd' : 'th';
return number + output;
},
week : {
dow : 1, // Monday is the first day of the week.
doy : 4 // The week that contains Jan 4th is the first week of the year.
}
});
return en_ie;
}));<|fim▁end|> | //! locale : Irish english (en-ie)
//! author : Chris Cartlidge : https://github.com/chriscartlidge
;(function (global, factory) { |
<|file_name|>quiver.py<|end_file_name|><|fim▁begin|>"""
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import weakref
import numpy as np
from numpy import ma
import matplotlib.collections as mcollections
import matplotlib.transforms as transforms
import matplotlib.text as mtext
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
from matplotlib import docstring
import matplotlib.font_manager as font_manager
import matplotlib.cbook as cbook
from matplotlib.cbook import delete_masked_points
from matplotlib.patches import CirclePolygon
import math
_quiver_doc = """
Plot a 2-D field of arrows.
Call signatures::
quiver(U, V, **kw)
quiver(U, V, C, **kw)
quiver(X, Y, U, V, **kw)
quiver(X, Y, U, V, C, **kw)
*U* and *V* are the arrow data, *X* and *Y* set the location of the
arrows, and *C* sets the color of the arrows. These arguments may be 1-D or
2-D arrays or sequences.
If *X* and *Y* are absent, they will be generated as a uniform grid.
If *U* and *V* are 2-D arrays and *X* and *Y* are 1-D, and if ``len(X)`` and
``len(Y)`` match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
The default settings auto-scales the length of the arrows to a reasonable size.
To change this behavior see the *scale* and *scale_units* kwargs.
The defaults give a slightly swept-back arrow; to make the head a
triangle, make *headaxislength* the same as *headlength*. To make the
arrow more pointed, reduce *headwidth* or increase *headlength* and
*headaxislength*. To make the head smaller relative to the shaft,
scale down all the head parameters. You will probably do best to leave
minshaft alone.
*linewidths* and *edgecolors* can be used to customize the arrow
outlines.
Parameters
----------
X : 1D or 2D array, sequence, optional
The x coordinates of the arrow locations
Y : 1D or 2D array, sequence, optional
The y coordinates of the arrow locations
U : 1D or 2D array or masked array, sequence
The x components of the arrow vectors
V : 1D or 2D array or masked array, sequence
The y components of the arrow vectors
C : 1D or 2D array, sequence, optional
The arrow colors
units : [ 'width' | 'height' | 'dots' | 'inches' | 'x' | 'y' | 'xy' ]
The arrow dimensions (except for *length*) are measured in multiples of
this unit.
'width' or 'height': the width or height of the axis
'dots' or 'inches': pixels or inches, based on the figure dpi
'x', 'y', or 'xy': respectively *X*, *Y*, or :math:`\\sqrt{X^2 + Y^2}`
in data units
The arrows scale differently depending on the units. For
'x' or 'y', the arrows get larger as one zooms in; for other
units, the arrow size is independent of the zoom state. For
'width' or 'height', the arrow size increases with the width and
height of the axes, respectively, when the window is resized;
for 'dots' or 'inches', resizing does not change the arrows.
angles : [ 'uv' | 'xy' ], array, optional
Method for determining the angle of the arrows. Default is 'uv'.
'uv': the arrow axis aspect ratio is 1 so that
if *U*==*V* the orientation of the arrow on the plot is 45 degrees
counter-clockwise from the horizontal axis (positive to the right).
'xy': arrows point from (x,y) to (x+u, y+v).
Use this for plotting a gradient field, for example.
Alternatively, arbitrary angles may be specified as an array
of values in degrees, counter-clockwise from the horizontal axis.
Note: inverting a data axis will correspondingly invert the
arrows only with ``angles='xy'``.
scale : None, float, optional
Number of data units per arrow length unit, e.g., m/s per plot width; a
smaller scale parameter makes the arrow longer. Default is *None*.
If *None*, a simple autoscaling algorithm is used, based on the average
vector length and the number of vectors. The arrow length unit is given by
the *scale_units* parameter
scale_units : [ 'width' | 'height' | 'dots' | 'inches' | 'x' | 'y' | 'xy' ], \
None, optional
If the *scale* kwarg is *None*, the arrow length unit. Default is *None*.
e.g. *scale_units* is 'inches', *scale* is 2.0, and
``(u,v) = (1,0)``, then the vector will be 0.5 inches long.
If *scale_units* is 'width'/'height', then the vector will be half the
width/height of the axes.
If *scale_units* is 'x' then the vector will be 0.5 x-axis
units. To plot vectors in the x-y plane, with u and v having
the same units as x and y, use
``angles='xy', scale_units='xy', scale=1``.
width : scalar, optional
Shaft width in arrow units; default depends on choice of units,
above, and number of vectors; a typical starting value is about
0.005 times the width of the plot.
headwidth : scalar, optional
Head width as multiple of shaft width, default is 3
headlength : scalar, optional
Head length as multiple of shaft width, default is 5
headaxislength : scalar, optional
Head length at shaft intersection, default is 4.5
minshaft : scalar, optional
Length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
Default is 1
minlength : scalar, optional
Minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
Default is 1.
pivot : [ 'tail' | 'mid' | 'middle' | 'tip' ], optional
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*.
color : [ color | color sequence ], optional
This is a synonym for the
:class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
If *C* has been set, *color* has no effect.
Notes
-----
Additional :class:`~matplotlib.collections.PolyCollection`
keyword arguments:
%(PolyCollection)s
See Also
--------
quiverkey : Add a key to a quiver plot
""" % docstring.interpd.params
_quiverkey_doc = """
Add a key to a quiver plot.
Call signature::
quiverkey(Q, X, Y, U, label, **kw)
Arguments:
*Q*:
The Quiver instance returned by a call to quiver.
*X*, *Y*:
The location of the key; additional explanation follows.
*U*:
The length of the key
*label*:
A string with the length and units of the key
Keyword arguments:
*angle* = 0
The angle of the key arrow. Measured in degrees anti-clockwise from the
x-axis.
*coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
normalized coordinate systems with 0,0 in the lower left and 1,1
in the upper right; 'data' are the axes data coordinates (used for
the locations of the vectors in the quiver plot itself); 'inches'
is position in the figure in inches, with 0,0 at the lower left
corner.
*color*:
overrides face and edge colors from *Q*.
*labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
Position the label above, below, to the right, to the left of the
arrow, respectively.
*labelsep*:
Distance in inches between the arrow and the label. Default is
0.1
*labelcolor*:
defaults to default :class:`~matplotlib.text.Text` color.
*fontproperties*:
A dictionary with keyword arguments accepted by the
:class:`~matplotlib.font_manager.FontProperties` initializer:
*family*, *style*, *variant*, *size*, *weight*
Any additional keyword arguments are used to override vector
properties taken from *Q*.
The positioning of the key depends on *X*, *Y*, *coordinates*, and
*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
tail; in either of these two cases, *X*, *Y* is somewhere in the
middle of the arrow+label key object.
"""
class QuiverKey(martist.Artist):
""" Labelled arrow for use as a quiver plot scale key."""
halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
pivot = {'N': 'middle', 'S': 'middle', 'E': 'tip', 'W': 'tail'}
def __init__(self, Q, X, Y, U, label, **kw):
martist.Artist.__init__(self)
self.Q = Q
self.X = X
self.Y = Y
self.U = U
self.angle = kw.pop('angle', 0)
self.coord = kw.pop('coordinates', 'axes')
self.color = kw.pop('color', None)
self.label = label
self._labelsep_inches = kw.pop('labelsep', 0.1)
self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
# try to prevent closure over the real self
weak_self = weakref.ref(self)
def on_dpi_change(fig):
self_weakref = weak_self()
if self_weakref is not None:
self_weakref.labelsep = (self_weakref._labelsep_inches*fig.dpi)
self_weakref._initialized = False # simple brute force update
# works because _init is
# called at the start of
# draw.
self._cid = Q.ax.figure.callbacks.connect('dpi_changed',
on_dpi_change)
self.labelpos = kw.pop('labelpos', 'N')
self.labelcolor = kw.pop('labelcolor', None)
self.fontproperties = kw.pop('fontproperties', dict())
self.kw = kw
_fp = self.fontproperties
# boxprops = dict(facecolor='red')
self.text = mtext.Text(
text=label, # bbox=boxprops,
horizontalalignment=self.halign[self.labelpos],
verticalalignment=self.valign[self.labelpos],
fontproperties=font_manager.FontProperties(**_fp))
if self.labelcolor is not None:
self.text.set_color(self.labelcolor)
self._initialized = False
self.zorder = Q.zorder + 0.1
def remove(self):
"""
Overload the remove method
"""
self.Q.ax.figure.callbacks.disconnect(self._cid)
self._cid = None
# pass the remove call up the stack
martist.Artist.remove(self)
__init__.__doc__ = _quiverkey_doc
def _init(self):
if True: # not self._initialized:
if not self.Q._initialized:
self.Q._init()
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
# Hack: save and restore the Umask
_mask = self.Q.Umask
self.Q.Umask = ma.nomask
self.verts = self.Q._make_verts(np.array([self.U]),
np.zeros((1,)),
self.angle)
self.Q.Umask = _mask
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = mcollections.PolyCollection(
self.verts,
offsets=[(self.X, self.Y)],
transOffset=self.get_transform(),
**kw)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self.vector.set_figure(self.get_figure())
self._initialized = True
def _text_x(self, x):
if self.labelpos == 'E':
return x + self.labelsep
elif self.labelpos == 'W':
return x - self.labelsep
else:
return x
def _text_y(self, y):
if self.labelpos == 'N':
return y + self.labelsep
elif self.labelpos == 'S':
return y - self.labelsep
else:
return y
@allow_rasterization
def draw(self, renderer):
self._init()
self.vector.draw(renderer)
x, y = self.get_transform().transform_point((self.X, self.Y))
self.text.set_x(self._text_x(x))
self.text.set_y(self._text_y(y))
self.text.draw(renderer)
self.stale = False
def _set_transform(self):
if self.coord == 'data':
self.set_transform(self.Q.ax.transData)
elif self.coord == 'axes':
self.set_transform(self.Q.ax.transAxes)
elif self.coord == 'figure':
self.set_transform(self.Q.ax.figure.transFigure)
elif self.coord == 'inches':
self.set_transform(self.Q.ax.figure.dpi_scale_trans)
else:
raise ValueError('unrecognized coordinates')
def set_figure(self, fig):
martist.Artist.set_figure(self, fig)
self.text.set_figure(fig)
def contains(self, mouseevent):
# Maybe the dictionary should allow one to
# distinguish between a text hit and a vector hit.
if (self.text.contains(mouseevent)[0] or
self.vector.contains(mouseevent)[0]):
return True, {}
return False, {}
quiverkey_doc = _quiverkey_doc
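# A hedged sketch of adding a key to an existing quiver plot Q (the position,
# length and label are illustrative assumptions):
#
#   qk = ax.quiverkey(Q, 0.9, 0.9, 2, r'$2 \frac{m}{s}$',
#                     labelpos='E', coordinates='figure')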
# This is a helper function that parses out the various combination of
# arguments for doing colored vector plots. Pulling it out here
# allows both Quiver and Barbs to use it
def _parse_args(*args):
X, Y, U, V, C = [None] * 5
args = list(args)
# The use of atleast_1d allows for handling scalar arguments while also
# keeping masked arrays
if len(args) == 3 or len(args) == 5:
C = np.atleast_1d(args.pop(-1))
V = np.atleast_1d(args.pop(-1))
U = np.atleast_1d(args.pop(-1))
if U.ndim == 1:
nr, nc = 1, U.shape[0]
else:
nr, nc = U.shape
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def _check_consistent_shapes(*arrays):
all_shapes = set(a.shape for a in arrays)
if len(all_shapes) != 1:
raise ValueError('The shapes of the passed in arrays do not match.')
class Quiver(mcollections.PolyCollection):
"""
Specialized PolyCollection for arrows.
The only API method is set_UVC(), which can be used
to change the size, orientation, and color of the
arrows; their locations are fixed when the class is
instantiated. Possibly this method will be useful
in animations.
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
_PIVOT_VALS = ('tail', 'mid', 'middle', 'tip')
@docstring.Substitution(_quiver_doc)
def __init__(self, ax, *args, **kw):
"""
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s
"""
self.ax = ax
X, Y, U, V, C = _parse_args(*args)
self.X = X
self.Y = Y
self.XY = np.hstack((X[:, np.newaxis], Y[:, np.newaxis]))
self.N = len(X)
self.scale = kw.pop('scale', None)
self.headwidth = kw.pop('headwidth', 3)
self.headlength = float(kw.pop('headlength', 5))
self.headaxislength = kw.pop('headaxislength', 4.5)
self.minshaft = kw.pop('minshaft', 1)
self.minlength = kw.pop('minlength', 1)
self.units = kw.pop('units', 'width')
self.scale_units = kw.pop('scale_units', None)
self.angles = kw.pop('angles', 'uv')
self.width = kw.pop('width', None)
self.color = kw.pop('color', 'k')
pivot = kw.pop('pivot', 'tail').lower()
# validate pivot
if pivot not in self._PIVOT_VALS:
raise ValueError(
'pivot must be one of {keys}, you passed {inp}'.format(
keys=self._PIVOT_VALS, inp=pivot))
# normalize to 'middle'
if pivot == 'mid':
pivot = 'middle'
self.pivot = pivot
self.transform = kw.pop('transform', ax.transData)
kw.setdefault('facecolors', self.color)
kw.setdefault('linewidths', (0,))
mcollections.PolyCollection.__init__(self, [], offsets=self.XY,
transOffset=self.transform,
closed=False,
**kw)
self.polykw = kw
self.set_UVC(U, V, C)
self._initialized = False
self.keyvec = None
self.keytext = None
# try to prevent closure over the real self
weak_self = weakref.ref(self)
def on_dpi_change(fig):
self_weakref = weak_self()
if self_weakref is not None:
self_weakref._new_UV = True # vertices depend on width, span
# which in turn depend on dpi
self_weakref._initialized = False # simple brute force update
# works because _init is
# called at the start of
# draw.
self._cid = self.ax.figure.callbacks.connect('dpi_changed',
on_dpi_change)
def remove(self):
"""
Overload the remove method
"""
# disconnect the call back
self.ax.figure.callbacks.disconnect(self._cid)
self._cid = None
# pass the remove call up the stack
mcollections.PolyCollection.remove(self)
def _init(self):
"""
Initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: # not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point(
(ax.bbox.width, ax.bbox.height))
self.span = sx
if self.width is None:
sn = np.clip(math.sqrt(self.N), 8, 25)
self.width = 0.06 * self.span / sn
# _make_verts sets self.scale if not already specified
if not self._initialized and self.scale is None:
self._make_verts(self.U, self.V, self.angles)
self._initialized = True
def get_datalim(self, transData):
trans = self.get_transform()
transOffset = self.get_offset_transform()
full_transform = (trans - transData) + (transOffset - transData)
XY = full_transform.transform(self.XY)
bbox = transforms.Bbox.null()
bbox.update_from_data_xy(XY, ignore=True)
return bbox
@allow_rasterization
def draw(self, renderer):
self._init()
verts = self._make_verts(self.U, self.V, self.angles)
self.set_verts(verts, closed=False)
self._new_UV = False
mcollections.PolyCollection.draw(self, renderer)
self.stale = False
def set_UVC(self, U, V, C=None):
# We need to ensure we have a copy, not a reference
# to an array that might change before draw().
U = ma.masked_invalid(U, copy=True).ravel()
V = ma.masked_invalid(V, copy=True).ravel()
mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
if C is not None:
C = ma.masked_invalid(C, copy=True).ravel()
mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
if mask is ma.nomask:
C = C.filled()
else:
C = ma.array(C, mask=mask, copy=False)
self.U = U.filled(1)
self.V = V.filled(1)
self.Umask = mask
if C is not None:
self.set_array(C)
self._new_UV = True
self.stale = True
def _dots_per_unit(self, units):
"""
Return a scale factor for converting from units to pixels
"""
ax = self.ax
if units in ('x', 'y', 'xy'):
if units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
elif units == 'y':
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
else: # 'xy' is assumed
dxx0 = ax.viewLim.width
dxx1 = ax.bbox.width
dyy0 = ax.viewLim.height
dyy1 = ax.bbox.height
dx1 = np.hypot(dxx1, dyy1)
dx0 = np.hypot(dxx0, dyy0)
dx = dx1 / dx0
else:
if units == 'width':
dx = ax.bbox.width
elif units == 'height':
dx = ax.bbox.height
elif units == 'dots':
dx = 1.0
elif units == 'inches':
dx = ax.figure.dpi
else:
raise ValueError('unrecognized units')
return dx
def _set_transform(self):
"""
Sets the PolygonCollection transform to go
from arrow width units to pixels.
"""
dx = self._dots_per_unit(self.units)
self._trans_scale = dx # pixels per arrow width unit
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
def _angles_lengths(self, U, V, eps=1):
xy = self.ax.transData.transform(self.XY)
uv = np.hstack((U[:, np.newaxis], V[:, np.newaxis]))
xyp = self.ax.transData.transform(self.XY + eps * uv)
dxy = xyp - xy
angles = np.arctan2(dxy[:, 1], dxy[:, 0])
lengths = np.hypot(*dxy.T) / eps
return angles, lengths
def _make_verts(self, U, V, angles):
uv = (U + V * 1j)
str_angles = angles if isinstance(angles, six.string_types) else ''
if str_angles == 'xy' and self.scale_units == 'xy':
# Here eps is 1 so that if we get U, V by diffing
# the X, Y arrays, the vectors will connect the
# points, regardless of the axis scaling (including log).
angles, lengths = self._angles_lengths(U, V, eps=1)
elif str_angles == 'xy' or self.scale_units == 'xy':
# Calculate eps based on the extents of the plot
# so that we don't end up with roundoff error from
# adding a small number to a large.
eps = np.abs(self.ax.dataLim.extents).max() * 0.001
angles, lengths = self._angles_lengths(U, V, eps=eps)
if str_angles and self.scale_units == 'xy':
a = lengths
else:
a = np.abs(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
if self.Umask is not ma.nomask:
amean = a[~self.Umask].mean()
else:
amean = a.mean()
# crude auto-scaling
# scale is typical arrow length as a multiple of the arrow width
scale = 1.8 * amean * sn / self.span
if self.scale_units is None:
if self.scale is None:
self.scale = scale
widthu_per_lenu = 1.0
else:
if self.scale_units == 'xy':
dx = 1
else:
dx = self._dots_per_unit(self.scale_units)
widthu_per_lenu = dx / self._trans_scale
if self.scale is None:
self.scale = scale * widthu_per_lenu
length = a * (widthu_per_lenu / (self.scale * self.width))
X, Y = self._h_arrows(length)
if str_angles == 'xy':
theta = angles
elif str_angles == 'uv':
theta = np.angle(uv)
else:
theta = ma.masked_invalid(np.deg2rad(angles)).filled(0)
theta = theta.reshape((-1, 1)) # for broadcasting
xy = (X + Y * 1j) * np.exp(1j * theta) * self.width
xy = xy[:, :, np.newaxis]<|fim▁hole|> if self.Umask is not ma.nomask:
XY = ma.array(XY)
XY[self.Umask] = ma.masked
# This might be handled more efficiently with nans, given
# that nans will end up in the paths anyway.
return XY
def _h_arrows(self, length):
""" length is in arrow width units """
# It might be possible to streamline the code
# and speed it up a bit by using complex (x,y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# This number is chosen based on when pixel values overflow in Agg
# causing rendering errors
# length = np.minimum(length, 2 ** 16)
np.clip(length, 0, 2 ** 16, out=length)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0],
np.float64)
x = x + np.array([0, 1, 1, 1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis, :], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh - self.headaxislength,
minsh - self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
ii = [0, 1, 2, 3, 2, 1, 0, 0]
X = x.take(ii, 1)
Y = y.take(ii, 1)
Y[:, 3:-1] *= -1
X0 = x0.take(ii)
Y0 = y0.take(ii)
Y0[3:-1] *= -1
shrink = length / minsh if minsh != 0. else 0.
X0 = shrink * X0[np.newaxis, :]
Y0 = shrink * Y0[np.newaxis, :]
short = np.repeat(length < minsh, 8, axis=1)
# Now select X0, Y0 if short, otherwise X, Y
np.copyto(X, X0, where=short)
np.copyto(Y, Y0, where=short)
if self.pivot == 'middle':
X -= 0.5 * X[:, 3, np.newaxis]
elif self.pivot == 'tip':
X = X - X[:, 3, np.newaxis] # numpy bug? using -= does not
# work here unless we multiply
# by a float first, as with 'mid'.
elif self.pivot != 'tail':
raise ValueError(("Quiver.pivot must have value in {{'middle', "
"'tip', 'tail'}} not {0}").format(self.pivot))
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0, 8, 1, np.float64) * (np.pi / 3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = np.repeat(tooshort, 8, 1)
np.copyto(X, X1, where=tooshort)
np.copyto(Y, Y1, where=tooshort)
# Mask handling is deferred to the caller, _make_verts.
return X, Y
quiver_doc = _quiver_doc
_barbs_doc = r"""
Plot a 2-D field of barbs.
Call signatures::
barb(U, V, **kw)
barb(U, V, C, **kw)
barb(X, Y, U, V, **kw)
barb(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the barb locations
(default is head of barb; see *pivot* kwarg)
*U*, *V*:
Give the x and y components of the barb shaft
*C*:
An optional array used to map colors to the barbs
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if ``len(X)`` and ``len(Y)``
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*length*:
Length of the barb in points; the other parts of the barb
are scaled against this.
Default is 7.
*pivot*: [ 'tip' | 'middle' | float ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*. Default is 'tip'. Can
also be a number, which shifts the start of the barb that many
points from the origin.
*barbcolor*: [ color | color sequence ]
Specifies the color all parts of the barb except any flags. This
parameter is analogous to the *edgecolor* parameter for polygons,
which can be used instead. However this parameter will override
facecolor.
*flagcolor*: [ color | color sequence ]
Specifies the color of any flags on the barb. This parameter is
analogous to the *facecolor* parameter for polygons, which can be
used instead. However this parameter will override facecolor. If
this is not set (and *C* has not either) then *flagcolor* will be
set to match *barbcolor* so that the barb has a uniform color. If
*C* has been set, *flagcolor* has no effect.
*sizes*:
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or
full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
*fill_empty*:
A flag on whether the empty barbs (circles) that are drawn should
be filled with the flag color. If they are not filled, they will
be drawn such that no color is applied to the center. Default is
False
*rounding*:
A flag to indicate whether the vector magnitude should be rounded
when allocating barb components. If True, the magnitude is
rounded to the nearest multiple of the half-barb increment. If
False, the magnitude is simply truncated to the next lowest
multiple. Default is True
*barb_increments*:
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
*flip_barb*:
Either a single boolean flag or an array of booleans. Single
boolean indicates whether the lines and flags should point
opposite to normal for all barbs. An array (which should be the
same size as the other data arrays) indicates whether to flip for
each individual barb. Normal behavior is for the barbs and lines
to point right (comes from wind barbs having these features point
towards low pressure in the Northern Hemisphere.) Default is
False
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
lines or a triangle for various increments in magnitude, as shown
schematically below::
: /\ \\
: / \ \\
: / \ \ \\
: / \ \ \\
: ------------------------------
.. note the double \\ at the end of each line to make the figure
.. render correctly
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
linewidths and edgecolors can be used to customize the barb.
Additional :class:`~matplotlib.collections.PolyCollection` keyword
arguments:
%(PolyCollection)s
""" % docstring.interpd.params
docstring.interpd.update(barbs_doc=_barbs_doc)
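# A minimal usage sketch (illustrative only; it goes through the pyplot
# wrapper, which forwards to this module):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     X, Y = np.meshgrid(np.arange(5), np.arange(5))
#     U = 20 * np.ones(X.shape)
#     V = 35 * np.ones(Y.shape)
#     plt.barbs(X, Y, U, V, length=7, pivot='middle')
#     plt.show()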
class Barbs(mcollections.PolyCollection):
'''
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
'''
# This may be an abuse of polygons here to render what is essentially maybe
# 1 triangle and a series of lines. It works fine as far as I can tell
# however.
@docstring.interpd
def __init__(self, ax, *args, **kw):
"""
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%(barbs_doc)s
"""
self._pivot = kw.pop('pivot', 'tip')
self._length = kw.pop('length', 7)
barbcolor = kw.pop('barbcolor', None)
flagcolor = kw.pop('flagcolor', None)
self.sizes = kw.pop('sizes', dict())
self.fill_empty = kw.pop('fill_empty', False)
self.barb_increments = kw.pop('barb_increments', dict())
self.rounding = kw.pop('rounding', True)
self.flip = kw.pop('flip_barb', False)
transform = kw.pop('transform', ax.transData)
# Flagcolor and barbcolor provide convenience parameters for
# setting the facecolor and edgecolor, respectively, of the barb
# polygon. We also work here to make the flag the same color as the
# rest of the barb by default
if None in (barbcolor, flagcolor):
kw['edgecolors'] = 'face'
if flagcolor:
kw['facecolors'] = flagcolor
elif barbcolor:
kw['facecolors'] = barbcolor
else:
# Set to facecolor passed in or default to black
kw.setdefault('facecolors', 'k')
else:
kw['edgecolors'] = barbcolor
kw['facecolors'] = flagcolor
# Explicitly set a line width if we're not given one, otherwise
# polygons are not outlined and we get no barbs
if 'linewidth' not in kw and 'lw' not in kw:
kw['linewidth'] = 1
# Parse out the data arrays from the various configurations supported
x, y, u, v, c = _parse_args(*args)
self.x = x
self.y = y
xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
# Make a collection
barb_size = self._length ** 2 / 4 # Empirically determined
mcollections.PolyCollection.__init__(self, [], (barb_size,),
offsets=xy,
transOffset=transform, **kw)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
'''
Find how many of each of the tail pieces is necessary. Flag
specifies the increment for a flag, barb for a full barb, and half for
half a barb. Mag should be the magnitude of a vector (i.e., >= 0).
This returns a tuple of:
(*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
*half_flag* is a boolean whether half of a barb is needed,
since there should only ever be one half on a given
barb. *empty_flag* is an array of flags to easily tell if
a barb is empty (too low to plot any barbs/flags).
'''
# If rounding, round to the nearest multiple of half, the smallest
# increment
if rounding:
mag = half * (mag / half + 0.5).astype(int)
num_flags = np.floor(mag / flag).astype(int)
mag = np.mod(mag, flag)
num_barb = np.floor(mag / full).astype(int)
mag = np.mod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
return num_flags, num_barb, half_flag, empty_flag
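# Worked example (illustrative, using the default increments half=5,
# full=10, flag=50): a magnitude of 67 first rounds to
# 5 * int(67/5 + 0.5) = 65, which then decomposes into one flag (50),
# one full barb (10), and a half barb (5).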
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
'''
This function actually creates the wind barbs. *u* and *v*
are components of the vector in the *x* and *y* directions,
respectively.
*nflags*, *nbarbs*, *half_barb*, and *empty_flag* are,
respectively, the number of flags, number of barbs, flag for
half a barb, and flag for empty barb, ostensibly obtained
from :meth:`_find_tails`.
*length* is the length of the barb staff in points.
*pivot* specifies the point on the barb around which the
entire barb should be rotated. Right now, valid options are
'tip' and 'middle'. Can also be a number, which shifts the start
of the barb that many points from the origin.
*sizes* is a dictionary of coefficients specifying the ratio
of a given feature to the length of the barb. These features
include:
- *spacing*: space between features (flags, full/half
barbs)
- *height*: distance from shaft of top of a flag or full
barb
- *width* - width of a flag, twice the width of a full barb
- *emptybarb* - radius of the circle used for low
magnitudes
*fill_empty* specifies whether the circle representing an
empty barb should be filled or not (this changes the drawing
of the polygon).
*flip* is a flag indicating whether the features should be flipped to
the other side of the barb (useful for winds in the southern
hemisphere).
This function returns list of arrays of vertices, defining a polygon
for each of the wind barbs. These polygons have been rotated to
properly align with the vector direction.
'''
# These control the spacing and size of barb elements relative to the
# length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
# Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length / 2.)
# Check for flip
if flip:
full_height = -full_height
endx = 0.0
try:
endy = float(pivot)
except ValueError:
endy = pivot_points[pivot.lower()]
# Get the appropriate angle for the vector components. The offset is
# due to the way the barb is initially drawn, going down the y-axis.
# This makes sense in a meteorological mode of thinking, since 0
# degrees corresponds to north (traditionally the y-axis).
angles = -(ma.arctan2(v, u) + np.pi / 2)
# Used for low magnitude. We just get the vertices, so if we make it
# out here, it can be reused. The center set here should put the
# center of the circle at the location(offset), rather than at the
# same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0, 0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
# If we don't want the empty one filled, we make a degenerate
# polygon that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
# If the vector magnitude is too weak to draw anything, plot an
# empty circle instead
if empty_flag[index]:
# We can skip the transform since the circle has no preferred
# orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
# Add vertices for each flag
for i in range(nflags[index]):
# The spacing that works for the barbs is a little too much for
# the flags, but this only occurs when we have more than 1
# flag.
if offset != length:
offset += spacing / 2.
poly_verts.extend(
[[endx, endy + offset],
[endx + full_height, endy - full_width / 2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
# Add vertices for each barb. These really are lines, but works
# great adding 3 vertices that basically pull the polygon out and
# back down the line
for i in range(nbarbs[index]):
poly_verts.extend(
[(endx, endy + offset),
(endx + full_height, endy + offset + full_width / 2),
(endx, endy + offset)])
offset -= spacing
# Add the vertices for half a barb, if needed
if half_barb[index]:
# If the half barb is the first on the staff, traditionally it
# is offset from the end to make it easy to distinguish from a
# barb with a full one
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend(
[(endx, endy + offset),
(endx + full_height / 2, endy + offset + full_width / 4),
(endx, endy + offset)])
# Rotate the barb according the angle. Making the barb first and
# then rotating it made the math for drawing the barb really easy.
# Also, the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
def set_UVC(self, U, V, C=None):
self.u = ma.masked_invalid(U, copy=False).ravel()
self.v = ma.masked_invalid(V, copy=False).ravel()
if C is not None:
c = ma.masked_invalid(C, copy=False).ravel()
x, y, u, v, c = delete_masked_points(self.x.ravel(),
self.y.ravel(),
self.u, self.v, c)
_check_consistent_shapes(x, y, u, v, c)
else:
x, y, u, v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
_check_consistent_shapes(x, y, u, v)
magnitude = np.hypot(u, v)
flags, barbs, halves, empty = self._find_tails(magnitude,
self.rounding,
**self.barb_increments)
# Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes,
self.fill_empty, self.flip)
self.set_verts(plot_barbs)
# Set the color array
if C is not None:
self.set_array(c)
# Update the offsets in case the masked data changed
xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
self._offsets = xy
self.stale = True
def set_offsets(self, xy):
"""
Set the offsets for the barb polygons. This saves the offsets passed
in and actually sets a version masked as appropriate for the existing
U/V data. *offsets* should be a sequence.
ACCEPTS: sequence of pairs of floats
"""
self.x = xy[:, 0]
self.y = xy[:, 1]
x, y, u, v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
_check_consistent_shapes(x, y, u, v)
xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
mcollections.PolyCollection.set_offsets(self, xy)
self.stale = True
set_offsets.__doc__ = mcollections.PolyCollection.set_offsets.__doc__
barbs_doc = _barbs_doc<|fim▁end|> | XY = np.concatenate((xy.real, xy.imag), axis=2) |
<|file_name|>script.js<|end_file_name|><|fim▁begin|>require([
"gaslib/Node"
], function(Node){
QUnit.test( "Node - initialize", function( assert ) {
var node = new Node();
<|fim▁hole|><|fim▁end|> | assert.equal( node.getCaption(), "Node", "Invalid node name" );
});
}); |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# This file is part of translate.
#
# translate is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# translate is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# translate. If not, see <http://www.gnu.org/licenses/>.
"""
translate.client.exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~
These are exception classes that are used by translate.client.Client. Most of
these classes are simple wrappers, just to differentiate different types of
errors. They can be constructed from a requests response object, or JSON
returned from an API call.
"""
import json
import logging
log = logging.getLogger(__name__)
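# A minimal usage sketch (illustrative only; ``resp`` stands for any
# requests response object from the translate server, and the URL and
# params are placeholders):
#
#     resp = requests.post(server_url + "/translate", data=params)
#     if resp.status_code != 200:
#         raise TranslateException.from_response(resp)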
<|fim▁hole|>class TranslateException(Exception):
"""Mostly empty base class for exceptions relating to translate.
This class is used as a catch-all for exceptions thrown by the server. If
possible, a more specific subclass of this exception will be used.
"""
@classmethod
def from_json(cls, obj, status_code=400):
"""Return the proper exception class from the JSON object returned from
the server.
"""
exceptions = {
429: RateLimitException,
431: SizeLimitException,
452: TranslationException,
453: TranslatorException,
454: BadLanguagePairException
}
try:
code = obj['code'] if ('code' in obj) else status_code
klass = exceptions[code]
return klass.from_json(obj)
except KeyError:
return cls("Unknown error occured: " + repr(obj))
@classmethod
def from_response(cls, resp):
"""Generate a proper exception from the given requests response object
and return it.
"""
try:
obj = json.loads(resp.text)
return TranslateException.from_json(obj, resp.status_code)
except ValueError:
log.error("Was given invalid JSON, bailing...")
return TranslateException.from_json({}, resp.status_code)
class HTTPException(TranslateException):
"""Raised when an error occurs with the HTTP connection to the server
(e.g. host is not available, doesn't respond, etc.)
"""
pass
class RateLimitException(TranslateException):
"""Exception raised when a client goes over the ratelimit."""
def __init__(self, limit, per, reset):
self.limit = limit
self.per = per
self.reset = reset
@classmethod
def from_json(cls, obj):
try:
details = obj.get('details', {})
return cls(limit=details['limit'], per=details['per'],
reset=details['reset'])
except KeyError:
log.error("Received invalid JSON: " + repr(obj))
return cls(limit=0, per=0, reset=0)
def __str__(self):
return "Rate limit exceeded: {0} reqs / {1}s. Try again at {2}".format(
self.limit, self.per, self.reset)
class SizeLimitException(TranslateException):
"""Exception raised when a client tries to translate a text that is over
the server's size limit.
"""
def __init__(self, len, limit):
self.len = len
self.limit = limit
@classmethod
def from_json(cls, obj):
try:
details = obj['details']
return cls(len=details['len'], limit=details['limit'])
except KeyError:
log.error("Received invalid JSON: %s", repr(obj))
return cls(len=0, limit=0)
def __str__(self):
return "Specified text was too large: %d bytes. Maximum is %d bytes"\
.format(self.len, self.limit)
class TranslationException(TranslateException):
"""Returned on bad parameters to /translate"""
@classmethod
def from_json(cls, obj):
try:
msg = obj['message']
return cls("Bad parameters to translate API method: " + msg)
except KeyError:
log.error("Received invalid JSON: " + repr(obj))
return cls("Bad parameters to translate API method.")
class TranslatorException(TranslateException):
"""Returned when bad parameters are passed to the /translate method. (This
probably indicates some kind of API / Client bug.)
"""
def __init__(self, lang_pair, tried):
self.lang_pair = lang_pair
self.tried = tried
@classmethod
def from_json(cls, obj):
try:
details = obj['details']
pair = (details['from'], details['to'])
return cls(lang_pair=pair, tried=details['tried'])
except KeyError:
log.error("Received invalid JSON: " + repr(obj))
return cls(lang_pair=('unknown', 'unknown'), tried=['unknown'])
def __str__(self):
return "Failed to translate {0} (tried: {1})".format(self.lang_pair,
self.tried)
class BadLanguagePairException(TranslateException):
"""Raised when the client tried to translate using a language pair not
supported by the server
"""
def __init__(self, lang_pair):
self.lang_pair = lang_pair
@classmethod
def from_json(cls, obj):
try:
details = obj['details']
return cls(lang_pair=(details['from'], details['to']))
except KeyError:
log.error("Received invalid JSON: " + repr(obj))
return cls(lang_pair=('unknown', 'unknown'))
def __str__(self):
return "Unsupported language pair: {0}".format(self.lang_pair)<|fim▁end|> | |
<|file_name|>main.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Copyright (c) 2011-2013 The PPCoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "checkpoints.h"
#include "db.h"
#include "net.h"
#include "init.h"
#include "ui_interface.h"
#include "kernel.h"
#include "main.h"
#include <boost/algorithm/string/replace.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <cstdlib>
#include "GetNextTargetRequired.h"
#include "GetProofOfStakeReward.h"
#include "GetProofOfWorkReward.h"
using namespace std;
using namespace boost;
//
// Global state
//
CCriticalSection cs_setpwalletRegistered;
set<CWallet*> setpwalletRegistered;
CCriticalSection cs_main;
CTxMemPool mempool;
unsigned int nTransactionsUpdated = 0;
map<uint256, CBlockIndex*> mapBlockIndex;
set<pair<COutPoint, unsigned int> > setStakeSeen;
CBlockIndex* pindexGenesisBlock = NULL;
int nBestHeight = -1;
CBigNum bnBestChainTrust = 0;
CBigNum bnBestInvalidTrust = 0;
uint256 hashBestChain = 0;
CBlockIndex* pindexBest = NULL;
int64 nTimeBestReceived = 0;
CMedianFilter<int> cPeerBlockCounts(5, 0); // Amount of blocks that other nodes claim to have
map<uint256, CBlock*> mapOrphanBlocks;
multimap<uint256, CBlock*> mapOrphanBlocksByPrev;
set<pair<COutPoint, unsigned int> > setStakeSeenOrphan;
map<uint256, uint256> mapProofOfStake;
map<uint256, CDataStream*> mapOrphanTransactions;
map<uint256, map<uint256, CDataStream*> > mapOrphanTransactionsByPrev;
// Constant stuff for coinbase transactions we create:
CScript COINBASE_FLAGS;
const string strMessageMagic = COIN_NAME " Signed Message:\n";
double dHashesPerSec;
int64 nHPSTimerStart;
// Settings
int64 nTransactionFee = MIN_TX_FEES;
//////////////////////////////////////////////////////////////////////////////
//
// dispatching functions
//
// These functions dispatch to one or all registered wallets
void RegisterWallet(CWallet* pwalletIn)
{
{
LOCK(cs_setpwalletRegistered);
setpwalletRegistered.insert(pwalletIn);
}
}
void UnregisterWallet(CWallet* pwalletIn)
{
{
LOCK(cs_setpwalletRegistered);
setpwalletRegistered.erase(pwalletIn);
}
}
// check whether the passed transaction is from us
bool static IsFromMe(CTransaction& tx)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
if (pwallet->IsFromMe(tx))
return true;
return false;
}
// get the wallet transaction with the given hash (if it exists)
bool static GetTransaction(const uint256& hashTx, CWalletTx& wtx)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
if (pwallet->GetTransaction(hashTx,wtx))
return true;
return false;
}
// erases transaction with the given hash from all wallets
void static EraseFromWallets(uint256 hash)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->EraseFromWallet(hash);
}
// make sure all wallets know about the given transaction, in the given block
void SyncWithWallets(const CTransaction& tx, const CBlock* pblock, bool fUpdate, bool fConnect)
{
if (!fConnect)
{
// ppcoin: wallets need to refund inputs when disconnecting coinstake
if (tx.IsCoinStake())
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
if (pwallet->IsFromMe(tx))
pwallet->DisableTransaction(tx);
}
return;
}
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->AddToWalletIfInvolvingMe(tx, pblock, fUpdate);
}
// notify wallets about a new best chain
void static SetBestChain(const CBlockLocator& loc)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->SetBestChain(loc);
}
// notify wallets about an updated transaction
void static UpdatedTransaction(const uint256& hashTx)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->UpdatedTransaction(hashTx);
}
// dump all wallets
void static PrintWallets(const CBlock& block)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->PrintWallet(block);
}
// notify wallets about an incoming inventory (for request counts)
void static Inventory(const uint256& hash)
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->Inventory(hash);
}
// ask wallets to resend their transactions
void static ResendWalletTransactions()
{
BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
pwallet->ResendWalletTransactions();
}
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
bool AddOrphanTx(const CDataStream& vMsg)
{
CTransaction tx;
CDataStream(vMsg) >> tx;
uint256 hash = tx.GetHash();
if (mapOrphanTransactions.count(hash))
return false;
CDataStream* pvMsg = new CDataStream(vMsg);
// Ignore big transactions, to avoid a
// send-big-orphans memory exhaustion attack. If a peer has a legitimate
// large transaction with a missing parent then we assume
// it will rebroadcast it later, after the parent transaction(s)
// have been mined or received.
// 10,000 orphans, each of at most 5,000 bytes, add up to
// at most 50 megabytes of orphans:
if (pvMsg->size() > 5000)
{
printf("ignoring large orphan tx (size: %u, hash: %s)\n", pvMsg->size(), hash.ToString().substr(0,10).c_str());
delete pvMsg;
return false;
}
mapOrphanTransactions[hash] = pvMsg;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapOrphanTransactionsByPrev[txin.prevout.hash].insert(make_pair(hash, pvMsg));
printf("stored orphan tx %s (mapsz %u)\n", hash.ToString().substr(0,10).c_str(),
mapOrphanTransactions.size());
return true;
}
void static EraseOrphanTx(uint256 hash)
{
if (!mapOrphanTransactions.count(hash))
return;
const CDataStream* pvMsg = mapOrphanTransactions[hash];
CTransaction tx;
CDataStream(*pvMsg) >> tx;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
{
mapOrphanTransactionsByPrev[txin.prevout.hash].erase(hash);
if (mapOrphanTransactionsByPrev[txin.prevout.hash].empty())
mapOrphanTransactionsByPrev.erase(txin.prevout.hash);
}
delete pvMsg;
mapOrphanTransactions.erase(hash);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
unsigned int nEvicted = 0;
while (mapOrphanTransactions.size() > nMaxOrphans)
{
// Evict a random orphan:
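// (A random victim means an attacker cannot predict which orphan a
// flood will evict, making targeted eviction of honest orphans hard.)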
uint256 randomhash = GetRandHash();
map<uint256, CDataStream*>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
if (it == mapOrphanTransactions.end())
it = mapOrphanTransactions.begin();
EraseOrphanTx(it->first);
++nEvicted;
}
return nEvicted;
}
//////////////////////////////////////////////////////////////////////////////
//
// CTransaction and CTxIndex
//
bool CTransaction::ReadFromDisk(CTxDB& txdb, const uint256& hash, CTxIndex& txindexRet)
{
SetNull();
if (!txdb.ReadTxIndex(hash, txindexRet))
return false;
if (!ReadFromDisk(txindexRet.pos))
return false;
return true;
}
bool CTransaction::ReadFromDisk(CTxDB& txdb, COutPoint prevout, CTxIndex& txindexRet)
{
if (!ReadFromDisk(txdb, prevout.hash, txindexRet))
return false;
if (prevout.n >= vout.size())
{
SetNull();
return false;
}
return true;
}
bool CTransaction::ReadFromDisk(CTxDB& txdb, COutPoint prevout)
{
CTxIndex txindex;
return ReadFromDisk(txdb, prevout, txindex);
}
bool CTransaction::ReadFromDisk(COutPoint prevout)
{
CTxDB txdb("r");
CTxIndex txindex;
return ReadFromDisk(txdb, prevout, txindex);
}
bool CTransaction::IsStandard() const
{
BOOST_FOREACH(const CTxIn& txin, vin)
{
// Biggest 'standard' txin is a 3-signature 3-of-3 CHECKMULTISIG
// pay-to-script-hash, which is 3 ~80-byte signatures, 3
// ~65-byte public keys, plus a few script ops.
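// (3 * ~80 bytes of signatures + 3 * ~65 bytes of keys = ~435 bytes,
// leaving headroom under the 500-byte cap checked below.)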
if (txin.scriptSig.size() > 500)
return false;
if (!txin.scriptSig.IsPushOnly())
return false;
}
unsigned int nDataOut = 0;
txnouttype whichType;
BOOST_FOREACH(const CTxOut& txout, vout) {
if (!::IsStandard(txout.scriptPubKey, whichType)) {
return false;
}
if (whichType == TX_NULL_DATA)
nDataOut++;
}
// only one OP_RETURN txout is permitted
if (nDataOut > 1) {
return false;
}
return true;
}
//
// Check transaction inputs, and make sure any
// pay-to-script-hash transactions are evaluating IsStandard scripts
//
// Why bother? To avoid denial-of-service attacks; an attacker
// can submit a standard HASH... OP_EQUAL transaction,
// which will get accepted into blocks. The redemption
// script can be anything; an attacker could use a very
// expensive-to-check-upon-redemption script like:
// DUP CHECKSIG DROP ... repeated 100 times... OP_1
//
bool CTransaction::AreInputsStandard(const MapPrevTx& mapInputs) const
{
if (IsCoinBase())
return true; // Coinbases don't use vin normally
for (unsigned int i = 0; i < vin.size(); i++)
{
const CTxOut& prev = GetOutputFor(vin[i], mapInputs);
vector<vector<unsigned char> > vSolutions;
txnouttype whichType;
// get the scriptPubKey corresponding to this input:
const CScript& prevScript = prev.scriptPubKey;
if (!Solver(prevScript, whichType, vSolutions))
return false;
int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
if (nArgsExpected < 0)
return false;
// Transactions with extra stuff in their scriptSigs are
// non-standard. Note that this EvalScript() call will
// be quick, because if there are any operations
// beside "push data" in the scriptSig the
// IsStandard() call returns false
vector<vector<unsigned char> > stack;
if (!EvalScript(stack, vin[i].scriptSig, *this, i, 0))
return false;
if (whichType == TX_SCRIPTHASH)
{
if (stack.empty())
return false;
CScript subscript(stack.back().begin(), stack.back().end());
vector<vector<unsigned char> > vSolutions2;
txnouttype whichType2;
if (!Solver(subscript, whichType2, vSolutions2))
return false;
if (whichType2 == TX_SCRIPTHASH)
return false;
int tmpExpected;
tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2);
if (tmpExpected < 0)
return false;
nArgsExpected += tmpExpected;
}
if (stack.size() != (unsigned int)nArgsExpected)
return false;
}
return true;
}
unsigned int
CTransaction::GetLegacySigOpCount() const
{
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTxIn& txin, vin)
{
nSigOps += txin.scriptSig.GetSigOpCount(false);
}
BOOST_FOREACH(const CTxOut& txout, vout)
{
nSigOps += txout.scriptPubKey.GetSigOpCount(false);
}
return nSigOps;
}
int CMerkleTx::SetMerkleBranch(const CBlock* pblock)
{
if (fClient)
{
if (hashBlock == 0)
return 0;
}
else
{
CBlock blockTmp;
if (pblock == NULL)
{
// Load the block this tx is in
CTxIndex txindex;
if (!CTxDB("r").ReadTxIndex(GetHash(), txindex))
return 0;
if (!blockTmp.ReadFromDisk(txindex.pos.nFile, txindex.pos.nBlockPos))
return 0;
pblock = &blockTmp;
}
// Update the tx's hashBlock
hashBlock = pblock->GetHash();
// Locate the transaction
for (nIndex = 0; nIndex < (int)pblock->vtx.size(); nIndex++)
if (pblock->vtx[nIndex] == *(CTransaction*)this)
break;
if (nIndex == (int)pblock->vtx.size())
{
vMerkleBranch.clear();
nIndex = -1;
printf("ERROR: SetMerkleBranch() : couldn't find tx in block\n");
return 0;
}
// Fill in merkle branch
vMerkleBranch = pblock->GetMerkleBranch(nIndex);
}
// Is the tx in a block that's in the main chain
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
if (mi == mapBlockIndex.end())
return 0;
CBlockIndex* pindex = (*mi).second;
if (!pindex || !pindex->IsInMainChain())
return 0;
return pindexBest->nHeight - pindex->nHeight + 1;
}
bool CTransaction::IsRestrictedCoinStake() const
{
if (!IsCoinStake())
return false;
int64 nValueIn = 0;
CScript onlyAllowedScript;
for (unsigned int i = 0; i < vin.size(); ++i)
{
const COutPoint& prevout = vin[i].prevout;
CTxDB txdb("r");
CTransaction txPrev;
CTxIndex txindex;
if (!txPrev.ReadFromDisk(txdb, prevout, txindex))
return false;
txdb.Close();
const CTxOut& prevtxo = txPrev.vout[prevout.n];
const CScript& prevScript = prevtxo.scriptPubKey;
if (i == 0)
{
onlyAllowedScript = prevScript;
if (onlyAllowedScript.empty())
{
return false;
}
}
else
{
if (prevScript != onlyAllowedScript)
{
return false;
}
}
nValueIn += prevtxo.nValue;
}
int64 nValueOut = 0;
for (unsigned int i = 1; i < vout.size(); ++i)
{
const CTxOut& txo = vout[i];
if (txo.nValue == 0)
continue ;
if (txo.scriptPubKey != onlyAllowedScript)
return false;
nValueOut += txo.nValue;
}
if (nValueOut < nValueIn)
return false;
return true;
}
bool CTransaction::CheckTransaction() const
{
// Basic checks that don't depend on any context
if (vin.empty())
return DoS(10, error("CTransaction::CheckTransaction() : vin empty"));
if (vout.empty())
return DoS(10, error("CTransaction::CheckTransaction() : vout empty"));
// Time (prevent mempool memory exhaustion attack)
if (nTime > GetAdjustedTime() + MAX_CLOCK_DRIFT)
return DoS(10, error("CTransaction::CheckTransaction() : timestamp is too far into the future"));
// Size limits
if (::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
return DoS(100, error("CTransaction::CheckTransaction() : size limits failed"));
// Check for negative or overflow output values
int64 nValueOut = 0;
for (size_t i = 0; i < vout.size(); i++)
{
const CTxOut& txout = vout[i];
if (txout.IsEmpty() && (!IsCoinBase()) && (!IsCoinStake()))
return DoS(100, error("CTransaction::CheckTransaction() : txout empty for user transaction"));
// ppcoin: enforce minimum output amount
if ((!txout.IsEmpty()) && txout.nValue < MIN_TXOUT_AMOUNT)
return DoS(100, error("CTransaction::CheckTransaction() : txout.nValue below minimum (%d)", txout.nValue));
if (txout.nValue > MAX_MONEY_STACK)
return DoS(100, error("CTransaction::CheckTransaction() : txout.nValue too high (%d)", txout.nValue));
nValueOut += txout.nValue;
if (!IsValidAmount(nValueOut))
return DoS(100, error("CTransaction::CheckTransaction() : txout total out of range"));
}
// Check for duplicate inputs
set<COutPoint> vInOutPoints;
BOOST_FOREACH(const CTxIn& txin, vin)
{
if (vInOutPoints.count(txin.prevout))
return false;
vInOutPoints.insert(txin.prevout);
}
if (IsCoinBase())
{
if (vin[0].scriptSig.size() < 2 || vin[0].scriptSig.size() > 100)
return DoS(100, error("CTransaction::CheckTransaction() : coinbase script size"));
}
else
{
BOOST_FOREACH(const CTxIn& txin, vin)
if (txin.prevout.IsNull())
return DoS(10, error("CTransaction::CheckTransaction() : prevout is null"));
}
return true;
}
bool CTxMemPool::accept(CTxDB& txdb, CTransaction &tx, bool fCheckInputs,
bool* pfMissingInputs)
{
if (pfMissingInputs)
*pfMissingInputs = false;
if (!tx.CheckTransaction())
return error("CTxMemPool::accept() : CheckTransaction failed");
// Coinbase is only valid in a block, not as a loose transaction
if (tx.IsCoinBase())
return tx.DoS(100, error("CTxMemPool::accept() : coinbase as individual tx"));
// ppcoin: coinstake is also only valid in a block, not as a loose transaction
if (tx.IsCoinStake())
return tx.DoS(100, error("CTxMemPool::accept() : coinstake as individual tx"));
// To help v0.1.5 clients who would see it as a negative number
if ((int64)tx.nLockTime > std::numeric_limits<int>::max())
return error("CTxMemPool::accept() : not accepting nLockTime beyond 2038 yet");
// Rather not work on nonstandard transactions
if (!tx.IsStandard())
return error("CTxMemPool::accept() : nonstandard transaction type");
// Do we already have it?
uint256 hash = tx.GetHash();
{
LOCK(cs);
if (mapTx.count(hash))
return false;
}
if (fCheckInputs)
if (txdb.ContainsTx(hash))
return false;
// Check for conflicts with in-memory transactions
CTransaction* ptxOld = NULL;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
COutPoint outpoint = tx.vin[i].prevout;
if (mapNextTx.count(outpoint))
{
// Disable replacement feature for now
return false;
// Allow replacing with a newer version of the same transaction
if (i != 0)
return false;
ptxOld = mapNextTx[outpoint].ptx;
if (ptxOld->IsFinal())
return false;
if (!tx.IsNewerThan(*ptxOld))
return false;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
COutPoint outpoint = tx.vin[i].prevout;
if (!mapNextTx.count(outpoint) || mapNextTx[outpoint].ptx != ptxOld)
return false;
}
break;
}
}
if (fCheckInputs)
{
MapPrevTx mapInputs;
map<uint256, CTxIndex> mapUnused;
bool fInvalid = false;
if (!tx.FetchInputs(txdb, mapUnused, false, false, mapInputs, fInvalid))
{
if (fInvalid)
return error("CTxMemPool::accept() : FetchInputs found invalid tx %s", hash.ToString().substr(0,10).c_str());
if (pfMissingInputs)
*pfMissingInputs = true;
return error("CTxMemPool::accept() : FetchInputs failed %s", hash.ToString().substr(0,10).c_str());
}
// Check for non-standard pay-to-script-hash in inputs
if (!tx.AreInputsStandard(mapInputs))
return error("CTxMemPool::accept() : nonstandard transaction input");
// Note: if you modify this code to accept non-standard transactions, then
// you should add code here to check that the transaction does a<|fim▁hole|> // reasonable number of ECDSA signature verifications.
int64 nFees = tx.GetValueIn(mapInputs)-tx.GetValueOut();
unsigned int nSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
// Don't accept it if it can't get into a block
if (nFees < tx.GetMinFee(1000, false, GMF_RELAY))
return error("CTxMemPool::accept() : not enough fees");
// Continuously rate-limit free transactions
// This mitigates 'penny-flooding' -- sending thousands of free transactions just to
// be annoying or make other's transactions take longer to confirm.
if (nFees < MIN_RELAY_TX_FEES)
{
static CCriticalSection cs;
static double dFreeCount;
static int64 nLastTime;
int64 nNow = GetTime();
{
LOCK(cs);
// Use an exponentially decaying ~10-minute window:
dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
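// e.g. 600 seconds of inactivity scale the window down by
// (1 - 1/600)^600 ~= 1/e ~= 0.37, hence the "~10-minute" figure above.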
nLastTime = nNow;
// -limitfreerelay unit is thousand-bytes-per-minute
// At default rate it would take over a month to fill 1GB
if (dFreeCount > GetArg("-limitfreerelay", 15)*10*1000 && !IsFromMe(tx))
return error("CTxMemPool::accept() : free transaction rejected by rate limiter");
if (fDebug)
printf("Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
dFreeCount += nSize;
}
}
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
if (!tx.ConnectInputs(txdb, mapInputs, mapUnused, CDiskTxPos(1,1,1), pindexBest, false, false))
{
return error("CTxMemPool::accept() : ConnectInputs failed %s", hash.ToString().substr(0,10).c_str());
}
}
// Store transaction in memory
{
LOCK(cs);
if (ptxOld)
{
printf("CTxMemPool::accept() : replacing tx %s with new version\n", ptxOld->GetHash().ToString().c_str());
remove(*ptxOld);
}
addUnchecked(tx);
}
///// are we sure this is ok when loading transactions or restoring block txes
// If updated, erase old tx from wallet
if (ptxOld)
EraseFromWallets(ptxOld->GetHash());
printf("CTxMemPool::accept() : accepted %s\n", hash.ToString().substr(0,10).c_str());
return true;
}
bool CTransaction::AcceptToMemoryPool(CTxDB& txdb, bool fCheckInputs, bool* pfMissingInputs)
{
return mempool.accept(txdb, *this, fCheckInputs, pfMissingInputs);
}
bool CTxMemPool::addUnchecked(CTransaction &tx)
{
printf("addUnchecked(): size %lu\n", mapTx.size());
// Add to memory pool without checking anything. Don't call this directly,
// call CTxMemPool::accept to properly check the transaction first.
{
LOCK(cs);
uint256 hash = tx.GetHash();
mapTx[hash] = tx;
for (unsigned int i = 0; i < tx.vin.size(); i++)
mapNextTx[tx.vin[i].prevout] = CInPoint(&mapTx[hash], i);
nTransactionsUpdated++;
}
return true;
}
bool CTxMemPool::remove(CTransaction &tx)
{
// Remove transaction from memory pool
{
LOCK(cs);
uint256 hash = tx.GetHash();
if (mapTx.count(hash))
{
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapNextTx.erase(txin.prevout);
mapTx.erase(hash);
nTransactionsUpdated++;
}
}
return true;
}
void CTxMemPool::queryHashes(std::vector<uint256>& vtxid)
{
vtxid.clear();
LOCK(cs);
vtxid.reserve(mapTx.size());
for (map<uint256, CTransaction>::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi)
vtxid.push_back((*mi).first);
}
int CMerkleTx::GetDepthInMainChain(CBlockIndex* &pindexRet) const
{
if (hashBlock == 0 || nIndex == -1)
return 0;
// Find the block it claims to be in
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
if (mi == mapBlockIndex.end())
return 0;
CBlockIndex* pindex = (*mi).second;
if (!pindex || !pindex->IsInMainChain())
return 0;
// Make sure the merkle branch connects to this block
if (!fMerkleVerified)
{
if (CBlock::CheckMerkleBranch(GetHash(), vMerkleBranch, nIndex) != pindex->hashMerkleRoot)
return 0;
fMerkleVerified = true;
}
pindexRet = pindex;
return pindexBest->nHeight - pindex->nHeight + 1;
}
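// Worked example (illustrative, assuming COINBASE_MATURITY were 100):
// a generation tx at depth 1 reports 100 blocks to maturity, reaching 0
// once its depth hits COINBASE_MATURITY + 1; depth 0 (not yet in the
// chain) reports the full COINBASE_MATURITY.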
int CMerkleTx::GetBlocksToMaturity() const
{
if (!(IsCoinBase() || IsCoinStake()))
return 0;
int depth = GetDepthInMainChain();
if (depth == 0) // Not in the blockchain
return COINBASE_MATURITY;
return max(0, COINBASE_MATURITY - (depth - 1));
}
bool CMerkleTx::AcceptToMemoryPool(CTxDB& txdb, bool fCheckInputs)
{
if (fClient)
{
if (!IsInMainChain() && !ClientConnectInputs())
return false;
return CTransaction::AcceptToMemoryPool(txdb, false);
}
else
{
return CTransaction::AcceptToMemoryPool(txdb, fCheckInputs);
}
}
bool CMerkleTx::AcceptToMemoryPool()
{
CTxDB txdb("r");
return AcceptToMemoryPool(txdb);
}
bool CWalletTx::AcceptWalletTransaction(CTxDB& txdb, bool fCheckInputs)
{
{
LOCK(mempool.cs);
// Add previous supporting transactions first
BOOST_FOREACH(CMerkleTx& tx, vtxPrev)
{
if (!(tx.IsCoinBase() || tx.IsCoinStake()))
{
uint256 hash = tx.GetHash();
if (!mempool.exists(hash) && !txdb.ContainsTx(hash))
tx.AcceptToMemoryPool(txdb, fCheckInputs);
}
}
return AcceptToMemoryPool(txdb, fCheckInputs);
}
return false;
}
bool CWalletTx::AcceptWalletTransaction()
{
CTxDB txdb("r");
return AcceptWalletTransaction(txdb);
}
int CTxIndex::GetDepthInMainChain() const
{
// Read block header
CBlock block;
if (!block.ReadFromDisk(pos.nFile, pos.nBlockPos, false))
return 0;
// Find the block in the index
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(block.GetHash());
if (mi == mapBlockIndex.end())
return 0;
CBlockIndex* pindex = (*mi).second;
if (!pindex || !pindex->IsInMainChain())
return 0;
return 1 + nBestHeight - pindex->nHeight;
}
// Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock
bool GetTransaction(const uint256 &hash, CTransaction &tx, uint256 &hashBlock)
{
{
LOCK(cs_main);
{
LOCK(mempool.cs);
if (mempool.exists(hash))
{
tx = mempool.lookup(hash);
return true;
}
}
CTxDB txdb("r");
CTxIndex txindex;
if (tx.ReadFromDisk(txdb, hash, txindex))
{
CBlock block;
if (block.ReadFromDisk(txindex.pos.nFile, txindex.pos.nBlockPos, false))
hashBlock = block.GetHash();
return true;
}
// look for transaction in disconnected blocks to find orphaned CoinBase and CoinStake transactions
BOOST_FOREACH(PAIRTYPE(const uint256, CBlockIndex*)& item, mapBlockIndex)
{
CBlockIndex* pindex = item.second;
if (pindex == pindexBest || pindex->pnext != 0)
continue;
CBlock block;
if (!block.ReadFromDisk(pindex))
continue;
BOOST_FOREACH(const CTransaction& txOrphan, block.vtx)
{
if (txOrphan.GetHash() == hash)
{
tx = txOrphan;
return true;
}
}
}
}
return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//
bool CBlock::ReadFromDisk(const CBlockIndex* pindex, bool fReadTransactions)
{
if (!fReadTransactions)
{
*this = pindex->GetBlockHeader();
return true;
}
if (!ReadFromDisk(pindex->nFile, pindex->nBlockPos, fReadTransactions))
return false;
if (GetHash() != pindex->GetBlockHash())
return error("CBlock::ReadFromDisk() : GetHash() doesn't match index");
return true;
}
uint256 static GetOrphanRoot(const CBlock* pblock)
{
// Work back to the first block in the orphan chain
while (mapOrphanBlocks.count(pblock->hashPrevBlock))
pblock = mapOrphanBlocks[pblock->hashPrevBlock];
return pblock->GetHash();
}
// ppcoin: find block wanted by given orphan block
uint256 WantedByOrphan(const CBlock* pblockOrphan)
{
// Work back to the first block in the orphan chain
while (mapOrphanBlocks.count(pblockOrphan->hashPrevBlock))
pblockOrphan = mapOrphanBlocks[pblockOrphan->hashPrevBlock];
return pblockOrphan->hashPrevBlock;
}
//
// minimum amount of work that could possibly be required nTime after
// minimum work required was nBase
//
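// e.g. two days of nTime allows one doubling up front plus one per
// elapsed day: 2 * 2 * 2 = 8x easier, always capped at POW_MAX_TARGET.
//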
unsigned int ComputeMinWork(unsigned int nBase, int64 nTime)
{
CBigNum bnResult;
bnResult.SetCompact(nBase);
bnResult *= 2;
while (nTime > 0 && bnResult < POW_MAX_TARGET)
{
// Maximum 200% adjustment per day...
bnResult *= 2;
nTime -= 24 * 60 * 60;
}
if (bnResult > POW_MAX_TARGET)
bnResult = POW_MAX_TARGET;
return bnResult.GetCompact();
}
// ppcoin: find last block index up to pindex
const CBlockIndex* GetLastBlockIndex(const CBlockIndex* pindex, bool fProofOfStake)
{
while (pindex && pindex->pprev && (pindex->IsProofOfStake() != fProofOfStake))
pindex = pindex->pprev;
return pindex;
}
bool CheckProofOfWork(uint256 hash, unsigned int nBits, bool triggerErrors)
{
CBigNum bnTarget;
bnTarget.SetCompact(nBits);
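// (nBits is a compact target encoding: e.g. 0x1d00ffff expands to
// 0x00ffff * 2^(8 * (0x1d - 3)), the classic minimum-difficulty target.)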
// Check range
if (bnTarget <= 0 || bnTarget > POW_MAX_TARGET)
return triggerErrors ? error("CheckProofOfWork() : nBits below minimum work") : false;
// Check proof of work matches claimed amount
if (hash > bnTarget.getuint256())
return triggerErrors ? error("CheckProofOfWork() : hash doesn't match nBits") : false;
return true;
}
// Return the maximum number of blocks that other nodes claim to have
int GetNumBlocksOfPeers()
{
return std::max(cPeerBlockCounts.median(), Checkpoints::GetTotalBlocksEstimate());
}
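// Heuristic used below: we are still "catching up" while the tip keeps
// advancing (it moved within the last 10 seconds) and its timestamp is
// still more than 24 hours behind wall-clock time.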
bool IsInitialBlockDownload()
{
if (pindexBest == NULL || nBestHeight < Checkpoints::GetTotalBlocksEstimate())
return true;
static int64 nLastUpdate;
static CBlockIndex* pindexLastBest;
if (pindexBest != pindexLastBest)
{
pindexLastBest = pindexBest;
nLastUpdate = GetTime();
}
return (GetTime() - nLastUpdate < 10 &&
pindexBest->GetBlockTime() < GetTime() - 24 * 60 * 60);
}
void static InvalidChainFound(CBlockIndex* pindexNew)
{
if (pindexNew->bnChainTrust > bnBestInvalidTrust)
{
bnBestInvalidTrust = pindexNew->bnChainTrust;
CTxDB().WriteBestInvalidTrust(bnBestInvalidTrust);
MainFrameRepaint();
}
printf("InvalidChainFound: invalid block=%s height=%d trust=%s\n", pindexNew->GetBlockHash().ToString().substr(0,20).c_str(), pindexNew->nHeight, CBigNum(pindexNew->bnChainTrust).ToString().c_str());
printf("InvalidChainFound: current best=%s height=%d trust=%s\n", hashBestChain.ToString().substr(0,20).c_str(), nBestHeight, CBigNum(bnBestChainTrust).ToString().c_str());
// ppcoin: should not enter safe mode for longer invalid chain
}
void CBlock::UpdateTime(const CBlockIndex* pindexPrev)
{
nTime = max(GetBlockTime(), GetAdjustedTime());
}
bool CTransaction::DisconnectInputs(CTxDB& txdb)
{
// Relinquish previous transactions' spent pointers
if (!IsCoinBase())
{
BOOST_FOREACH(const CTxIn& txin, vin)
{
COutPoint prevout = txin.prevout;
// Get prev txindex from disk
CTxIndex txindex;
if (!txdb.ReadTxIndex(prevout.hash, txindex))
return error("DisconnectInputs() : ReadTxIndex failed");
if (prevout.n >= txindex.vSpent.size())
return error("DisconnectInputs() : prevout.n out of range");
// Mark outpoint as not spent
txindex.vSpent[prevout.n].SetNull();
// Write back
if (!txdb.UpdateTxIndex(prevout.hash, txindex))
return error("DisconnectInputs() : UpdateTxIndex failed");
}
}
// Remove transaction from index
// This can fail if a duplicate of this transaction was in a chain that got
// reorganized away. This is only possible if this transaction was completely
// spent, so erasing it would be a no-op anyway.
txdb.EraseTxIndex(*this);
return true;
}
bool CTransaction::FetchInputs(CTxDB& txdb, const map<uint256, CTxIndex>& mapTestPool,
bool fBlock, bool fMiner, MapPrevTx& inputsRet, bool& fInvalid)
{
// FetchInputs can return false either because we just haven't seen some inputs
// (in which case the transaction should be stored as an orphan)
// or because the transaction is malformed (in which case the transaction should
// be dropped). If tx is definitely invalid, fInvalid will be set to true.
fInvalid = false;
if (IsCoinBase())
return true; // Coinbase transactions have no inputs to fetch.
for (unsigned int i = 0; i < vin.size(); i++)
{
COutPoint prevout = vin[i].prevout;
if (inputsRet.count(prevout.hash))
continue; // Got it already
// Read txindex
CTxIndex& txindex = inputsRet[prevout.hash].first;
bool fFound = true;
if ((fBlock || fMiner) && mapTestPool.count(prevout.hash))
{
// Get txindex from current proposed changes
txindex = mapTestPool.find(prevout.hash)->second;
}
else
{
// Read txindex from txdb
fFound = txdb.ReadTxIndex(prevout.hash, txindex);
}
if (!fFound && (fBlock || fMiner))
return fMiner ? false : error("FetchInputs() : %s prev tx %s index entry not found", GetHash().ToString().substr(0,10).c_str(), prevout.hash.ToString().substr(0,10).c_str());
// Read txPrev
CTransaction& txPrev = inputsRet[prevout.hash].second;
if (!fFound || txindex.pos == CDiskTxPos(1,1,1))
{
// Get prev tx from single transactions in memory
{
LOCK(mempool.cs);
if (!mempool.exists(prevout.hash))
return error("FetchInputs() : %s mempool Tx prev not found %s", GetHash().ToString().substr(0,10).c_str(), prevout.hash.ToString().substr(0,10).c_str());
txPrev = mempool.lookup(prevout.hash);
}
if (!fFound)
txindex.vSpent.resize(txPrev.vout.size());
}
else
{
// Get prev tx from disk
if (!txPrev.ReadFromDisk(txindex.pos))
return error("FetchInputs() : %s ReadFromDisk prev tx %s failed", GetHash().ToString().substr(0,10).c_str(), prevout.hash.ToString().substr(0,10).c_str());
}
}
// Make sure all prevout.n's are valid:
for (unsigned int i = 0; i < vin.size(); i++)
{
const COutPoint prevout = vin[i].prevout;
assert(inputsRet.count(prevout.hash) != 0);
const CTxIndex& txindex = inputsRet[prevout.hash].first;
const CTransaction& txPrev = inputsRet[prevout.hash].second;
if (prevout.n >= txPrev.vout.size() || prevout.n >= txindex.vSpent.size())
{
// Revisit this if/when transaction replacement is implemented and allows
// adding inputs:
fInvalid = true;
return DoS(100, error("FetchInputs() : %s prevout.n out of range %d %d %d prev tx %s\n%s", GetHash().ToString().substr(0,10).c_str(), prevout.n, txPrev.vout.size(), txindex.vSpent.size(), prevout.hash.ToString().substr(0,10).c_str(), txPrev.ToString().c_str()));
}
}
return true;
}
const CTxOut& CTransaction::GetOutputFor(const CTxIn& input, const MapPrevTx& inputs) const
{
MapPrevTx::const_iterator mi = inputs.find(input.prevout.hash);
if (mi == inputs.end())
throw std::runtime_error("CTransaction::GetOutputFor() : prevout.hash not found");
const CTransaction& txPrev = (mi->second).second;
if (input.prevout.n >= txPrev.vout.size())
throw std::runtime_error("CTransaction::GetOutputFor() : prevout.n out of range");
return txPrev.vout[input.prevout.n];
}
int64 CTransaction::GetValueIn(const MapPrevTx& inputs) const
{
if (IsCoinBase())
return 0;
int64 nResult = 0;
for (unsigned int i = 0; i < vin.size(); i++)
{
nResult += GetOutputFor(vin[i], inputs).nValue;
}
return nResult;
}
unsigned int CTransaction::GetP2SHSigOpCount(const MapPrevTx& inputs) const
{
if (IsCoinBase())
return 0;
unsigned int nSigOps = 0;
for (unsigned int i = 0; i < vin.size(); i++)
{
const CTxOut& prevout = GetOutputFor(vin[i], inputs);
if (prevout.scriptPubKey.IsPayToScriptHash())
nSigOps += prevout.scriptPubKey.GetSigOpCount(vin[i].scriptSig);
}
return nSigOps;
}
bool CTransaction::ConnectInputs(CTxDB& txdb, MapPrevTx inputs,
map<uint256, CTxIndex>& mapTestPool, const CDiskTxPos& posThisTx,
const CBlockIndex* pindexBlock, bool fBlock, bool fMiner, bool fStrictPayToScriptHash)
{
// Take over previous transactions' spent pointers
// fBlock is true when this is called from AcceptBlock when a new best-block is added to the blockchain
// fMiner is true when called from the internal bitcoin miner
// ... both are false when called from CTransaction::AcceptToMemoryPool
if (!IsCoinBase())
{
int64 nValueIn = 0;
int64 nFees = 0;
for (unsigned int i = 0; i < vin.size(); i++)
{
COutPoint prevout = vin[i].prevout;
assert(inputs.count(prevout.hash) > 0);
CTxIndex& txindex = inputs[prevout.hash].first;
CTransaction& txPrev = inputs[prevout.hash].second;
if (prevout.n >= txPrev.vout.size() || prevout.n >= txindex.vSpent.size())
return DoS(100, error("ConnectInputs() : %s prevout.n out of range %d %d %d prev tx %s\n%s", GetHash().ToString().substr(0,10).c_str(), prevout.n, txPrev.vout.size(), txindex.vSpent.size(), prevout.hash.ToString().substr(0,10).c_str(), txPrev.ToString().c_str()));
// If prev is coinbase/coinstake, check that it's matured
if (txPrev.IsCoinBase() || txPrev.IsCoinStake())
for (const CBlockIndex* pindex = pindexBlock; pindex && pindexBlock->nHeight - pindex->nHeight < COINBASE_MATURITY; pindex = pindex->pprev)
if (pindex->nBlockPos == txindex.pos.nBlockPos && pindex->nFile == txindex.pos.nFile)
return error("ConnectInputs() : tried to spend coinbase/coinstake at depth %d", pindexBlock->nHeight - pindex->nHeight);
// ppcoin: check transaction timestamp
if (txPrev.nTime > nTime)
return DoS(100, error("ConnectInputs() : transaction timestamp earlier than input transaction"));
// Check for negative or overflow input values
nValueIn += txPrev.vout[prevout.n].nValue;
if (!IsValidAmount(txPrev.vout[prevout.n].nValue) || !IsValidAmount(nValueIn))
return DoS(100, error("ConnectInputs() : txin values out of range"));
}
// The first loop above does all the inexpensive checks.
// Only if ALL inputs pass do we perform expensive ECDSA signature checks.
// Helps prevent CPU exhaustion attacks.
for (unsigned int i = 0; i < vin.size(); i++)
{
COutPoint prevout = vin[i].prevout;
assert(inputs.count(prevout.hash) > 0);
CTxIndex& txindex = inputs[prevout.hash].first;
CTransaction& txPrev = inputs[prevout.hash].second;
// Check for conflicts (double-spend)
// This doesn't trigger the DoS code on purpose; if it did, it would make it easier
// for an attacker to attempt to split the network.
if (!txindex.vSpent[prevout.n].IsNull())
return fMiner ? false : error("ConnectInputs() : %s prev tx already used at %s", GetHash().ToString().substr(0,10).c_str(), txindex.vSpent[prevout.n].ToString().c_str());
// Skip ECDSA signature verification when connecting blocks (fBlock=true)
// before the last blockchain checkpoint. This is safe because block merkle hashes are
// still computed and checked, and any change will be caught at the next checkpoint.
if (!(fBlock && (nBestHeight < Checkpoints::GetTotalBlocksEstimate())))
{
// Verify signature
if (!VerifySignature(txPrev, *this, i, fStrictPayToScriptHash, 0))
{
// only during transition phase for P2SH: do not invoke anti-DoS code for
// potentially old clients relaying bad P2SH transactions
if (fStrictPayToScriptHash && VerifySignature(txPrev, *this, i, false, 0))
return error("ConnectInputs() : %s P2SH VerifySignature failed", GetHash().ToString().substr(0,10).c_str());
return DoS(100,error("ConnectInputs() : %s VerifySignature failed", GetHash().ToString().substr(0,10).c_str()));
}
}
// Mark outpoints as spent
txindex.vSpent[prevout.n] = posThisTx;
// Write back
if (fBlock || fMiner)
{
mapTestPool[prevout.hash] = txindex;
}
}
if (IsCoinStake())
{
// ppcoin: coin stake tx earns reward instead of paying fee
uint64 nCoinAge;
if (!GetCoinAge(txdb, nCoinAge))
return error("ConnectInputs() : %s unable to get coin age for coinstake", GetHash().ToString().substr(0,10).c_str());
int64 nStakeReward = GetValueOut() - nValueIn;
if (nStakeReward > GetProofOfStakeReward(nCoinAge, pindexBlock->pprev->nHeight) - GetMinFee() + MIN_TX_FEES)
return DoS(100, error("ConnectInputs() : %s stake reward exceeded", GetHash().ToString().substr(0,10).c_str()));
}
else
{
if (nValueIn < GetValueOut())
return DoS(100, error("ConnectInputs() : %s value in < value out", GetHash().ToString().substr(0,10).c_str()));
// Tally transaction fees
int64 nTxFee = nValueIn - GetValueOut();
if (nTxFee < 0)
return DoS(100, error("ConnectInputs() : %s nTxFee < 0", GetHash().ToString().substr(0,10).c_str()));
// ppcoin: enforce transaction fees for every block
if (nTxFee < GetMinFee())
return fBlock? DoS(100, error("ConnectInputs() : %s not paying required fee=%s, paid=%s", GetHash().ToString().substr(0,10).c_str(), FormatMoney(GetMinFee()).c_str(), FormatMoney(nTxFee).c_str())) : false;
nFees += nTxFee;
if (!IsValidAmount(nFees))
return DoS(100, error("ConnectInputs() : nFees out of range"));
}
}
return true;
}
bool CTransaction::ClientConnectInputs()
{
if (IsCoinBase())
return false;
// Take over previous transactions' spent pointers
{
LOCK(mempool.cs);
int64 nValueIn = 0;
for (unsigned int i = 0; i < vin.size(); i++)
{
// Get prev tx from single transactions in memory
COutPoint prevout = vin[i].prevout;
if (!mempool.exists(prevout.hash))
return false;
CTransaction& txPrev = mempool.lookup(prevout.hash);
if (prevout.n >= txPrev.vout.size())
return false;
// Verify signature
if (!VerifySignature(txPrev, *this, i, true, 0))
return error("ConnectInputs() : VerifySignature failed");
///// this is redundant with the mempool.mapNextTx stuff,
///// not sure which I want to get rid of
///// this has to go away now that posNext is gone
// // Check for conflicts
// if (!txPrev.vout[prevout.n].posNext.IsNull())
// return error("ConnectInputs() : prev tx already used");
//
// // Flag outpoints as used
// txPrev.vout[prevout.n].posNext = posThisTx;
nValueIn += txPrev.vout[prevout.n].nValue;
if (!IsValidAmount(txPrev.vout[prevout.n].nValue) || !IsValidAmount(nValueIn)) {
return error("ClientConnectInputs() : txin values out of range");
}
}
if (GetValueOut() > nValueIn) {
return false;
}
}
return true;
}
bool CBlock::DisconnectBlock(CTxDB& txdb, CBlockIndex* pindex)
{
// Disconnect in reverse order
for (int i = vtx.size()-1; i >= 0; i--)
if (!vtx[i].DisconnectInputs(txdb))
return false;
// Update block index on disk without changing it in memory.
// The memory index structure will be changed after the db commits.
if (pindex->pprev)
{
CDiskBlockIndex blockindexPrev(pindex->pprev);
blockindexPrev.hashNext = 0;
if (!txdb.WriteBlockIndex(blockindexPrev))
return error("DisconnectBlock() : WriteBlockIndex failed");
}
// ppcoin: clean up wallet after disconnecting coinstake
BOOST_FOREACH(CTransaction& tx, vtx)
SyncWithWallets(tx, this, false, false);
return true;
}
bool CBlock::ConnectBlock(CTxDB& txdb, CBlockIndex* pindex)
{
// Check it again in case a previous version let a bad block in
if (!CheckBlock())
return false;
// Check coinbase reward
if (IsProofOfWork() && vtx[0].GetValueOut() > GetProofOfWorkReward(pindex->pprev ? pindex->pprev->nHeight : -1) - vtx[0].GetMinFee() + MIN_TX_FEES)
return DoS(50, error("ConnectBlock() : coinbase reward exceeded %s > %s", FormatMoney(vtx[0].GetValueOut()).c_str(), FormatMoney(GetProofOfWorkReward(pindex->pprev ? pindex->pprev->nHeight : -1) - vtx[0].GetMinFee() + MIN_TX_FEES).c_str()));
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
// If such overwrites are allowed, coinbases and transactions depending upon those
// can be duplicated to remove the ability to spend the first instance -- even after
// being sent to another address.
// See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
// already refuses previously-known transaction id's entirely.
BOOST_FOREACH(CTransaction& tx, vtx)
{
CTxIndex txindexOld;
if (txdb.ReadTxIndex(tx.GetHash(), txindexOld))
{
BOOST_FOREACH(CDiskTxPos &pos, txindexOld.vSpent)
if (pos.IsNull())
return false;
}
}
// BIP16 didn't become active until Apr 1 2012
int64 nBIP16SwitchTime = 1333238400;
bool fStrictPayToScriptHash = (pindex->nTime >= nBIP16SwitchTime);
//// issue here: it doesn't know the version
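// Compute the disk offset of the first transaction: the serialized size of
// an empty block is the header plus two zero-length compact sizes (for the
// empty vtx and block-signature vectors); subtracting those placeholders
// and adding the compact size of the real transaction count yields the
// offset of vtx[0] within the block on disk.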
unsigned int nTxPos = pindex->nBlockPos + ::GetSerializeSize(CBlock(), SER_DISK, CLIENT_VERSION) - (2 * GetSizeOfCompactSize(0)) + GetSizeOfCompactSize(vtx.size());
map<uint256, CTxIndex> mapQueuedChanges;
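// Transaction index updates are queued here and written to the database in
// a single batch once every transaction in the block has validated.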
int64 nFees = 0;
int64 nValueIn = 0;
int64 nValueOut = 0;
unsigned int nSigOps = 0;
BOOST_FOREACH(CTransaction& tx, vtx)
{
nSigOps += tx.GetLegacySigOpCount();
if (nSigOps > MAX_BLOCK_SIGOPS)
return DoS(100, error("ConnectBlock() : too many sigops"));
CDiskTxPos posThisTx(pindex->nFile, pindex->nBlockPos, nTxPos);
nTxPos += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
MapPrevTx mapInputs;
if (tx.IsCoinBase())
nValueOut += tx.GetValueOut();
else
{
bool fInvalid;
if (!tx.FetchInputs(txdb, mapQueuedChanges, true, false, mapInputs, fInvalid))
return false;
if (fStrictPayToScriptHash)
{
// Add in sigops done by pay-to-script-hash inputs;
// this is to prevent a "rogue miner" from creating
// an incredibly-expensive-to-validate block.
nSigOps += tx.GetP2SHSigOpCount(mapInputs);
if (nSigOps > MAX_BLOCK_SIGOPS)
return DoS(100, error("ConnectBlock() : too many sigops"));
}
int64 nTxValueIn = tx.GetValueIn(mapInputs);
int64 nTxValueOut = tx.GetValueOut();
nValueIn += nTxValueIn;
nValueOut += nTxValueOut;
if (!tx.IsCoinStake())
nFees += nTxValueIn - nTxValueOut;
if (!tx.ConnectInputs(txdb, mapInputs, mapQueuedChanges, posThisTx, pindex, true, false, fStrictPayToScriptHash))
return false;
}
mapQueuedChanges[tx.GetHash()] = CTxIndex(posThisTx, tx.vout.size());
}
// ppcoin: track money supply and mint amount info
pindex->nMint = nValueOut - nValueIn + nFees;
pindex->nMoneySupply = (pindex->pprev? pindex->pprev->nMoneySupply : 0) + nValueOut - nValueIn;
if (!txdb.WriteBlockIndex(CDiskBlockIndex(pindex)))
return error("Connect() : WriteBlockIndex for pindex failed");
// Write queued txindex changes
for (map<uint256, CTxIndex>::iterator mi = mapQueuedChanges.begin(); mi != mapQueuedChanges.end(); ++mi)
{
if (!txdb.UpdateTxIndex((*mi).first, (*mi).second))
return error("ConnectBlock() : UpdateTxIndex failed");
}
// ppcoin: fees are not collected by miners as in bitcoin
// ppcoin: fees are destroyed to compensate the entire network
if (fDebug && GetBoolArg("-printcreation"))
printf("ConnectBlock() : destroy=%s nFees=%"PRI64d"\n", FormatMoney(nFees).c_str(), nFees);
// Update block index on disk without changing it in memory.
// The memory index structure will be changed after the db commits.
if (pindex->pprev)
{
CDiskBlockIndex blockindexPrev(pindex->pprev);
blockindexPrev.hashNext = pindex->GetBlockHash();
if (!txdb.WriteBlockIndex(blockindexPrev))
return error("ConnectBlock() : WriteBlockIndex for blockindexPrev failed");
}
// Watch for transactions paying to me
BOOST_FOREACH(CTransaction& tx, vtx)
SyncWithWallets(tx, this, true);
return true;
}
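// Switch the best chain to the branch ending at pindexNew: find the fork
// point, disconnect blocks from the current tip back to the fork, connect
// the blocks of the new branch, commit everything to disk, then fix up the
// in-memory pnext pointers and move transactions between the disconnected
// blocks and the memory pool.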
bool Reorganize(CTxDB& txdb, CBlockIndex* pindexNew)
{
printf("REORGANIZE\n");
// Find the fork
CBlockIndex* pfork = pindexBest;
CBlockIndex* plonger = pindexNew;
while (pfork != plonger)
{
while (plonger->nHeight > pfork->nHeight)
if (!(plonger = plonger->pprev))
return error("Reorganize() : plonger->pprev is null");
if (pfork == plonger)
break;
if (!(pfork = pfork->pprev))
return error("Reorganize() : pfork->pprev is null");
}
// List of what to disconnect
vector<CBlockIndex*> vDisconnect;
for (CBlockIndex* pindex = pindexBest; pindex != pfork; pindex = pindex->pprev)
vDisconnect.push_back(pindex);
// List of what to connect
vector<CBlockIndex*> vConnect;
for (CBlockIndex* pindex = pindexNew; pindex != pfork; pindex = pindex->pprev)
vConnect.push_back(pindex);
reverse(vConnect.begin(), vConnect.end());
printf("REORGANIZE: Disconnect %i blocks; %s..%s\n", vDisconnect.size(), pfork->GetBlockHash().ToString().substr(0,20).c_str(), pindexBest->GetBlockHash().ToString().substr(0,20).c_str());
printf("REORGANIZE: Connect %i blocks; %s..%s\n", vConnect.size(), pfork->GetBlockHash().ToString().substr(0,20).c_str(), pindexNew->GetBlockHash().ToString().substr(0,20).c_str());
// Disconnect shorter branch
vector<CTransaction> vResurrect;
BOOST_FOREACH(CBlockIndex* pindex, vDisconnect)
{
CBlock block;
if (!block.ReadFromDisk(pindex))
return error("Reorganize() : ReadFromDisk for disconnect failed");
if (!block.DisconnectBlock(txdb, pindex))
return error("Reorganize() : DisconnectBlock %s failed", pindex->GetBlockHash().ToString().substr(0,20).c_str());
// Queue memory transactions to resurrect
BOOST_FOREACH(const CTransaction& tx, block.vtx)
if (!(tx.IsCoinBase() || tx.IsCoinStake()))
vResurrect.push_back(tx);
}
// Connect longer branch
vector<CTransaction> vDelete;
for (unsigned int i = 0; i < vConnect.size(); i++)
{
CBlockIndex* pindex = vConnect[i];
CBlock block;
if (!block.ReadFromDisk(pindex))
return error("Reorganize() : ReadFromDisk for connect failed");
if (!block.ConnectBlock(txdb, pindex))
{
// Invalid block
txdb.TxnAbort();
return error("Reorganize() : ConnectBlock %s failed", pindex->GetBlockHash().ToString().substr(0,20).c_str());
}
// Queue memory transactions to delete
BOOST_FOREACH(const CTransaction& tx, block.vtx)
vDelete.push_back(tx);
}
if (!txdb.WriteHashBestChain(pindexNew->GetBlockHash()))
return error("Reorganize() : WriteHashBestChain failed");
// Make sure it's successfully written to disk before changing memory structure
if (!txdb.TxnCommit())
return error("Reorganize() : TxnCommit failed");
// Disconnect shorter branch
BOOST_FOREACH(CBlockIndex* pindex, vDisconnect)
if (pindex->pprev)
pindex->pprev->pnext = NULL;
// Connect longer branch
BOOST_FOREACH(CBlockIndex* pindex, vConnect)
if (pindex->pprev)
pindex->pprev->pnext = pindex;
// Resurrect memory transactions that were in the disconnected branch
BOOST_FOREACH(CTransaction& tx, vResurrect)
tx.AcceptToMemoryPool(txdb, false);
// Delete redundant memory transactions that are in the connected branch
BOOST_FOREACH(CTransaction& tx, vDelete)
mempool.remove(tx);
printf("REORGANIZE: done\n");
return true;
}
// Called from inside SetBestChain: attaches a block to the new best chain being built
bool CBlock::SetBestChainInner(CTxDB& txdb, CBlockIndex *pindexNew)
{
uint256 hash = GetHash();
// Adding to current best branch
if (!ConnectBlock(txdb, pindexNew) || !txdb.WriteHashBestChain(hash))
{
txdb.TxnAbort();
InvalidChainFound(pindexNew);
return false;
}
if (!txdb.TxnCommit())
return error("SetBestChain() : TxnCommit failed");
// Add to current best branch
pindexNew->pprev->pnext = pindexNew;
// Delete redundant memory transactions
BOOST_FOREACH(CTransaction& tx, vtx)
mempool.remove(tx);
return true;
}
bool CBlock::SetBestChain(CTxDB& txdb, CBlockIndex* pindexNew)
{
uint256 hash = GetHash();
if (!txdb.TxnBegin())
return error("SetBestChain() : TxnBegin failed");
if (pindexGenesisBlock == NULL && hash == GENESIS_HASH)
{
txdb.WriteHashBestChain(hash);
if (!txdb.TxnCommit())
return error("SetBestChain() : TxnCommit failed");
pindexGenesisBlock = pindexNew;
}
else if (hashPrevBlock == hashBestChain)
{
if (!SetBestChainInner(txdb, pindexNew))
return error("SetBestChain() : SetBestChainInner failed");
}
else
{
// the first block in the new chain that will cause it to become the new best chain
CBlockIndex *pindexIntermediate = pindexNew;
// list of blocks that need to be connected afterwards
std::vector<CBlockIndex*> vpindexSecondary;
// Reorganize is costly in terms of db load, as it works in a single db transaction.
// Try to limit how much needs to be done inside
while (pindexIntermediate->pprev && pindexIntermediate->pprev->bnChainTrust > pindexBest->bnChainTrust)
{
vpindexSecondary.push_back(pindexIntermediate);
pindexIntermediate = pindexIntermediate->pprev;
}
if (!vpindexSecondary.empty())
printf("Postponing %i reconnects\n", vpindexSecondary.size());
// Switch to new best branch
if (!Reorganize(txdb, pindexIntermediate))
{
txdb.TxnAbort();
InvalidChainFound(pindexNew);
return error("SetBestChain() : Reorganize failed");
}
// Connect further blocks
BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vpindexSecondary)
{
CBlock block;
if (!block.ReadFromDisk(pindex))
{
printf("SetBestChain() : ReadFromDisk failed\n");
break;
}
if (!txdb.TxnBegin()) {
printf("SetBestChain() : TxnBegin 2 failed\n");
break;
}
// Errors from here on are not fatal: we have already reorganized to the new chain in a valid way
if (!block.SetBestChainInner(txdb, pindex))
break;
}
}
// Update best block in wallet (so we can detect restored wallets)
bool fIsInitialDownload = IsInitialBlockDownload();
if (!fIsInitialDownload)
{
const CBlockLocator locator(pindexNew);
::SetBestChain(locator);
}
// New best block
hashBestChain = hash;
pindexBest = pindexNew;
nBestHeight = pindexBest->nHeight;
bnBestChainTrust = pindexNew->bnChainTrust;
nTimeBestReceived = GetTime();
nTransactionsUpdated++;
printf("SetBestChain: new best=%s height=%d trust=%s moneysupply=%s\n", hashBestChain.ToString().substr(0,20).c_str(), nBestHeight, bnBestChainTrust.ToString().c_str(), FormatMoney(pindexBest->nMoneySupply).c_str());
std::string strCmd = GetArg("-blocknotify", "");
if (!fIsInitialDownload && !strCmd.empty())
{
boost::replace_all(strCmd, "%s", hashBestChain.GetHex());
boost::thread t(runCommand, strCmd); // thread runs free
}
return true;
}
// ppcoin: total coin age spent in transaction, in the unit of coin-days.
// Only coins meeting the minimum age requirement count. Transactions not
// in the main chain are not currently indexed, so we may not be able to
// determine their coin age; older transactions are guaranteed to be in
// the main chain by the sync-checkpoint. This rule is introduced to help
// nodes establish a consistent view of the coin age (trust score) of
// competing branches.
bool CTransaction::GetCoinAge(CTxDB& txdb, uint64& nCoinAge) const
{
CBigNum bnCentSecond = 0; // coin age in the unit of cent-seconds
nCoinAge = 0;
if (IsCoinBase())
return true;
BOOST_FOREACH(const CTxIn& txin, vin)
{
// First try finding the previous transaction in database
CTransaction txPrev;
CTxIndex txindex;
if (!txPrev.ReadFromDisk(txdb, txin.prevout, txindex))
continue; // previous transaction not in main chain
if (nTime < txPrev.nTime)
return false; // Transaction timestamp violation
// Read block header
CBlock block;
if (!block.ReadFromDisk(txindex.pos.nFile, txindex.pos.nBlockPos, false))
return false; // unable to read block of previous transaction
if (block.GetBlockTime() + STAKE_MIN_AGE > nTime)
continue; // only count coins meeting min age requirement
int64 nValueIn = txPrev.vout[txin.prevout.n].nValue;
bnCentSecond += CBigNum(nValueIn) * (nTime-txPrev.nTime) / CENT;
if (fDebug && GetBoolArg("-printcoinage"))
{
printf("coin age nValueIn=%-12"PRI64d" nTimeDiff=%d bnCentSecond=%s\n", nValueIn, nTime - txPrev.nTime, bnCentSecond.ToString().c_str());
}
}
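// Convert accumulated cent-seconds to coin-days:
// cent-seconds * CENT = satoshi-seconds, / COIN = coin-seconds,
// / (24 * 60 * 60) = coin-days.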
CBigNum bnCoinDay = bnCentSecond * CENT / COIN / (24 * 60 * 60);
if (fDebug && GetBoolArg("-printcoinage"))
printf("coin age bnCoinDay=%s\n", bnCoinDay.ToString().c_str());
nCoinAge = bnCoinDay.getuint64();
return true;
}
// ppcoin: total coin age spent in block, in the unit of coin-days.
bool CBlock::GetCoinAge(uint64& nCoinAge) const
{
nCoinAge = 0;
CTxDB txdb("r");
BOOST_FOREACH(const CTransaction& tx, vtx)
{
uint64 nTxCoinAge;
if (tx.GetCoinAge(txdb, nTxCoinAge))
nCoinAge += nTxCoinAge;
else
return false;
}
if (nCoinAge == 0) // block coin age minimum 1 coin-day
nCoinAge = 1;
if (fDebug && GetBoolArg("-printcoinage"))
printf("block coin age total nCoinDays=%"PRI64d"\n", nCoinAge);
return true;
}
bool CBlock::AddToBlockIndex(unsigned int nFile, unsigned int nBlockPos)
{
// Check for duplicate
uint256 hash = GetHash();
if (mapBlockIndex.count(hash))
return error("AddToBlockIndex() : %s already exists", hash.ToString().substr(0,20).c_str());
// Construct new block index object
CBlockIndex* pindexNew = new CBlockIndex(nFile, nBlockPos, *this);
if (!pindexNew)
return error("AddToBlockIndex() : new CBlockIndex failed");
pindexNew->phashBlock = &hash;
map<uint256, CBlockIndex*>::iterator miPrev = mapBlockIndex.find(hashPrevBlock);
if (miPrev != mapBlockIndex.end())
{
pindexNew->pprev = (*miPrev).second;
pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
}
// ppcoin: compute chain trust score
pindexNew->bnChainTrust = (pindexNew->pprev ? pindexNew->pprev->bnChainTrust : 0) + pindexNew->GetBlockTrust();
// ppcoin: compute stake entropy bit for stake modifier
if (!pindexNew->SetStakeEntropyBit(GetStakeEntropyBit()))
return error("AddToBlockIndex() : SetStakeEntropyBit() failed");
// ppcoin: record proof-of-stake hash value
if (pindexNew->IsProofOfStake())
{
if (!mapProofOfStake.count(hash))
return error("AddToBlockIndex() : hashProofOfStake not found in map");
pindexNew->hashProofOfStake = mapProofOfStake[hash];
}
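// The stake modifier mixes in data from later blocks so that a staker
// cannot precompute future proof-of-stake kernel hashes for its outputs
// (see ComputeNextStakeModifier).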
// ppcoin: compute stake modifier
uint64 nStakeModifier = 0;
bool fGeneratedStakeModifier = false;
if (!ComputeNextStakeModifier(pindexNew, nStakeModifier, fGeneratedStakeModifier))
return error("AddToBlockIndex() : ComputeNextStakeModifier() failed");
pindexNew->SetStakeModifier(nStakeModifier, fGeneratedStakeModifier);
pindexNew->nStakeModifierChecksum = GetStakeModifierChecksum(pindexNew);
if (!CheckStakeModifierCheckpoints(pindexNew->nHeight, pindexNew->nStakeModifierChecksum))
return error("AddToBlockIndex() : Rejected by stake modifier checkpoint height=%d, modifier=0x%016"PRI64x", checksum=0x%08x", pindexNew->nHeight, nStakeModifier, pindexNew->nStakeModifierChecksum);
// Add to mapBlockIndex
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
if (pindexNew->IsProofOfStake())
setStakeSeen.insert(make_pair(pindexNew->prevoutStake, pindexNew->nStakeTime));
pindexNew->phashBlock = &((*mi).first);
// Write to disk block index
CTxDB txdb;
if (!txdb.TxnBegin())
return false;
txdb.WriteBlockIndex(CDiskBlockIndex(pindexNew));
if (!txdb.TxnCommit())
return false;
// New best
if (pindexNew->bnChainTrust > bnBestChainTrust)
if (!SetBestChain(txdb, pindexNew))
return false;
txdb.Close();
if (pindexNew == pindexBest)
{
// Notify UI to display prev block's coinbase if it was ours
static uint256 hashPrevBestCoinBase;
UpdatedTransaction(hashPrevBestCoinBase);
hashPrevBestCoinBase = vtx[0].GetHash();
}
MainFrameRepaint();
return true;
}
bool CBlock::CheckBlock() const
{
// These are checks that are independent of context
// that can be verified before saving an orphan block.
// Size limits
if (vtx.empty() || vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
return DoS(100, error("CheckBlock() : size limits failed"));
// Check proof of work matches claimed amount
if (IsProofOfWork() && !CheckProofOfWork(GetHash(), nBits))
return DoS(50, error("CheckBlock() : proof of work failed"));
// Check timestamp
if (GetBlockTime() > GetAdjustedTime() + MAX_CLOCK_DRIFT)
return error("CheckBlock() : block timestamp too far in the future");
// First transaction must be coinbase, the rest must not be
if (vtx.empty() || !vtx[0].IsCoinBase())
return DoS(100, error("CheckBlock() : first tx is not coinbase"));
for (unsigned int i = 1; i < vtx.size(); i++)
if (vtx[i].IsCoinBase())
return DoS(100, error("CheckBlock() : more than one coinbase"));
// ppcoin: only the second transaction can be the optional coinstake
for (size_t i = 2; i < vtx.size(); i++)
if (vtx[i].IsCoinStake())
return DoS(100, error("CheckBlock() : coinstake in wrong position"));
// ppcoin: coinbase output should be empty if proof-of-stake block
if (IsProofOfStake() && (vtx[0].vout.size() != 1 || !vtx[0].vout[0].IsEmpty()))
return error("CheckBlock() : coinbase output not empty for proof-of-stake block");
// Check coinbase timestamp
if (GetBlockTime() > (int64)vtx[0].nTime + MAX_CLOCK_DRIFT)
return DoS(50, error("CheckBlock() : coinbase timestamp is too early (block: %d, vtx[0]: %d)", GetBlockTime(), vtx[0].nTime));
// Check coinstake timestamp
if (IsProofOfStake() && !CheckCoinStakeTimestamp(GetBlockTime(), (int64)vtx[1].nTime))
return DoS(50, error("CheckBlock() : coinstake timestamp violation nTimeBlock=%u nTimeTx=%u", GetBlockTime(), vtx[1].nTime));
// Check coinbase reward
//
// Note: We're not doing the reward check here, because we need to know the block height.
// Check inside ConnectBlock instead.
// Check transactions
BOOST_FOREACH(const CTransaction& tx, vtx)
{
if (!tx.CheckTransaction())
return DoS(tx.nDoS, error("CheckBlock() : CheckTransaction failed"));
// ppcoin: check transaction timestamp
if (GetBlockTime() < (int64)tx.nTime)
return DoS(50, error("CheckBlock() : block timestamp earlier than transaction timestamp"));
}
// Check for duplicate txids. This is caught by ConnectInputs(),
// but catching it earlier avoids a potential DoS attack:
set<uint256> uniqueTx;
BOOST_FOREACH(const CTransaction& tx, vtx)
{
uniqueTx.insert(tx.GetHash());
}
if (uniqueTx.size() != vtx.size())
return DoS(100, error("CheckBlock() : duplicate transaction"));
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTransaction& tx, vtx)
{
nSigOps += tx.GetLegacySigOpCount();
}
if (nSigOps > MAX_BLOCK_SIGOPS)
return DoS(100, error("CheckBlock() : out-of-bounds SigOpCount"));
// Check merkleroot
if (hashMerkleRoot != BuildMerkleTree())
return DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"));
// ppcoin: check block signature
if (!CheckBlockSignature())
return DoS(100, error("CheckBlock() : bad block signature"));
return true;
}
bool CBlock::AcceptBlock()
{
// Check for duplicate
uint256 hash = GetHash();
if (mapBlockIndex.count(hash))
return error("AcceptBlock() : block already in mapBlockIndex");
// Get prev block index
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashPrevBlock);
if (mi == mapBlockIndex.end())
return DoS(10, error("AcceptBlock() : prev block not found"));
CBlockIndex* pindexPrev = (*mi).second;
int nHeight = pindexPrev->nHeight+1;
// Check proof-of-work or proof-of-stake
if (nBits != GetNextTargetRequired(pindexPrev, IsProofOfStake()))
return DoS(100, error("AcceptBlock() : incorrect proof-of-work/proof-of-stake"));
// Check timestamp against prev
if (GetBlockTime() <= pindexPrev->GetMedianTimePast() || GetBlockTime() + MAX_CLOCK_DRIFT < pindexPrev->GetBlockTime())
return error("AcceptBlock() : block's timestamp is too early");
// Check that all transactions are finalized
BOOST_FOREACH(const CTransaction& tx, vtx)
if (!tx.IsFinal(nHeight, GetBlockTime()))
return DoS(10, error("AcceptBlock() : contains a non-final transaction"));
// Check that the block chain matches the known block chain up to a hardened checkpoint
if (!Checkpoints::CheckHardened(nHeight, hash))
return DoS(100, error("AcceptBlock() : rejected by hardened checkpoint lockin at %d", nHeight));
// ppcoin: check that the block satisfies synchronized checkpoint
if (!Checkpoints::CheckSync(hash, pindexPrev))
return error("AcceptBlock() : rejected by synchronized checkpoint");
// Write block to history file
if (!CheckDiskSpace(::GetSerializeSize(*this, SER_DISK, CLIENT_VERSION)))
return error("AcceptBlock() : out of disk space");
unsigned int nFile = -1;
unsigned int nBlockPos = 0;
if (!WriteToDisk(nFile, nBlockPos))
return error("AcceptBlock() : WriteToDisk failed");
if (!AddToBlockIndex(nFile, nBlockPos))
return error("AcceptBlock() : AddToBlockIndex failed");
// Relay inventory, but don't relay old inventory during initial block download
int nBlockEstimate = Checkpoints::GetTotalBlocksEstimate();
if (hashBestChain == hash)
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
if (nBestHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
pnode->PushInventory(CInv(MSG_BLOCK, hash));
}
// ppcoin: check pending sync-checkpoint
Checkpoints::AcceptPendingSyncCheckpoint();
return true;
}
bool ProcessBlock(CNode* pfrom, CBlock* pblock)
{
// Check for duplicate
uint256 hash = pblock->GetHash();
if (mapBlockIndex.count(hash))
return error("ProcessBlock() : already have block %d %s", mapBlockIndex.at(hash)->nHeight, hash.ToString().substr(0,20).c_str());
if (mapOrphanBlocks.count(hash))
return error("ProcessBlock() : already have block (orphan) %s", hash.ToString().substr(0,20).c_str());
// ppcoin: check proof-of-stake
if (pblock->IsProofOfStake())
{
std::pair<COutPoint, unsigned int> proofOfStake = pblock->GetProofOfStake();
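// A block reusing the stake kernel of the current best block may indicate
// a duplicate-stake attack: relay the duplicate so other nodes see it,
// blacklist the kernel, and roll the best chain back to the parent block
// so that neither of the competing blocks remains best.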
if (pindexBest->IsProofOfStake() && proofOfStake.first == pindexBest->prevoutStake)
{
if (!pblock->CheckBlockSignature())
{
if (pfrom)
pfrom->Misbehaving(100);
return error("ProcessBlock() : invalid signature in a duplicate Proof-of-Stake kernel");
}
RelayBlock(*pblock, hash);
BlacklistProofOfStake(proofOfStake, hash);
CTxDB txdb;
CBlock bestPrevBlock;
bestPrevBlock.ReadFromDisk(pindexBest->pprev);
if (!bestPrevBlock.SetBestChain(txdb, pindexBest->pprev))
return error("ProcessBlock() : Proof-of-stake rollback failed");
return error("ProcessBlock() : duplicate Proof-of-Stake kernel (%s, %d) in block %s", pblock->GetProofOfStake().first.ToString().c_str(), pblock->GetProofOfStake().second, hash.ToString().c_str());
}
else if (setStakeSeen.count(proofOfStake) && !mapOrphanBlocksByPrev.count(hash) && !Checkpoints::WantedByPendingSyncCheckpoint(hash))
{
return error("ProcessBlock() : duplicate Proof-of-Stake kernel (%s, %d) in block %s", proofOfStake.first.ToString().c_str(), proofOfStake.second, hash.ToString().c_str());
}
}
// Preliminary checks
if (!pblock->CheckBlock())
return error("ProcessBlock() : CheckBlock FAILED");
CBlockIndex* pcheckpoint = Checkpoints::GetLastSyncCheckpoint();
if (pcheckpoint && pblock->hashPrevBlock != hashBestChain && !Checkpoints::WantedByPendingSyncCheckpoint(hash))
{
// Extra checks to prevent "fill up memory by spamming with bogus blocks"
int64 deltaTime = pblock->GetBlockTime() - pcheckpoint->nTime;
CBigNum bnNewBlock;
bnNewBlock.SetCompact(pblock->nBits);
CBigNum bnRequired;
bnRequired.SetCompact(ComputeMinWork(GetLastBlockIndex(pcheckpoint, pblock->IsProofOfStake())->nBits, deltaTime));
if (bnNewBlock > bnRequired)
{
if (pfrom)
pfrom->Misbehaving(100);
return error("ProcessBlock() : block with too little %s", pblock->IsProofOfStake()? "proof-of-stake" : "proof-of-work");
}
}
// ppcoin: ask for pending sync-checkpoint if any
if (!IsInitialBlockDownload())
Checkpoints::AskForPendingSyncCheckpoint(pfrom);
// We remove the previous block from the blacklisted kernels, if needed
CleanProofOfStakeBlacklist(pblock->hashPrevBlock);
// Find the previous block
std::map<uint256, CBlockIndex*>::iterator parentBlockIt = mapBlockIndex.find(pblock->hashPrevBlock);
// If we don't already have it, shunt off the block to the holding area until we get its parent
if (parentBlockIt == mapBlockIndex.end())
{
printf("ProcessBlock: ORPHAN BLOCK, prev=%s\n", pblock->hashPrevBlock.ToString().substr(0,20).c_str());
CBlock* pblock2 = new CBlock(*pblock);
// ppcoin: check proof-of-stake
if (pblock2->IsProofOfStake())
setStakeSeenOrphan.insert(pblock2->GetProofOfStake());
mapOrphanBlocks.insert(make_pair(hash, pblock2));
mapOrphanBlocksByPrev.insert(make_pair(pblock2->hashPrevBlock, pblock2));
// Ask this guy to fill in what we're missing
if (pfrom)
{
pfrom->PushGetBlocks(pindexBest, GetOrphanRoot(pblock2));
// ppcoin: getblocks may not obtain the ancestor block that was rejected
// earlier by the duplicate-stake check, so we ask for it again directly
if (!IsInitialBlockDownload())
{
pfrom->AskFor(CInv(MSG_BLOCK, WantedByOrphan(pblock2)));
}
}
return true;
}
// ppcoin: verify hash target and signature of coinstake tx
if (pblock->IsProofOfStake())
{
uint256 hashProofOfStake = 0;
const CBlockIndex * pindexPrev = parentBlockIt->second;
if (!CheckProofOfStake(pindexPrev, pblock->vtx[1], pblock->nBits, hashProofOfStake))
{
printf("WARNING: ProcessBlock(): check proof-of-stake failed for block %s\n", hash.ToString().c_str());
return false; // do not error here as we expect this during initial block download
}
if (!mapProofOfStake.count(hash)) // add to mapProofOfStake
{
mapProofOfStake.insert(make_pair(hash, hashProofOfStake));
}
}
// Store to disk
if (!pblock->AcceptBlock())
return error("ProcessBlock() : AcceptBlock FAILED");
// Recursively process any orphan blocks that depended on this one
vector<uint256> vWorkQueue;
vWorkQueue.push_back(hash);
for (unsigned int i = 0; i < vWorkQueue.size(); i++)
{
uint256 hashPrev = vWorkQueue[i];
CBlockIndex* pindexPrev = mapBlockIndex.at(hashPrev);
for (multimap<uint256, CBlock*>::iterator mi = mapOrphanBlocksByPrev.lower_bound(hashPrev); mi != mapOrphanBlocksByPrev.upper_bound(hashPrev); ++mi)
{
bool validated = true;
CBlock* pblockOrphan = (*mi).second;
uint256 orphanHash = pblockOrphan->GetHash();
if (pblockOrphan->IsProofOfStake())
{
uint256 hashProofOfStake = 0;
if (CheckProofOfStake(pindexPrev, pblockOrphan->vtx[1], pblockOrphan->nBits, hashProofOfStake))
{
if (!mapProofOfStake.count(orphanHash))
mapProofOfStake.insert(make_pair(orphanHash, hashProofOfStake));
validated = true;
}
else
{
validated = false;
}
}
if (validated && pblockOrphan->AcceptBlock())
vWorkQueue.push_back(orphanHash);
mapOrphanBlocks.erase(orphanHash);
setStakeSeenOrphan.erase(pblockOrphan->GetProofOfStake());
delete pblockOrphan;
}
mapOrphanBlocksByPrev.erase(hashPrev);
}
printf("ProcessBlock: ACCEPTED\n");
// ppcoin: if responsible for sync-checkpoint send it
if (pfrom && !CHECKPOINT_PRIVATE_KEY.empty())
Checkpoints::SendSyncCheckpoint(Checkpoints::AutoSelectSyncCheckpoint());
return true;
}
// ppcoin: sign block
bool CBlock::SignBlock(const CKeyStore& keystore)
{
vector<valtype> vSolutions;
txnouttype whichType;
const CTxOut& txout = IsProofOfStake()? vtx[1].vout[1] : vtx[0].vout[0];
if (!Solver(txout.scriptPubKey, whichType, vSolutions))
return false;
if (whichType == TX_PUBKEY)
{
// Sign
const valtype& vchPubKey = vSolutions[0];
CKey key;
if (!keystore.GetKey(Hash160(vchPubKey), key))
return false;
if (key.GetPubKey() != vchPubKey)
return false;
return key.Sign(GetHash(), vchBlockSig);
}
else if (whichType == TX_SCRIPTHASH)
{
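// Pay-to-script-hash blocks can only be signed via a cold-minting script:
// resolve the redeem script, confirm it is TX_COLDMINTING, and sign with
// the minting key (the first solution entry is its key hash).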
CScript subscript;
if (!keystore.GetCScript(CScriptID(uint160(vSolutions[0])), subscript))
return false;
if (!Solver(subscript, whichType, vSolutions))
return false;
if (whichType != TX_COLDMINTING)
return false;
CKey key;
if (!keystore.GetKey(uint160(vSolutions[0]), key))
return false;
return key.Sign(GetHash(), vchBlockSig);
}
return false;
}
// ppcoin: check block signature
bool CBlock::CheckBlockSignature() const
{
if (GetHash() == GENESIS_HASH)
return vchBlockSig.empty();
vector<valtype> vSolutions;
txnouttype whichType;
const CTxOut& txout = IsProofOfStake()? vtx[1].vout[1] : vtx[0].vout[0];
if (!Solver(txout.scriptPubKey, whichType, vSolutions))
return false;
if (whichType == TX_PUBKEY)
{
const valtype& vchPubKey = vSolutions[0];
CKey key;
if (!key.SetPubKey(vchPubKey))
return false;
if (vchBlockSig.empty())
return false;
return key.Verify(GetHash(), vchBlockSig);
}
else if (whichType == TX_SCRIPTHASH)
{
// Output is a pay-to-script-hash
// Only allowed with cold minting
if (!IsProofOfStake())
return false;
// CoinStake scriptSig should contain 3 pushes: the signature, the pubkey and the cold minting script
CScript scriptSig = vtx[1].vin[0].scriptSig;
if (!scriptSig.IsPushOnly())
return false;
vector<vector<unsigned char> > stack;
if (!EvalScript(stack, scriptSig, CTransaction(), 0, 0))
return false;
if (stack.size() != 3)
return false;
// Verify the script is a cold minting script
const valtype& scriptSerialized = stack.back();
CScript script(scriptSerialized.begin(), scriptSerialized.end());
if (!Solver(script, whichType, vSolutions))
return false;
if (whichType != TX_COLDMINTING)
return false;
// Verify the scriptSig pubkey matches the minting key
valtype& vchPubKey = stack[1];
if (Hash160(vchPubKey) != uint160(vSolutions[0]))
return false;
// Verify the block signature with the minting key
CKey key;
if (!key.SetPubKey(vchPubKey))
return false;
if (vchBlockSig.empty())
return false;
return key.Verify(GetHash(), vchBlockSig);
}
return false;
}
// ppcoin: entropy bit for stake modifier if chosen by modifier
unsigned int CBlock::GetStakeEntropyBit() const
{
unsigned int nEntropyBit = 0;
nEntropyBit = ((GetHash().Get64()) & 1llu); // last bit of block hash
if (fDebug && GetBoolArg("-printstakemodifier"))
printf("GetStakeEntropyBit(v0.4+): nTime=%u hashBlock=%s entropybit=%d\n", nTime, GetHash().ToString().c_str(), nEntropyBit);
return nEntropyBit;
}
bool CheckDiskSpace(uint64 nAdditionalBytes)
{
uint64 nFreeBytesAvailable = filesystem::space(GetDataDir()).available;
// Check for 15MB because database could create another 10MB log file at any time
if (nFreeBytesAvailable < (uint64)15000000 + nAdditionalBytes)
{
fShutdown = true;
string strMessage = _("Warning: Disk space is low");
strMiscWarning = strMessage;
printf("*** %s\n", strMessage.c_str());
ThreadSafeMessageBox(strMessage, COIN_NAME, wxOK | wxICON_EXCLAMATION | wxMODAL);
StartShutdown();
return false;
}
return true;
}
FILE* OpenBlockFile(unsigned int nFile, unsigned int nBlockPos, const char* pszMode)
{
if (nFile == static_cast<unsigned int>(-1))
return NULL;
FILE* file = fopen((GetDataDir() / strprintf("blk%04d.dat", nFile)).string().c_str(), pszMode);
if (!file)
return NULL;
if (nBlockPos != 0 && !strchr(pszMode, 'a') && !strchr(pszMode, 'w'))
{
if (fseek(file, nBlockPos, SEEK_SET) != 0)
{
fclose(file);
return NULL;
}
}
return file;
}
static unsigned int nCurrentBlockFile = 1;
FILE* AppendBlockFile(unsigned int& nFileRet)
{
nFileRet = 0;
INFINITE_LOOP
{
FILE* file = OpenBlockFile(nCurrentBlockFile, 0, "ab");
if (!file)
return NULL;
if (fseek(file, 0, SEEK_END) != 0)
return NULL;
// FAT32 filesize max 4GB, fseek and ftell max 2GB, so we must stay under 2GB
if (ftell(file) < 0x7F000000 - MAX_SIZE)
{
nFileRet = nCurrentBlockFile;
return file;
}
fclose(file);
nCurrentBlockFile++;
}
}
bool LoadBlockIndex(bool fAllowNew)
{
//
// Load block index
//
CTxDB txdb("cr");
if (!txdb.LoadBlockIndex())
return false;
txdb.Close();
//
// Init with genesis block
//
if (mapBlockIndex.empty())
{
if (!fAllowNew)
return false;
// Genesis Block:
// CBlock(hash=000000000019d6, ver=1, hashPrevBlock=00000000000000, hashMerkleRoot=4a5e1e, nTime=1231006505, nBits=1d00ffff, nNonce=2083236893, vtx=1)
// CTransaction(hash=4a5e1e, ver=1, vin.size=1, vout.size=1, nLockTime=0)
// CTxIn(COutPoint(000000, -1), coinbase 04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73)
// CTxOut(nValue=50.00000000, scriptPubKey=0x5F1DF16B2B704C8A578D0B)
// vMerkleTree: 4a5e1e
const char* pszTimestamp = GENESIS_IDENT;
CTransaction txNew;
txNew.nTime = GENESIS_TX_TIME;
txNew.vin.resize(1);
txNew.vout.resize(1);
txNew.vin[0].scriptSig = CScript() << 486604799 << CBigNum(9999) << vector<unsigned char>((const unsigned char*)pszTimestamp, (const unsigned char*)pszTimestamp + strlen(pszTimestamp));
txNew.vout[0].SetEmpty();
CBlock block;
block.vtx.push_back(txNew);
block.hashPrevBlock = 0;
block.hashMerkleRoot = block.BuildMerkleTree();
block.nVersion = GENESIS_BLOCK_VERSION;
block.nTime = GENESIS_BLOCK_TIME;
block.nBits = POW_INITIAL_TARGET.GetCompact();
block.nNonce = GENESIS_BLOCK_NONCE;
printf("target : %s\n", POW_INITIAL_TARGET.getuint256().ToString().c_str());
printf("nBits : %08X\n", block.nBits);
printf("expected genesis hash : %s\n", GENESIS_HASH.ToString().c_str());
printf("true genesis hash : %s\n", block.GetHash().ToString().c_str());
// If genesis block hash does not match, then generate new genesis hash.
if (block.GetHash() != GENESIS_HASH || !CheckProofOfWork(block.GetHash(), block.nBits, false)) {
printf("\n");
printf("FATAL ERROR: The genesis block is invalid.\n");
printf("Please notify the coins maintainers at " COIN_BUGTRACKER ".\n");
printf("If you're working on an Altcoin, we suggest you to use the following parameters as new genesis (wait a bit):\n");
// This will figure out a valid hash and Nonce if you're
// creating a different genesis block:
while (!CheckProofOfWork(block.GetHash(), block.nBits, false)) {
if ((block.nNonce & 0xFFF) == 0)
printf("Trying nonce %08X and above...\n", block.nNonce);
++block.nNonce;
if (block.nNonce == 0) {
printf("NONCE WRAPPED, incrementing time\n");
++block.nTime;
}
}
printf("A matching block has been found, with the following parameters:\n");
printf(" - GENESIS_MERKLE_HASH : %s\n", block.hashMerkleRoot.ToString().c_str());
printf(" - GENESIS_HASH : %s\n", block.GetHash().ToString().c_str());
printf(" - GENESIS_TIME : %u\n", block.nTime);
printf(" - GENESIS_NONCE : %u\n", block.nNonce);
std::exit( 1 );
}
//// debug print
assert(block.hashMerkleRoot == GENESIS_MERKLE_HASH);
assert(block.GetHash() == GENESIS_HASH);
assert(block.CheckBlock());
// Start new block file
unsigned int nFile;
unsigned int nBlockPos;
if (!block.WriteToDisk(nFile, nBlockPos))
return error("LoadBlockIndex() : writing genesis block to disk failed");
if (!block.AddToBlockIndex(nFile, nBlockPos))
return error("LoadBlockIndex() : genesis block not accepted");
// ppcoin: initialize synchronized checkpoint
if (!Checkpoints::WriteSyncCheckpoint(GENESIS_HASH)) {
return error("LoadBlockIndex() : failed to init sync checkpoint");
}
}
// ppcoin: if checkpoint master key changed must reset sync-checkpoint
{
CTxDB txdb;
string strPubKey = "";
if (!txdb.ReadCheckpointPubKey(strPubKey) || strPubKey != CHECKPOINT_PUBLIC_KEY)
{
// write checkpoint master key to db
txdb.TxnBegin();
if (!txdb.WriteCheckpointPubKey(CHECKPOINT_PUBLIC_KEY))
return error("LoadBlockIndex() : failed to write new checkpoint master key to db");
if (!txdb.TxnCommit())
return error("LoadBlockIndex() : failed to commit new checkpoint master key to db");
if (!Checkpoints::ResetSyncCheckpoint())
return error("LoadBlockIndex() : failed to reset sync-checkpoint");
}
txdb.Close();
}
return true;
}
void PrintBlockTree()
{
// precompute tree structure
map<CBlockIndex*, vector<CBlockIndex*> > mapNext;
for (map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.begin(); mi != mapBlockIndex.end(); ++mi)
{
CBlockIndex* pindex = (*mi).second;
mapNext[pindex->pprev].push_back(pindex);
// test
//while (rand() % 3 == 0)
// mapNext[pindex->pprev].push_back(pindex);
}
vector<pair<int, CBlockIndex*> > vStack;
vStack.push_back(make_pair(0, pindexGenesisBlock));
int nPrevCol = 0;
while (!vStack.empty())
{
int nCol = vStack.back().first;
CBlockIndex* pindex = vStack.back().second;
vStack.pop_back();
// print split or gap
if (nCol > nPrevCol)
{
for (int i = 0; i < nCol-1; i++)
printf("| ");
printf("|\\\n");
}
else if (nCol < nPrevCol)
{
for (int i = 0; i < nCol; i++)
printf("| ");
printf("|\n");
}
nPrevCol = nCol;
// print columns
for (int i = 0; i < nCol; i++)
printf("| ");
// print item
CBlock block;
block.ReadFromDisk(pindex);
printf("%d (%u,%u) %s %08lx %s mint %7s tx %d",
pindex->nHeight,
pindex->nFile,
pindex->nBlockPos,
block.GetHash().ToString().c_str(),
block.nBits,
DateTimeStrFormat(block.GetBlockTime()).c_str(),
FormatMoney(pindex->nMint).c_str(),
block.vtx.size());
PrintWallets(block);
// put the main timechain first
vector<CBlockIndex*>& vNext = mapNext[pindex];
for (unsigned int i = 0; i < vNext.size(); i++)
{
if (vNext[i]->pnext)
{
swap(vNext[0], vNext[i]);
break;
}
}
// iterate children
for (unsigned int i = 0; i < vNext.size(); i++)
vStack.push_back(make_pair(nCol+i, vNext[i]));
}
}
//////////////////////////////////////////////////////////////////////////////
//
// CAlert
//
map<uint256, CAlert> mapAlerts;
CCriticalSection cs_mapAlerts;
static string strMintMessage = _("Warning: Minting suspended due to locked wallet.");
static string strMintWarning;
string GetWarnings(string strFor)
{
int nPriority = 0;
string strStatusBar;
string strRPC;
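// Warnings are ranked by priority: minting wallet-lock warning (0), misc
// warnings such as low disk space (1000), network alerts (each alert's own
// priority), and an invalid checkpoint (3000). The highest-priority message
// wins the status bar; priorities above 1000 also set the RPC safe-mode
// message.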
if (GetBoolArg("-testsafemode"))
strRPC = "test";
// ppcoin: wallet lock warning for minting
if (strMintWarning != "")
{
nPriority = 0;
strStatusBar = strMintWarning;
}
// Misc warnings like out of disk space and clock is wrong
if (strMiscWarning != "")
{
nPriority = 1000;
strStatusBar = strMiscWarning;
}
// ppcoin: if detected invalid checkpoint enter safe mode
if (Checkpoints::hashInvalidCheckpoint != 0)
{
nPriority = 3000;
strStatusBar = strRPC = "Warning: An invalid checkpoint has been found! Displayed transactions may not be correct! You may need to upgrade, and/or notify developers of the issue.";
}
// Alerts
{
LOCK(cs_mapAlerts);
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
{
const CAlert& alert = item.second;
if (alert.AppliesToMe() && alert.nPriority > nPriority)
{
nPriority = alert.nPriority;
strStatusBar = alert.strStatusBar;
if (nPriority > 1000)
{
strRPC = strStatusBar; // ppcoin: safe mode for high alert
}
}
}
}
if (strFor == "statusbar")
return strStatusBar;
else if (strFor == "rpc")
return strRPC;
assert(!"GetWarnings() : invalid parameter");
return "error";
}
bool CAlert::ProcessAlert()
{
if (!CheckSignature())
return false;
if (!IsInEffect())
return false;
{
LOCK(cs_mapAlerts);
// Cancel previous alerts
for (map<uint256, CAlert>::iterator mi = mapAlerts.begin(); mi != mapAlerts.end();)
{
const CAlert& alert = (*mi).second;
if (Cancels(alert))
{
printf("cancelling alert %d\n", alert.nID);
mapAlerts.erase(mi++);
}
else if (!alert.IsInEffect())
{
printf("expiring alert %d\n", alert.nID);
mapAlerts.erase(mi++);
}
else
mi++;
}
// Check if this alert has been cancelled
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
{
const CAlert& alert = item.second;
if (alert.Cancels(*this))
{
printf("alert already cancelled by %d\n", alert.nID);
return false;
}
}
// Add to mapAlerts
mapAlerts.insert(make_pair(GetHash(), *this));
}
printf("accepted alert %d, AppliesToMe()=%d\n", nID, AppliesToMe());
MainFrameRepaint();
return true;
}
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//
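// Return true if we already have this inventory item: the transaction is
// in the memory pool, orphan pool, or transaction index, or the block is
// in the block index or orphan block map.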
bool static AlreadyHave(CTxDB& txdb, const CInv& inv)
{
switch (inv.type)
{
case MSG_TX:
{
bool txInMap = false;
{
LOCK(mempool.cs);
txInMap = (mempool.exists(inv.hash));
}
return txInMap ||
mapOrphanTransactions.count(inv.hash) ||
txdb.ContainsTx(inv.hash);
}
case MSG_BLOCK:
return mapBlockIndex.count(inv.hash) ||
mapOrphanBlocks.count(inv.hash);
}
// Don't know what it is, just say we already got one
return true;
}
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv)
{
static map<CService, CPubKey> mapReuseKey;
RandAddSeedPerfmon();
if (fDebug) {
printf("%s ", DateTimeStrFormat(GetTime()).c_str());
printf("received: %s (%d bytes)\n", strCommand.c_str(), vRecv.size());
}
if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
{
printf("dropmessagestest DROPPING RECV MESSAGE\n");
return true;
}
if (strCommand == "version")
{
// Each connection can only send one version message
if (pfrom->nVersion != 0)
{
pfrom->Misbehaving(1);
return false;
}
int64 nTime;
CAddress addrMe;
CAddress addrFrom;
uint64 nNonce = 1;
vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
if (pfrom->nVersion < MIN_PROTO_VERSION)
{
// Since February 20, 2012, the protocol is initiated at version 209,
// and earlier versions are no longer supported
printf("partner %s using obsolete version %i; disconnecting\n", pfrom->addr.ToString().c_str(), pfrom->nVersion);
pfrom->fDisconnect = true;
return false;
}
if (pfrom->nVersion == 10300)
pfrom->nVersion = 300;
if (!vRecv.empty())
vRecv >> addrFrom >> nNonce;
if (!vRecv.empty())
vRecv >> pfrom->strSubVer;
if (!vRecv.empty())
vRecv >> pfrom->nStartingHeight;
// Disconnect if we connected to ourself
if (nNonce == nLocalHostNonce && nNonce > 1)
{
printf("connected to self at %s, disconnecting\n", pfrom->addr.ToString().c_str());
pfrom->fDisconnect = true;
return true;
}
// ppcoin: record my external IP reported by peer
if (addrFrom.IsRoutable() && addrMe.IsRoutable())
addrSeenByPeer = addrMe;
// Be shy and don't send version until we hear
if (pfrom->fInbound)
pfrom->PushVersion();
pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
AddTimeData(pfrom->addr, nTime);
// Change version
pfrom->PushMessage("verack");
pfrom->vSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
if (!pfrom->fInbound)
{
// Advertise our address
if (!fNoListen && !fUseProxy && addrLocalHost.IsRoutable() &&
!IsInitialBlockDownload())
{
CAddress addr(addrLocalHost);
addr.nTime = GetAdjustedTime();
pfrom->PushAddress(addr);
}
// Get recent addresses
if (pfrom->nVersion >= CADDR_TIME_VERSION || addrman.size() < 1000)
{
pfrom->PushMessage("getaddr");
pfrom->fGetAddr = true;
}
addrman.Good(pfrom->addr);
} else {
if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom)
{
addrman.Add(addrFrom, addrFrom);
addrman.Good(addrFrom);
}
}
// Ask the first connected node for block updates
static int nAskedForBlocks = 0;
if (!pfrom->fClient &&
(pfrom->nVersion < NOBLKS_VERSION_START ||
pfrom->nVersion >= NOBLKS_VERSION_END))
{
nAskedForBlocks++;
pfrom->PushGetBlocks(pindexBest, uint256(0));
}
// Relay alerts
{
LOCK(cs_mapAlerts);
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
item.second.RelayTo(pfrom);
}
// ppcoin: relay sync-checkpoint
{
LOCK(Checkpoints::cs_hashSyncCheckpoint);
if (!Checkpoints::checkpointMessage.IsNull())
Checkpoints::checkpointMessage.RelayTo(pfrom);
}
pfrom->fSuccessfullyConnected = true;
printf("version message: version %d, blocks=%d\n", pfrom->nVersion, pfrom->nStartingHeight);
cPeerBlockCounts.input(pfrom->nStartingHeight);
// ppcoin: ask for pending sync-checkpoint if any
if (!IsInitialBlockDownload())
Checkpoints::AskForPendingSyncCheckpoint(pfrom);
}
else if (pfrom->nVersion == 0)
{
// Must have a version message before anything else
pfrom->Misbehaving(1);
return false;
}
else if (strCommand == "verack")
{
pfrom->vRecv.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
}
else if (strCommand == "addr")
{
vector<CAddress> vAddr;
vRecv >> vAddr;
// Don't want addr from older versions unless seeding
if (pfrom->nVersion < CADDR_TIME_VERSION && addrman.size() > 1000)
return true;
if (vAddr.size() > 1000)
{
pfrom->Misbehaving(20);
return error("message addr size() = %d", vAddr.size());
}
// Store the new addresses
int64 nNow = GetAdjustedTime();
int64 nSince = nNow - 10 * 60;
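// Only addresses reported within the last 10 minutes are relayed on.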
BOOST_FOREACH(CAddress& addr, vAddr)
{
if (fShutdown)
return true;
// ignore IPv6 for now, since it isn't implemented anyway
if (!addr.IsIPv4())
continue;
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
pfrom->AddAddressKnown(addr);
if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
{
// Relay to a limited number of other nodes
{
LOCK(cs_vNodes);
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the setAddrKnowns of the chosen nodes prevent repeats
static uint256 hashSalt;
if (hashSalt == 0)
hashSalt = GetRandHash();
int64 hashAddr = addr.GetHash();
uint256 hashRand = hashSalt ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
multimap<uint256, CNode*> mapMix;
BOOST_FOREACH(CNode* pnode, vNodes)
{
if (pnode->nVersion < CADDR_TIME_VERSION)
continue;
unsigned int nPointer;
memcpy(&nPointer, &pnode, sizeof(nPointer));
uint256 hashKey = hashRand ^ nPointer;
hashKey = Hash(BEGIN(hashKey), END(hashKey));
mapMix.insert(make_pair(hashKey, pnode));
}
int nRelayNodes = 2;
for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
((*mi).second)->PushAddress(addr);
}
}
}
addrman.Add(vAddr, pfrom->addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom->fGetAddr = false;
}
else if (strCommand == "inv")
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > 50000)
{
pfrom->Misbehaving(20);
return error("message inv size() = %d", vInv.size());
}
// find last block in inv vector
unsigned int nLastBlock = (unsigned int)(-1);
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++) {
if (vInv[vInv.size() - 1 - nInv].type == MSG_BLOCK) {
nLastBlock = vInv.size() - 1 - nInv;
break;
}
}
CTxDB txdb("r");
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
{
const CInv &inv = vInv[nInv];
if (fShutdown)
return true;
pfrom->AddInventoryKnown(inv);
bool fAlreadyHave = AlreadyHave(txdb, inv);
if (fDebug)
printf(" got inventory: %s %s\n", inv.ToString().c_str(), fAlreadyHave ? "have" : "new");
if (!fAlreadyHave)
{
pfrom->AskFor(inv);
}
else if (inv.type == MSG_BLOCK && mapOrphanBlocks.count(inv.hash))
{
pfrom->PushGetBlocks(pindexBest, GetOrphanRoot(mapOrphanBlocks[inv.hash]));
}
else if (nInv == nLastBlock)
{
// In case we are on a very long side-chain, it is possible that we already have
// the last block in an inv bundle sent in response to getblocks. Try to detect
// this situation and push another getblocks to continue.
std::vector<CInv> vGetData(1,inv);
pfrom->PushGetBlocks(mapBlockIndex.at(inv.hash), uint256(0));
if (fDebug)
{
printf("force request: %s\n", inv.ToString().c_str());
}
}
// Track requests for our stuff
Inventory(inv.hash);
}
}
else if (strCommand == "getdata")
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > 50000)
{
pfrom->Misbehaving(20);
return error("message getdata size() = %d", vInv.size());
}
BOOST_FOREACH(const CInv& inv, vInv)
{
if (fShutdown)
return true;
printf("received getdata for: %s\n", inv.ToString().c_str());
if (inv.type == MSG_BLOCK)
{
// Send block from disk
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end())
{
CBlock block;
block.ReadFromDisk((*mi).second);
pfrom->PushMessage("block", block);
// Trigger them to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
// ppcoin: send the latest proof-of-work block to allow the
// downloading node to accept it as an orphan (a proof-of-stake
// block might be rejected by the stake connection check)
vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, GetLastBlockIndex(pindexBest, false)->GetBlockHash()));
pfrom->PushMessage("inv", vInv);
pfrom->hashContinue = 0;
}
}
}
else if (inv.IsKnownType())
{
// Send stream from relay memory
{
LOCK(cs_mapRelay);
map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
if (mi != mapRelay.end())
pfrom->PushMessage(inv.GetCommand(), (*mi).second);
}
}
// Track requests for our stuff
Inventory(inv.hash);
}
}
else if (strCommand == "getblocks")
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
// Find the last block the caller has in the main chain
CBlockIndex* pindex = locator.GetBlockIndex();
// Send the rest of the chain
if (pindex)
pindex = pindex->pnext;
int nLimit = 500 + locator.GetDistanceBack();
unsigned int nBytes = 0;
printf("getblocks %d to %s limit %d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().substr(0,20).c_str(), nLimit);
for (; pindex; pindex = pindex->pnext)
{
if (pindex->GetBlockHash() == hashStop)
{
printf(" getblocks stopping at %d %s (%u bytes)\n", pindex->nHeight, pindex->GetBlockHash().ToString().substr(0,20).c_str(), nBytes);
// ppcoin: tell the downloading node about the latest block if it
// can be sent without risk of rejection by the stake connection check
if (hashStop != hashBestChain && pindex->GetBlockTime() + STAKE_MIN_AGE > pindexBest->GetBlockTime())
pfrom->PushInventory(CInv(MSG_BLOCK, hashBestChain));
break;
}
pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
CBlock block;
block.ReadFromDisk(pindex, true);
nBytes += block.GetSerializeSize(SER_NETWORK, PROTOCOL_VERSION);
if (--nLimit <= 0 || nBytes >= SendBufferSize()/2)
{
// When this block is requested, we'll send an inv that'll make them
// getblocks the next batch of inventory.
printf(" getblocks stopping at limit %d %s (%u bytes)\n", pindex->nHeight, pindex->GetBlockHash().ToString().substr(0,20).c_str(), nBytes);
pfrom->hashContinue = pindex->GetBlockHash();
break;
}
}
}
else if (strCommand == "getheaders")
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
CBlockIndex* pindex = NULL;
if (locator.IsNull())
{
// If locator is null, return the hashStop block
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashStop);
if (mi == mapBlockIndex.end())
return true;
pindex = (*mi).second;
}
else
{
// Find the last block the caller has in the main chain
pindex = locator.GetBlockIndex();
if (pindex)
pindex = pindex->pnext;
}
vector<CBlock> vHeaders;
int nLimit = 2000;
printf("getheaders %d to %s\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().substr(0,20).c_str());
for (; pindex; pindex = pindex->pnext)
{
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
break;
}
pfrom->PushMessage("headers", vHeaders);
}
else if (strCommand == "tx")
{
vector<uint256> vWorkQueue;
vector<uint256> vEraseQueue;
CDataStream vMsg(vRecv);
CTxDB txdb("r");
CTransaction tx;
vRecv >> tx;
CInv inv(MSG_TX, tx.GetHash());
pfrom->AddInventoryKnown(inv);
bool fMissingInputs = false;
if (tx.AcceptToMemoryPool(txdb, true, &fMissingInputs))
{
SyncWithWallets(tx, NULL, true);
RelayMessage(inv, vMsg);
mapAlreadyAskedFor.erase(inv);
vWorkQueue.push_back(inv.hash);
vEraseQueue.push_back(inv.hash);
// Recursively process any orphan transactions that depended on this one
for (unsigned int i = 0; i < vWorkQueue.size(); i++)
{
uint256 hashPrev = vWorkQueue[i];
for (map<uint256, CDataStream*>::iterator mi = mapOrphanTransactionsByPrev[hashPrev].begin();
mi != mapOrphanTransactionsByPrev[hashPrev].end();
++mi)
{
const CDataStream& vMsg = *((*mi).second);
CTransaction tx;
CDataStream(vMsg) >> tx;
CInv inv(MSG_TX, tx.GetHash());
bool fMissingInputs2 = false;
if (tx.AcceptToMemoryPool(txdb, true, &fMissingInputs2))
{
printf(" accepted orphan tx %s\n", inv.hash.ToString().substr(0,10).c_str());
SyncWithWallets(tx, NULL, true);
RelayMessage(inv, vMsg);
mapAlreadyAskedFor.erase(inv);
vWorkQueue.push_back(inv.hash);
vEraseQueue.push_back(inv.hash);
}
else if (!fMissingInputs2)
{
// invalid orphan
vEraseQueue.push_back(inv.hash);
printf(" removed invalid orphan tx %s\n", inv.hash.ToString().substr(0,10).c_str());
}
}
}
BOOST_FOREACH(uint256 hash, vEraseQueue)
EraseOrphanTx(hash);
}
else if (fMissingInputs)
{
AddOrphanTx(vMsg);
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
unsigned int nEvicted = LimitOrphanTxSize(MAX_BLOCK_ORPHAN_TX);
if (nEvicted > 0)
printf("mapOrphan overflow, removed %u tx\n", nEvicted);
}
if (tx.nDoS) pfrom->Misbehaving(tx.nDoS);
}
else if (strCommand == "block")
{
CBlock block;
vRecv >> block;
printf("received block %s\n", block.GetHash().ToString().substr(0,20).c_str());
// block.print();
CInv inv(MSG_BLOCK, block.GetHash());
pfrom->AddInventoryKnown(inv);
if (ProcessBlock(pfrom, &block))
mapAlreadyAskedFor.erase(inv);
if (block.nDoS) pfrom->Misbehaving(block.nDoS);
}
else if (strCommand == "getaddr")
{
pfrom->vAddrToSend.clear();
vector<CAddress> vAddr = addrman.GetAddr();
BOOST_FOREACH(const CAddress &addr, vAddr)
pfrom->PushAddress(addr);
}
else if (strCommand == "checkorder")
{
uint256 hashReply;
vRecv >> hashReply;
if (!GetBoolArg("-allowreceivebyip"))
{
pfrom->PushMessage("reply", hashReply, (int)2, string(""));
return true;
}
CWalletTx order;
vRecv >> order;
/// we have a chance to check the order here
// Keep giving the same key to the same ip until they use it
if (!mapReuseKey.count(pfrom->addr))
pwalletMain->GetKeyFromPool(mapReuseKey[pfrom->addr], true);
// Send back approval of order and pubkey to use
CScript scriptPubKey;
scriptPubKey << mapReuseKey[pfrom->addr] << OP_CHECKSIG;
pfrom->PushMessage("reply", hashReply, (int)0, scriptPubKey);
}
else if (strCommand == "reply")
{
uint256 hashReply;
vRecv >> hashReply;
CRequestTracker tracker;
{
LOCK(pfrom->cs_mapRequests);
map<uint256, CRequestTracker>::iterator mi = pfrom->mapRequests.find(hashReply);
if (mi != pfrom->mapRequests.end())
{
tracker = (*mi).second;
pfrom->mapRequests.erase(mi);
}
}
if (!tracker.IsNull())
tracker.fn(tracker.param1, vRecv);
}
else if (strCommand == "ping")
{
if (pfrom->nVersion > BIP0031_VERSION)
{
uint64 nonce = 0;
vRecv >> nonce;
// Echo the message back with the nonce. This allows for two useful features:
//
// 1) A remote node can quickly check if the connection is operational
// 2) Remote nodes can measure the latency of the network thread. If this node
// is overloaded it won't respond to pings quickly and the remote node can
// avoid sending us more work, like chain download requests.
//
// The nonce stops the remote getting confused between different pings: without
// it, if the remote node sends a ping once per second and this node takes 5
// seconds to respond to each, the 5th ping the remote sends would appear to
// return very quickly.
pfrom->PushMessage("pong", nonce);
}
}
else if (strCommand == "alert")
{
CAlert alert;
vRecv >> alert;
if (alert.ProcessAlert())
{
// Relay
pfrom->setKnown.insert(alert.GetHash());
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
alert.RelayTo(pnode);
}
}
}
else if (strCommand == "checkpoint")
{
CSyncCheckpoint checkpoint;
vRecv >> checkpoint;
if (checkpoint.ProcessSyncCheckpoint(pfrom))
{
// Relay
pfrom->hashCheckpointKnown = checkpoint.hashCheckpoint;
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
checkpoint.RelayTo(pnode);
}
}
else
{
// Ignore unknown commands for extensibility
}
// Update the last seen time for this node's address
if (pfrom->fNetworkNode)
if (strCommand == "version" || strCommand == "addr" || strCommand == "inv" || strCommand == "getdata" || strCommand == "ping")
AddressCurrentlyConnected(pfrom->addr);
return true;
}
bool ProcessMessages(CNode* pfrom)
{
CDataStream& vRecv = pfrom->vRecv;
if (vRecv.empty())
return true;
//if (fDebug)
// printf("ProcessMessages(%u bytes)\n", vRecv.size());
//
// Message format
// (4) message start
// (12) command
// (4) size
// (4) checksum
// (x) data
//
unsigned char pchMessageStart[4];
GetMessageStart(pchMessageStart);
static int64 nTimeLastPrintMessageStart = 0;
if (fDebug && GetBoolArg("-printmessagestart") && nTimeLastPrintMessageStart + 30 < GetAdjustedTime())
{
string strMessageStart((const char *)pchMessageStart, sizeof(pchMessageStart));
vector<unsigned char> vchMessageStart(strMessageStart.begin(), strMessageStart.end());
printf("ProcessMessages : AdjustedTime=%"PRI64d" MessageStart=%s\n", GetAdjustedTime(), HexStr(vchMessageStart).c_str());
nTimeLastPrintMessageStart = GetAdjustedTime();
}
INFINITE_LOOP
{
// Safeguards to prevent the node from ignoring a requested shutdown
// during long processing
if (fRequestShutdown)
{
StartShutdown();
return true;
}
// Scan for message start
CDataStream::iterator pstart = search(vRecv.begin(), vRecv.end(), BEGIN(pchMessageStart), END(pchMessageStart));
int nHeaderSize = vRecv.GetSerializeSize(CMessageHeader());
if (vRecv.end() - pstart < nHeaderSize)
{
if ((int)vRecv.size() > nHeaderSize)
{
printf("\n\nPROCESSMESSAGE MESSAGESTART NOT FOUND\n\n");
vRecv.erase(vRecv.begin(), vRecv.end() - nHeaderSize);
}
break;
}
if (pstart - vRecv.begin() > 0)
printf("\n\nPROCESSMESSAGE SKIPPED %d BYTES\n\n", pstart - vRecv.begin());
vRecv.erase(vRecv.begin(), pstart);
// Read header
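// Keep a copy of the raw header bytes so they can be pushed back into the
// stream if the full payload turns out not to have arrived yet (see the
// rewind below).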
vector<char> vHeaderSave(vRecv.begin(), vRecv.begin() + nHeaderSize);
CMessageHeader hdr;
vRecv >> hdr;
if (!hdr.IsValid())
{
printf("\n\nPROCESSMESSAGE: ERRORS IN HEADER %s\n\n\n", hdr.GetCommand().c_str());
continue;
}
string strCommand = hdr.GetCommand();
// Message size
unsigned int nMessageSize = hdr.nMessageSize;
if (nMessageSize > MAX_SIZE)
{
printf("ProcessMessages(%s, %u bytes) : nMessageSize > MAX_SIZE\n", strCommand.c_str(), nMessageSize);
continue;
}
if (nMessageSize > vRecv.size())
{
// Rewind and wait for rest of message
vRecv.insert(vRecv.begin(), vHeaderSave.begin(), vHeaderSave.end());
break;
}
// Checksum
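// The header checksum is the first four bytes of Hash() (double-SHA256)
// of the payload; a mismatch indicates corruption, so the message is skipped.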
uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
unsigned int nChecksum = 0;
memcpy(&nChecksum, &hash, sizeof(nChecksum));
if (nChecksum != hdr.nChecksum)
{
printf("ProcessMessages(%s, %u bytes) : CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n",
strCommand.c_str(), nMessageSize, nChecksum, hdr.nChecksum);
continue;
}
// Copy message to its own buffer
CDataStream vMsg(vRecv.begin(), vRecv.begin() + nMessageSize, vRecv.nType, vRecv.nVersion);
vRecv.ignore(nMessageSize);
// Process message
bool fRet = false;
try
{
{
LOCK(cs_main);
fRet = ProcessMessage(pfrom, strCommand, vMsg);
}
if (fShutdown)
return true;
}
catch (std::ios_base::failure& e)
{
if (strstr(e.what(), "end of data"))
{
// Allow exceptions from underlength message on vRecv
printf("ProcessMessages(%s, %u bytes) : Exception '%s' caught, normally caused by a message being shorter than its stated length\n", strCommand.c_str(), nMessageSize, e.what());
}
else if (strstr(e.what(), "size too large"))
{
// Allow exceptions from overlong size
printf("ProcessMessages(%s, %u bytes) : Exception '%s' caught\n", strCommand.c_str(), nMessageSize, e.what());
}
else
{
PrintExceptionContinue(&e, "ProcessMessages()");
}
}
catch (std::exception& e) {
PrintExceptionContinue(&e, "ProcessMessages()");
} catch (...) {
PrintExceptionContinue(NULL, "ProcessMessages()");
}
if (!fRet)
printf("ProcessMessage(%s, %u bytes) FAILED\n", strCommand.c_str(), nMessageSize);
}
vRecv.Compact();
return true;
}
bool SendMessages(CNode* pto, bool fSendTrickle)
{
TRY_LOCK(cs_main, lockMain);
if (lockMain) {
// Don't send anything until we get their version message
if (pto->nVersion == 0)
return true;
// Keep-alive ping. We send a nonce of zero because we don't use it anywhere
// right now.
if (pto->nLastSend && GetTime() - pto->nLastSend > 30 * 60 && pto->vSend.empty()) {
uint64 nonce = 0;
if (pto->nVersion > BIP0031_VERSION)
pto->PushMessage("ping", nonce);
else
pto->PushMessage("ping");
}
// Resend wallet transactions that haven't gotten in a block yet
ResendWalletTransactions();
// Address refresh broadcast
static int64 nLastRebroadcast;
if (!IsInitialBlockDownload() && (GetTime() - nLastRebroadcast > 24 * 60 * 60))
{
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
{
// Periodically clear setAddrKnown to allow refresh broadcasts
if (nLastRebroadcast)
pnode->setAddrKnown.clear();
// Rebroadcast our address
if (!fNoListen && !fUseProxy && addrLocalHost.IsRoutable())
{
CAddress addr(addrLocalHost);
addr.nTime = GetAdjustedTime();
pnode->PushAddress(addr);
}
}
}
nLastRebroadcast = GetTime();
}
//
// Message: addr
//
if (fSendTrickle)
{
vector<CAddress> vAddr;
vAddr.reserve(pto->vAddrToSend.size());
BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
{
// insert() returns true if the address wasn't already contained in the set
if (pto->setAddrKnown.insert(addr).second)
{
vAddr.push_back(addr);
// receiver rejects addr messages larger than 1000
if (vAddr.size() >= 1000)
{
pto->PushMessage("addr", vAddr);
vAddr.clear();
}
}
}
pto->vAddrToSend.clear();
if (!vAddr.empty())
pto->PushMessage("addr", vAddr);
}
//
// Message: inventory
//
vector<CInv> vInv;
vector<CInv> vInvWait;
{
LOCK(pto->cs_inventory);
vInv.reserve(pto->vInventoryToSend.size());
vInvWait.reserve(pto->vInventoryToSend.size());
BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
{
if (pto->setInventoryKnown.count(inv))
continue;
// trickle out tx inv to protect privacy
if (inv.type == MSG_TX && !fSendTrickle)
{
// 1/4 of tx invs blast to all immediately
static uint256 hashSalt;
if (hashSalt == 0)
hashSalt = GetRandHash();
uint256 hashRand = inv.hash ^ hashSalt;
hashRand = Hash(BEGIN(hashRand), END(hashRand));
bool fTrickleWait = ((hashRand & 3) != 0);
// always trickle our own transactions
if (!fTrickleWait)
{
CWalletTx wtx;
if (GetTransaction(inv.hash, wtx))
if (wtx.fFromMe)
fTrickleWait = true;
}
if (fTrickleWait)
{
vInvWait.push_back(inv);
continue;
}
}
// insert() returns true if the inv wasn't already contained in the set
if (pto->setInventoryKnown.insert(inv).second)
{
vInv.push_back(inv);
if (vInv.size() >= 1000)
{
pto->PushMessage("inv", vInv);
vInv.clear();
}
}
}
pto->vInventoryToSend = vInvWait;
}
if (!vInv.empty())
pto->PushMessage("inv", vInv);
//
// Message: getdata
//
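// mapAskFor is keyed by the time at which each inventory item should be
// requested, so this loop drains only the entries whose scheduled request
// time has already arrived.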
vector<CInv> vGetData;
int64 nNow = GetTime() * 1000000;
CTxDB txdb("r");
while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
{
const CInv& inv = (*pto->mapAskFor.begin()).second;
if (!AlreadyHave(txdb, inv))
{
printf("sending getdata: %s\n", inv.ToString().c_str());
vGetData.push_back(inv);
if (vGetData.size() >= 1000)
{
pto->PushMessage("getdata", vGetData);
vGetData.clear();
}
}
mapAlreadyAskedFor[inv] = nNow;
pto->mapAskFor.erase(pto->mapAskFor.begin());
}
if (!vGetData.empty())
pto->PushMessage("getdata", vGetData);
}
return true;
}
//////////////////////////////////////////////////////////////////////////////
//
// BitcoinMiner
//
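// Apply SHA-256 message padding in place: append the 0x80 terminator,
// zero-fill, and store the bit length big-endian at the end of the last
// 64-byte block; returns the number of 64-byte blocks used.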
int static FormatHashBlocks(void* pbuffer, unsigned int len)
{
unsigned char* pdata = (unsigned char*)pbuffer;
unsigned int blocks = 1 + ((len + 8) / 64);
unsigned char* pend = pdata + 64 * blocks;
memset(pdata + len, 0, 64 * blocks - len);
pdata[len] = 0x80;
unsigned int bits = len * 8;
pend[-1] = (bits >> 0) & 0xff;
pend[-2] = (bits >> 8) & 0xff;
pend[-3] = (bits >> 16) & 0xff;
pend[-4] = (bits >> 24) & 0xff;
return blocks;
}
static const unsigned int pSHA256InitState[8] =
{0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
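// Run a single SHA-256 compression over one 64-byte block, starting from
// the state given in pinit rather than the standard IV; this lets the miner
// resume hashing from a precomputed midstate.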
void SHA256Transform(void* pstate, void* pinput, const void* pinit)
{
SHA256_CTX ctx;
unsigned char data[64];
SHA256_Init(&ctx);
for (int i = 0; i < 16; i++)
((uint32_t*)data)[i] = ByteReverse(((uint32_t*)pinput)[i]);
for (int i = 0; i < 8; i++)
ctx.h[i] = ((uint32_t*)pinit)[i];
SHA256_Update(&ctx, data, sizeof(data));
for (int i = 0; i < 8; i++)
((uint32_t*)pstate)[i] = ctx.h[i];
}
// Tracks a mempool transaction that spends outputs of other transactions
// still in the mempool; it can only be added to a new block after every
// hash in setDependsOn has been included.
class COrphan
{
public:
CTransaction* ptx;
set<uint256> setDependsOn;
double dPriority;
COrphan(CTransaction* ptxIn)
{
ptx = ptxIn;
dPriority = 0;
}
void print() const
{
printf("COrphan(hash=%s, dPriority=%.1f)\n", ptx->GetHash().ToString().substr(0,10).c_str(), dPriority);
BOOST_FOREACH(uint256 hash, setDependsOn)
printf(" setDependsOn %s\n", hash.ToString().substr(0,10).c_str());
}
};
uint64 nLastBlockTx = 0;
uint64 nLastBlockSize = 0;
int64 nLastCoinStakeSearchInterval = 0;
// CreateNewBlock:
// fProofOfStake: try (best effort) to make a proof-of-stake block
CBlock* CreateNewBlock(CReserveKey& reservekey, CWallet* pwallet, bool fProofOfStake)
{
// Create new block
auto_ptr<CBlock> pblock(new CBlock());
if (!pblock.get())
return NULL;
// Create coinbase tx
CTransaction txNew;
txNew.vin.resize(1);
txNew.vin[0].prevout.SetNull();
txNew.vout.resize(1);
txNew.vout[0].scriptPubKey << reservekey.GetReservedKey() << OP_CHECKSIG;
// Add our coinbase tx as first transaction
pblock->vtx.push_back(txNew);
// ppcoin: if coinstake available add coinstake tx
static int64 nLastCoinStakeSearchTime = GetAdjustedTime(); // only initialized at startup
CBlockIndex* pindexPrev = pindexBest;
if (fProofOfStake) // attempt to find a coinstake
{
pblock->nBits = GetNextTargetRequired(pindexPrev, true);
CTransaction txCoinStake;
int64 nSearchTime = txCoinStake.nTime; // search to current time
if (nSearchTime > nLastCoinStakeSearchTime)
{
if (pwallet->CreateCoinStake(*pwallet, pblock->nBits, nSearchTime-nLastCoinStakeSearchTime, txCoinStake))
{
if (txCoinStake.nTime >= max(pindexPrev->GetMedianTimePast()+1, pindexPrev->GetBlockTime() - MAX_CLOCK_DRIFT))
{ // make sure coinstake would meet timestamp protocol
// as it would be the same as the block timestamp
pblock->vtx[0].vout[0].SetEmpty();
pblock->vtx[0].nTime = txCoinStake.nTime;
pblock->vtx.push_back(txCoinStake);
}
}
nLastCoinStakeSearchInterval = nSearchTime - nLastCoinStakeSearchTime;
nLastCoinStakeSearchTime = nSearchTime;
}
}
pblock->nBits = GetNextTargetRequired(pindexPrev, pblock->IsProofOfStake());
// Collect memory pool transactions into the block
int64 nFees = 0;
{
LOCK2(cs_main, mempool.cs);
CTxDB txdb("r");
// Priority order to process transactions
list<COrphan> vOrphan; // list memory doesn't move
map<uint256, vector<COrphan*> > mapDependers;
multimap<double, CTransaction*> mapPriority;
for (map<uint256, CTransaction>::iterator mi = mempool.mapTx.begin(); mi != mempool.mapTx.end(); ++mi)
{
CTransaction& tx = (*mi).second;
if (tx.IsCoinBase() || tx.IsCoinStake() || !tx.IsFinal())
continue;
COrphan* porphan = NULL;
double dPriority = 0;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
{
// Read prev transaction
CTransaction txPrev;
CTxIndex txindex;
if (!txPrev.ReadFromDisk(txdb, txin.prevout, txindex))
{
// Has to wait for dependencies
if (!porphan)
{
// Use list for automatic deletion
vOrphan.push_back(COrphan(&tx));
porphan = &vOrphan.back();
}
mapDependers[txin.prevout.hash].push_back(porphan);
porphan->setDependsOn.insert(txin.prevout.hash);
continue;
}
int64 nValueIn = txPrev.vout[txin.prevout.n].nValue;
// Read block header
int nConf = txindex.GetDepthInMainChain();
dPriority += (double)nValueIn * nConf;
if (fDebug && GetBoolArg("-printpriority"))
printf("priority nValueIn=%-12"PRI64d" nConf=%-5d dPriority=%-20.1f\n", nValueIn, nConf, dPriority);
}
// Priority is sum(valuein * age) / txsize
dPriority /= ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
if (porphan)
porphan->dPriority = dPriority;
else
mapPriority.insert(make_pair(-dPriority, &(*mi).second));
if (fDebug && GetBoolArg("-printpriority"))
{
printf("priority %-20.1f %s\n%s", dPriority, tx.GetHash().ToString().substr(0,10).c_str(), tx.ToString().c_str());
if (porphan)
porphan->print();
printf("\n");
}
}
// Collect transactions into block
map<uint256, CTxIndex> mapTestPool;
uint64 nBlockSize = 1000;
uint64 nBlockTx = 0;
int nBlockSigOps = 100;
while (!mapPriority.empty())
{
// Take highest priority transaction off priority queue
CTransaction& tx = *(*mapPriority.begin()).second;
mapPriority.erase(mapPriority.begin());
// Size limits
unsigned int nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
if (nBlockSize + nTxSize >= MAX_BLOCK_SIZE_GEN)
continue;
// Legacy limits on sigOps:
unsigned int nTxSigOps = tx.GetLegacySigOpCount();
if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS)
continue;
// Timestamp limit
if (tx.nTime > GetAdjustedTime() || (pblock->IsProofOfStake() && tx.nTime > pblock->vtx[1].nTime))
continue;
// ppcoin: simplify transaction fee - allow free = false
int64 nMinFee = tx.GetMinFee(nBlockSize, false, GMF_BLOCK);
// Connecting shouldn't fail due to dependency on other memory pool transactions
// because we're already processing them in order of dependency
map<uint256, CTxIndex> mapTestPoolTmp(mapTestPool);
MapPrevTx mapInputs;
bool fInvalid;
if (!tx.FetchInputs(txdb, mapTestPoolTmp, false, true, mapInputs, fInvalid))
continue;
int64 nTxFees = tx.GetValueIn(mapInputs)-tx.GetValueOut();
if (nTxFees < nMinFee)
continue;
nTxSigOps += tx.GetP2SHSigOpCount(mapInputs);
if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS)
continue;
if (!tx.ConnectInputs(txdb, mapInputs, mapTestPoolTmp, CDiskTxPos(1,1,1), pindexPrev, false, true))
continue;
mapTestPoolTmp[tx.GetHash()] = CTxIndex(CDiskTxPos(1,1,1), tx.vout.size());
swap(mapTestPool, mapTestPoolTmp);
// Add the transaction to the block and update the running totals
pblock->vtx.push_back(tx);
nBlockSize += nTxSize;
++nBlockTx;
nBlockSigOps += nTxSigOps;
nFees += nTxFees;
// Add transactions that depend on this one to the priority queue
uint256 hash = tx.GetHash();
if (mapDependers.count(hash))
{
BOOST_FOREACH(COrphan* porphan, mapDependers[hash])
{
if (!porphan->setDependsOn.empty())
{
porphan->setDependsOn.erase(hash);
if (porphan->setDependsOn.empty())
mapPriority.insert(make_pair(-porphan->dPriority, porphan->ptx));
}
}
}
}
nLastBlockTx = nBlockTx;
nLastBlockSize = nBlockSize;
if (fDebug && GetBoolArg("-printpriority")) {
printf("CreateNewBlock(): total size %lu\n", nBlockSize);
}
}
if (pblock->IsProofOfWork())
pblock->vtx[0].vout[0].nValue = GetProofOfWorkReward(pindexPrev->nHeight);
// Fill in header
pblock->hashPrevBlock = pindexPrev->GetBlockHash();
pblock->hashMerkleRoot = pblock->BuildMerkleTree();
if (pblock->IsProofOfStake())
pblock->nTime = pblock->vtx[1].nTime; // same as coinstake timestamp
pblock->nTime = max(pindexPrev->GetMedianTimePast()+1, pblock->GetMaxTransactionTime());
pblock->nTime = max(pblock->GetBlockTime(), pindexPrev->GetBlockTime() - MAX_CLOCK_DRIFT);
if (pblock->IsProofOfWork())
pblock->UpdateTime(pindexPrev);
pblock->nNonce = 0;
return pblock.release();
}
void IncrementExtraNonce(CBlock* pblock, CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
{
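// The 32-bit header nonce is exhausted quickly at real hash rates; bumping
// an extra nonce inside the coinbase scriptSig changes the merkle root and
// gives the miner a fresh nonce space for the same previous block.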
// Update nExtraNonce
static uint256 hashPrevBlock;
if (hashPrevBlock != pblock->hashPrevBlock)
{
nExtraNonce = 0;
hashPrevBlock = pblock->hashPrevBlock;
}
++nExtraNonce;
pblock->vtx[0].vin[0].scriptSig = (CScript() << pblock->nTime << CBigNum(nExtraNonce)) + COINBASE_FLAGS;
assert(pblock->vtx[0].vin[0].scriptSig.size() <= 100);
pblock->hashMerkleRoot = pblock->BuildMerkleTree();
}
void FormatHashBuffers(CBlock* pblock, char* pmidstate, char* pdata, char* phash1)
{
//
// Prebuild hash buffers
//
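// The 80-byte header pads out to two 64-byte SHA-256 blocks. pmidstate
// receives the compression state after the first block, so (presumably for
// getwork-style miners) only the second block, which contains nTime, nBits
// and nNonce, has to be re-hashed as the nonce changes.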
struct
{
struct unnamed2
{
int nVersion;
uint256 hashPrevBlock;
uint256 hashMerkleRoot;
unsigned int nTime;
unsigned int nBits;
unsigned int nNonce;
}
block;
unsigned char pchPadding0[64];
uint256 hash1;
unsigned char pchPadding1[64];
}
tmp;
memset(&tmp, 0, sizeof(tmp));
tmp.block.nVersion = pblock->nVersion;
tmp.block.hashPrevBlock = pblock->hashPrevBlock;
tmp.block.hashMerkleRoot = pblock->hashMerkleRoot;
tmp.block.nTime = pblock->nTime;
tmp.block.nBits = pblock->nBits;
tmp.block.nNonce = pblock->nNonce;
FormatHashBlocks(&tmp.block, sizeof(tmp.block));
FormatHashBlocks(&tmp.hash1, sizeof(tmp.hash1));
// Byte swap all the input buffer
for (unsigned int i = 0; i < sizeof(tmp)/4; i++)
((unsigned int*)&tmp)[i] = ByteReverse(((unsigned int*)&tmp)[i]);
// Precalc the first half of the first hash, which stays constant
SHA256Transform(pmidstate, &tmp.block, pSHA256InitState);
memcpy(pdata, &tmp.block, 128);
memcpy(phash1, &tmp.hash1, 64);
}
bool CheckWork(CBlock* pblock, CWallet& wallet, CReserveKey& reservekey)
{
uint256 hash = pblock->GetHash();
uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
printf("Hash: %s\nTarget: %s\n", hash.GetHex().c_str(), hashTarget.GetHex().c_str());
if (hash > hashTarget && pblock->IsProofOfWork())
return error("BitcoinMiner : proof-of-work not meeting target");
//// debug print
printf("BitcoinMiner:\n");
printf("new block found \n hash: %s \ntarget: %s\n", hash.GetHex().c_str(), hashTarget.GetHex().c_str());
pblock->print();
printf("%s ", DateTimeStrFormat(GetTime()).c_str());
printf("generated %s\n", FormatMoney(pblock->vtx[0].vout[0].nValue).c_str());
// Found a solution
{
LOCK(cs_main);
if (pblock->hashPrevBlock != hashBestChain)
return error("BitcoinMiner : generated block is stale");
// Remove key from key pool
reservekey.KeepKey();
// Track how many getdata requests this block gets
{
LOCK(wallet.cs_wallet);
wallet.mapRequestCount[pblock->GetHash()] = 0;
}
// Process this block the same as if we had received it from another node
if (!ProcessBlock(NULL, pblock))
return error("BitcoinMiner : ProcessBlock, block not accepted");
}
return true;
}
void static ThreadBitcoinMiner(void* parg);
bool fStaking = true;
static bool fGenerateBitcoins = false;
static bool fLimitProcessors = false;
static int nLimitProcessors = -1;
bool BitcoinMiner(CWallet *pwallet, bool fProofOfStake, uint256 * minedBlock, uint64 nTimeout)
{
printf("CPUMiner started for proof-of-%s (%d)\n", fProofOfStake? "stake" : "work",
vnThreadsRunning[fProofOfStake? THREAD_MINTER : THREAD_MINER]);
SetThreadPriority(THREAD_PRIORITY_LOWEST);
uint64 nStartTime = GetTime();
// Each thread has its own key and counter
CReserveKey reservekey(pwallet);
unsigned int nExtraNonce = 0;
while (minedBlock || fGenerateBitcoins || fProofOfStake)
{
if (fShutdown || (fProofOfStake && !fStaking))
return false;
if (nTimeout && (GetTime() - nStartTime > nTimeout))
return false;
while (vNodes.empty() || IsInitialBlockDownload())
{
Sleep(1000);
if (fShutdown || (fProofOfStake && !fStaking))
return false;
if (!minedBlock && (!fGenerateBitcoins && !fProofOfStake))
return false;
}
while (pwallet->IsLocked())
{
strMintWarning = strMintMessage;
Sleep(1000);
}
strMintWarning.clear();
//
// Create new block
//
unsigned int nTransactionsUpdatedLast = nTransactionsUpdated;
CBlockIndex* pindexPrev = pindexBest;
auto_ptr<CBlock> pblock(CreateNewBlock(reservekey, pwallet, fProofOfStake));
if (!pblock.get()) return false;
IncrementExtraNonce(pblock.get(), pindexPrev, nExtraNonce);
if (fProofOfStake)
{
// ppcoin: if proof-of-stake block found then process block
if (pblock->IsProofOfStake())
{
if (!pblock->SignBlock(*pwalletMain))
{
strMintWarning = strMintMessage;
continue;
}
strMintWarning.clear();
printf("CPUMiner : proof-of-stake block found %s\n", pblock->GetHash().ToString().c_str());
SetThreadPriority(THREAD_PRIORITY_NORMAL);
bool fSucceeded = CheckWork(pblock.get(), *pwalletMain, reservekey);
SetThreadPriority(THREAD_PRIORITY_LOWEST);
if (fSucceeded && minedBlock)
{
*minedBlock = pblock->GetHash();
return true;
}
}
Sleep(500);
continue;
}
printf("Running BitcoinMiner with %d transactions in block\n", pblock->vtx.size());
//
// Search
//
int64 nStart = GetTime();
uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
INFINITE_LOOP
{
unsigned int nHashesDone = 0;
pblock->nNonce = 0;
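// Scan nonces in batches of 0x10000, then drop to the housekeeping checks
// below; nTime is refreshed before the next pass, so rescanning the same
// nonce range still hashes a different header.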
INFINITE_LOOP
{
if (pblock->GetHash() <= hashTarget)
break;
pblock->nNonce += 1;
nHashesDone += 1;
if ((pblock->nNonce & 0xFFFF) == 0)
break;
}
// Check if something found
if (pblock->GetHash() <= hashTarget)
{
if (!pblock->SignBlock(*pwalletMain))
{
strMintWarning = strMintMessage;
break;
}
else
{
strMintWarning = "";
}
SetThreadPriority(THREAD_PRIORITY_NORMAL);
bool fSucceeded = CheckWork(pblock.get(), *pwalletMain, reservekey);
SetThreadPriority(THREAD_PRIORITY_LOWEST);
if (fSucceeded && minedBlock)
{
*minedBlock = pblock->GetHash();
return true;
}
break;
}
// Meter hashes/sec
static int64 nHashCounter;
if (nHPSTimerStart == 0)
{
nHPSTimerStart = GetTimeMillis();
nHashCounter = 0;
}
else
{
nHashCounter += nHashesDone;
}
if (GetTimeMillis() - nHPSTimerStart > 4000)
{
static CCriticalSection cs;
{
LOCK(cs);
if (GetTimeMillis() - nHPSTimerStart > 4000)
{
dHashesPerSec = 1000.0 * nHashCounter / (GetTimeMillis() - nHPSTimerStart);
nHPSTimerStart = GetTimeMillis();
nHashCounter = 0;
static int64 nLogTime;
if (GetTime() - nLogTime > 30 * 60)
{
nLogTime = GetTime();
printf("%s ", DateTimeStrFormat(GetTime()).c_str());
printf("hashmeter %3d CPUs %6.0f khash/s\n", vnThreadsRunning[THREAD_MINER], dHashesPerSec/1000.0);
}
}
}
}
// Check for stop or if block needs to be rebuilt
if (fShutdown || (fProofOfStake && !fStaking))
return false;
if (!minedBlock && !fGenerateBitcoins)
return false;
if (fLimitProcessors && vnThreadsRunning[THREAD_MINER] > nLimitProcessors)
return false;
if (vNodes.empty())
break;
if (nTransactionsUpdated != nTransactionsUpdatedLast && GetTime() - nStart > 60)
break;
if (pindexPrev != pindexBest)
break;
// Update nTime every few seconds
pblock->nTime = max(pindexPrev->GetMedianTimePast()+1, pblock->GetMaxTransactionTime());
pblock->nTime = max(pblock->GetBlockTime(), pindexPrev->GetBlockTime() - MAX_CLOCK_DRIFT);
pblock->UpdateTime(pindexPrev);
if (pblock->GetBlockTime() >= (int64)pblock->vtx[0].nTime + MAX_CLOCK_DRIFT)
{
break; // need to update coinbase timestamp
}
}
}
return false;
}
void static ThreadBitcoinMiner(void* parg)
{
CWallet* pwallet = (CWallet*)parg;
try
{
vnThreadsRunning[THREAD_MINER]++;
BitcoinMiner(pwallet, false);
vnThreadsRunning[THREAD_MINER]--;
}
catch (std::exception& e) {
vnThreadsRunning[THREAD_MINER]--;
PrintException(&e, "ThreadBitcoinMiner()");
} catch (...) {
vnThreadsRunning[THREAD_MINER]--;
PrintException(NULL, "ThreadBitcoinMiner()");
}
nHPSTimerStart = 0;
if (vnThreadsRunning[THREAD_MINER] == 0)
dHashesPerSec = 0;
printf("ThreadBitcoinMiner exiting, %d threads remaining\n", vnThreadsRunning[THREAD_MINER]);
}
void GenerateBitcoins(bool fGenerate, CWallet* pwallet)
{
fGenerateBitcoins = fGenerate;
nLimitProcessors = GetArg("-genproclimit", -1);
if (nLimitProcessors == 0)
fGenerateBitcoins = false;
fLimitProcessors = (nLimitProcessors != -1);
if (fGenerate)
{
int nProcessors = boost::thread::hardware_concurrency();
printf("%d processors\n", nProcessors);
if (nProcessors < 1)
nProcessors = 1;
if (fLimitProcessors && nProcessors > nLimitProcessors)
nProcessors = nLimitProcessors;
int nAddThreads = nProcessors - vnThreadsRunning[THREAD_MINER];
printf("Starting %d BitcoinMiner threads\n", nAddThreads);
for (int i = 0; i < nAddThreads; i++)
{
if (!CreateThread(ThreadBitcoinMiner, pwallet))
printf("Error: CreateThread(ThreadBitcoinMiner) failed\n");
Sleep(10);
}
}
}<|fim▁end|> | |
<|file_name|>bitcoin_fr.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="fr" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About tumincoin</source>
<translation>À propos de tumincoin</translation>
</message>
<message>
<location line="+39"/>
<source><b>tumincoin</b> version</source>
<translation><b>tumincoin</b> version</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Ce logiciel est en phase expérimentale.
Distribué sous licence MIT/X11, voir le fichier COPYING ou http://www.opensource.org/licenses/mit-license.php.
Ce produit comprend des fonctionnalités développées par le projet OpenSSL pour être utilisés dans la boîte à outils OpenSSL (http://www.openssl.org/), un logiciel cryptographique écrit par Eric Young ([email protected]), et des fonctionnalités développées pour le logiciel UPnP écrit par Thomas Bernard.</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation>Droit d'auteur</translation>
</message>
<message>
<location line="+0"/>
<source>The tumincoin developers</source>
<translation>Les développeurs tumincoin</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Carnet d'adresses</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>Double cliquez afin de modifier l'adresse ou l'étiquette</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Créer une nouvelle adresse</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Copier l'adresse sélectionnée dans le presse-papiers</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Nouvelle adresse</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your tumincoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Voici vos adresses tumincoin qui vous permettent de recevoir des paiements. Vous pouvez donner une adresse différente à chaque expéditeur afin de savoir qui vous paye.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>&Copier l'adresse</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Afficher le &QR Code</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a tumincoin address</source>
<translation>Signer un message pour prouver que vous détenez une adresse tumincoin</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Signer un &message</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Effacer l'adresse actuellement sélectionnée de la liste</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>Exporter les données de l'onglet courant vers un fichier</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation>&Exporter</translation>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified tumincoin address</source>
<translation>Vérifier un message pour vous assurer qu'il a bien été signé avec l'adresse tumincoin spécifiée</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&Vérifier un message</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Supprimer</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your tumincoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>Ce sont vos adresses tumincoin pour émettre des paiements. Vérifiez toujours le montant et l'adresse du destinataire avant d'envoyer des pièces.</translation>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>Copier l'é&tiquette</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&Éditer</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation>Envoyer des Bit&coins</translation>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>Exporter les données du carnet d'adresses</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Valeurs séparées par des virgules (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Erreur lors de l'exportation</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Impossible d'écrire dans le fichier %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Étiquette</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(aucune étiquette)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Dialogue de phrase de passe</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Entrez la phrase de passe</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nouvelle phrase de passe</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Répétez la phrase de passe</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Entrez une nouvelle phrase de passe pour le porte-monnaie.<br/>Veuillez utiliser une phrase composée de <b>10 caractères aléatoires ou plus</b>, ou bien de <b>huit mots ou plus</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Chiffrer le porte-monnaie</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Cette opération nécessite votre phrase de passe pour déverrouiller le porte-monnaie.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Déverrouiller le porte-monnaie</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Cette opération nécessite votre phrase de passe pour décrypter le porte-monnaie.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Déchiffrer le porte-monnaie</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Changer la phrase de passe</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Entrez l’ancienne phrase de passe pour le porte-monnaie ainsi que la nouvelle.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Confirmer le chiffrement du porte-monnaie</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR tumincoinS</b>!</source>
<translation>Attention : Si vous chiffrez votre porte-monnaie et perdez votre phrase de passe, vous <b>PERDREZ ACCÈS À TOUS VOS tumincoinS</b> !</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Êtes-vous sûr de vouloir chiffrer votre porte-monnaie ?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>IMPORTANT : Les sauvegardes précédentes de votre fichier de porte-monnaie devraient être remplacées par le nouveau fichier crypté de porte-monnaie. Pour des raisons de sécurité, les précédentes sauvegardes de votre fichier de porte-monnaie non chiffré deviendront inutilisables dès que vous commencerez à utiliser le nouveau porte-monnaie chiffré.</translation>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Attention : la touche Verr. Maj. est activée !</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>Porte-monnaie chiffré</translation>
</message>
<message>
<location line="-56"/>
<source>tumincoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your tumincoins from being stolen by malware infecting your computer.</source>
<translation>tumincoin va à présent se fermer pour terminer la procédure de cryptage. N'oubliez pas que le chiffrement de votre porte-monnaie ne peut pas fournir une protection totale contre le vol par des logiciels malveillants qui infecteraient votre ordinateur.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Le chiffrement du porte-monnaie a échoué</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Le chiffrement du porte-monnaie a échoué en raison d'une erreur interne. Votre porte-monnaie n'a pas été chiffré.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>Les phrases de passe entrées ne correspondent pas.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>Le déverrouillage du porte-monnaie a échoué</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La phrase de passe entrée pour décrypter le porte-monnaie était incorrecte.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Le déchiffrage du porte-monnaie a échoué</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>La phrase de passe du porte-monnaie a été modifiée avec succès.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>Signer un &message...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>Synchronisation avec le réseau…</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&Vue d'ensemble</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Afficher une vue d’ensemble du porte-monnaie</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&Transactions</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Parcourir l'historique des transactions</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Éditer la liste des adresses et des étiquettes stockées</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Afficher la liste des adresses pour recevoir des paiements</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>Q&uitter</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Quitter l’application</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about tumincoin</source>
<translation>Afficher des informations à propos de tumincoin</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>À propos de &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Afficher des informations sur Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Options…</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&Chiffrer le porte-monnaie...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&Sauvegarder le porte-monnaie...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Modifier la phrase de passe...</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation>Importation des blocs depuis le disque...</translation>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation>Réindexation des blocs sur le disque...</translation>
</message>
<message>
<location line="-347"/>
<source>Send coins to a tumincoin address</source>
<translation>Envoyer des pièces à une adresse tumincoin</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for tumincoin</source>
<translation>Modifier les options de configuration de tumincoin</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>Sauvegarder le porte-monnaie à un autre emplacement</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Modifier la phrase de passe utilisée pour le chiffrement du porte-monnaie</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>Fenêtre de &débogage</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Ouvrir une console de débogage et de diagnostic</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>&Vérifier un message...</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>tumincoin</source>
<translation>tumincoin</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>Porte-monnaie</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation>&Envoyer</translation>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation>&Recevoir</translation>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation>&Adresses</translation>
</message>
<message>
<location line="+22"/>
<source>&About tumincoin</source>
<translation>À &propos de tumincoin</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Afficher / Cacher</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation>Afficher ou masquer la fenêtre principale</translation>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>Crypter les clefs privées de votre porte-monnaie</translation>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your tumincoin addresses to prove you own them</source>
<translation>Signer les messages avec vos adresses tumincoin pour prouver que vous les détenez</translation>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified tumincoin addresses</source>
<translation>Vérifier les messages pour vous assurer qu'ils ont bien été signés avec les adresses tumincoin spécifiées</translation>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&Fichier</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&Réglages</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>&Aide</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Barre d'outils des onglets</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>tumincoin client</source>
<translation>Client tumincoin</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to tumincoin network</source>
<translation><numerusform>%n connexion active avec le réseau tumincoin</numerusform><numerusform>%n connexions actives avec le réseau tumincoin</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation>Aucune source de bloc disponible...</translation>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation>%1 blocs sur %2 (estimés) de l'historique des transactions traités.</translation>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation>%1 blocs de l'historique des transactions traités.</translation>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation><numerusform>%n heure</numerusform><numerusform>%n heures</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n jour</numerusform><numerusform>%n jours</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation><numerusform>%n semaine</numerusform><numerusform>%n semaines</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation>%1 en arrière</translation>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation>Le dernier bloc reçu avait été généré il y a %1.</translation>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation>Les transactions après cela ne seront pas encore visibles.</translation>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation>Erreur</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation>Avertissement</translation>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation>Information</translation>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>Cette transaction dépasse la limite de taille. Vous pouvez quand même l'envoyer en vous acquittant de frais d'un montant de %1 qui iront aux nœuds qui traiteront la transaction et aideront à soutenir le réseau. Voulez-vous payer les frais ?</translation>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>À jour</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>Rattrapage…</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>Confirmer les frais de transaction</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>Transaction envoyée</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>Transaction entrante</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Date : %1
Montant : %2
Type : %3
Adresse : %4
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>Gestion des URI</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid tumincoin address or malformed URI parameters.</source>
<translation>L'URI ne peut être analysé ! Cela peut être causé par une adresse tumincoin invalide ou par des paramètres d'URI malformés.</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Le porte-monnaie est <b>chiffré</b> et est actuellement <b>déverrouillé</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Le porte-monnaie est <b>chiffré</b> et est actuellement <b>verrouillé</b></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. tumincoin can no longer continue safely and will quit.</source>
<translation>Une erreur fatale est survenue. tumincoin ne peut plus continuer à fonctionner de façon sûre et va s'arrêter.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>Alerte réseau</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Éditer l'adresse</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Étiquette</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>L’étiquette associée à cette entrée du carnet d'adresses</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Adresse</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>L’adresse associée avec cette entrée du carnet d'adresses. Ne peut être modifiées que les adresses d’envoi.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Nouvelle adresse de réception</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Nouvelle adresse d’envoi</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Éditer l’adresse de réception</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Éditer l’adresse d'envoi</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>L’adresse fournie « %1 » est déjà présente dans le carnet d'adresses.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid tumincoin address.</source>
<translation>L'adresse fournie « %1 » n'est pas une adresse tumincoin valide.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Impossible de déverrouiller le porte-monnaie.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Échec de la génération de la nouvelle clef.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>tumincoin-Qt</source>
<translation>tumincoin-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>version</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Utilisation :</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>options de ligne de commande</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>Options Interface Utilisateur</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Définir la langue, par exemple « de_DE » (par défaut : la langue du système)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Démarrer sous forme minimisée</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Afficher l'écran d'accueil au démarrage (par défaut : 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Options</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>Réglages &principaux</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation>Frais de transaction optionnel par ko qui aident à garantir un traitement rapide des transactions. La plupart des transactions utilisent 1 ko.</translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Payer des &frais de transaction</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start tumincoin after logging in to the system.</source>
<translation>Démarrer tumincoin automatiquement lors de l'ouverture une session sur l'ordinateur.</translation>
</message>
<message>
<location line="+3"/>
<source>&Start tumincoin on system login</source>
<translation>&Démarrer tumincoin lors de l'ouverture d'une session</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation>Remettre toutes les options du client aux valeurs par défaut.</translation>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation>&Remise à zéro des options</translation>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>&Réseau</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the tumincoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Ouvrir le port du client tumincoin automatiquement sur le routeur. Cela ne fonctionne que si votre routeur supporte l'UPnP et si la fonctionnalité est activée.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Ouvrir le port avec l'&UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the tumincoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Connexion au réseau tumincoin à travers un proxy SOCKS (par ex. lors d'une connexion via Tor).</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Connexion à travers un proxy SOCKS :</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>&IP du proxy :</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>Adresse IP du proxy (par ex. 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Port :</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Port du proxy (par ex. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>&Version SOCKS :</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Version SOCKS du serveur mandataire (par ex. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Fenêtre</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Afficher uniquement une icône système après minimisation.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimiser dans la barre système au lieu de la barre des tâches</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimiser au lieu quitter l'application lorsque la fenêtre est fermée. Lorsque cette option est activée, l'application ne pourra être fermée qu'en sélectionnant Quitter dans le menu déroulant.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimiser lors de la fermeture</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Affichage</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>&Langue de l'interface utilisateur :</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting tumincoin.</source>
<translation>La langue de l'interface utilisateur peut être définie ici. Ce réglage sera pris en compte après redémarrage de tumincoin.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&Unité d'affichage des montants :</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Choisissez la sous-unité par défaut pour l'affichage dans l'interface et lors de l'envoi de pièces.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show tumincoin addresses in the transaction list or not.</source>
<translation>Détermine si les adresses tumincoin seront affichées sur la liste des transactions.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&Afficher les adresses sur la liste des transactions</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&Valider</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>A&nnuler</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Appliquer</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>par défaut</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation>Confirmer la remise à zéro des options</translation>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation>La prise en compte de certains réglages peut nécessiter un redémarrage du client.</translation>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation>Voulez-vous continuer ?</translation>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>Avertissement</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting tumincoin.</source>
<translation>Ce réglage sera pris en compte après un redémarrage de tumincoin.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>L'adresse de proxy fournie est invalide.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formulaire</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the tumincoin network after a connection is established, but this process has not completed yet.</source>
<translation>Les informations affichées peuvent être obsolètes. Votre porte-monnaie est automatiquement synchronisé avec le réseau tumincoin lorsque la connexion s'établit, mais ce processus n'est pas encore terminé.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>Solde :</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Non confirmé :</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>Porte-monnaie</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation>Immature :</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Le solde généré n'est pas encore mûr</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Transactions récentes</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>Votre solde actuel</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Total des transactions qui doivent encore être confirmées et qui ne sont pas prises en compte dans le solde actuel</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>désynchronisé</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start tumincoin: click-to-pay handler</source>
<translation>Impossible de démarrer tumincoin : gestionnaire de cliquer-pour-payer</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>Dialogue de QR Code</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Demande de paiement</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Montant :</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Étiquette :</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Message :</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Enregistrer sous...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>Erreur lors de l'encodage de l'URI en QR Code.</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>Le montant entré est invalide, veuillez le vérifier.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>L'URI résultant est trop long, essayez avec un texte d'étiquette ou de message plus court.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Sauvegarder le QR Code</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>Images PNG (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Nom du client</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation>Indisponible</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Version du client</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Informations</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Version d'OpenSSL utilisée</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Date de démarrage</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Réseau</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Nombre de connexions</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>Sur testnet</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Chaîne de blocs</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Nombre actuel de blocs</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Nombre total estimé de blocs</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Horodatage du dernier bloc</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Ouvrir</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>Options de ligne de commande</translation>
</message>
<message>
<location line="+7"/>
<source>Show the tumincoin-Qt help message to get a list with possible tumincoin command-line options.</source>
<translation>Afficher le message d'aide de tumincoin-Qt pour obtenir la liste des options de ligne de commande disponibles pour tumincoin.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Afficher</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Console</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Date de compilation</translation>
</message>
<message>
<location line="-104"/>
<source>tumincoin - Debug window</source>
<translation>tumincoin - Fenêtre de débogage</translation>
</message>
<message>
<location line="+25"/>
<source>tumincoin Core</source>
<translation>Noyau tumincoin</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Journal de débogage</translation>
</message>
<message>
<location line="+7"/>
<source>Open the tumincoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Ouvrir le journal de débogage de tumincoin depuis le répertoire de données actuel. Cela peut prendre quelques secondes pour les journaux de grande taille.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Nettoyer la console</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the tumincoin RPC console.</source>
<translation>Bienvenue sur la console RPC de tumincoin.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Utilisez les touches de curseur pour naviguer dans l'historique et <b>Ctrl-L</b> pour effacer l'écran.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Tapez <b>help</b> pour afficher une vue générale des commandes disponibles.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Envoyer des pièces</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>Envoyer des pièces à plusieurs destinataires à la fois</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Ajouter un &destinataire</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Enlever tous les champs de transaction</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>&Tout nettoyer</translation>
</message>
<message>
<location line="+22"/><|fim▁hole|> <message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123.456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Confirmer l’action d'envoi</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>E&nvoyer</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> à %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Confirmer l’envoi des pièces</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Êtes-vous sûr de vouloir envoyer %1 ?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> et </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Cette adresse de destinataire n’est pas valide, veuillez la vérifier.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Le montant à payer doit être supérieur à 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Le montant dépasse votre solde.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Le montant dépasse votre solde lorsque les frais de transaction de %1 sont inclus.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Adresse dupliquée trouvée, il n'est possible d'envoyer qu'une fois à chaque adresse par opération d'envoi.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation>Erreur : Échec de la création de la transaction !</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Erreur : la transaction a été rejetée. Cela peut arriver si certaines pièces de votre porte-monnaie ont déjà été dépensées, par exemple si vous avez utilisé une copie de wallet.dat avec laquelle les pièces ont été dépensées mais pas marquées comme telles ici.</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Formulaire</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>&Montant :</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Payer &à :</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>L'adresse à laquelle le paiement sera envoyé (par ex. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Entrez une étiquette pour cette adresse afin de l’ajouter à votre carnet d’adresses</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Étiquette :</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>Choisir une adresse dans le carnet d'adresses</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Coller une adresse depuis le presse-papiers</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Enlever ce destinataire</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a tumincoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Entrez une adresse tumincoin (par ex. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Signatures - Signer / Vérifier un message</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&Signer un message</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Vous pouvez signer des messages avec vos adresses pour prouver que vous les détenez. Faites attention à ne pas signer quoi que ce soit de vague car des attaques d'hameçonnage peuvent essayer d'usurper votre identité par votre signature. Ne signez que des déclarations entièrement détaillées et avec lesquelles vous êtes d'accord.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>L'adresse avec laquelle le message sera signé (par ex. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>Choisir une adresse depuis le carnet d'adresses</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>Coller une adresse depuis le presse-papiers</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Entrez ici le message que vous désirez signer</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation>Signature</translation>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Copier la signature actuelle dans le presse-papiers</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this tumincoin address</source>
<translation>Signer le message pour prouver que vous détenez cette adresse tumincoin</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Signer le &message</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>Remettre à zéro tous les champs de signature de message</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>&Tout nettoyer</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>&Vérifier un message</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Entrez ci-dessous l'adresse ayant servi à signer, le message (assurez-vous d'avoir copié exactement les retours à la ligne, les espacements, tabulations etc.) et la signature pour vérifier le message. Faites attention à ne pas déduire davantage de la signature que ce qui est contenu dans le message signé lui-même pour éviter d'être trompé par une attaque d'homme du milieu.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>L'adresse avec laquelle le message a été signé (par ex. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified tumincoin address</source>
<translation>Vérifier le message pour vous assurer qu'il a bien été signé par l'adresse tumincoin spécifiée</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation>Vérifier un &message</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>Remettre à zéro tous les champs de vérification de message</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a tumincoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Entrez une adresse tumincoin (par ex. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Cliquez sur « Signer le message » pour générer la signature</translation>
</message>
<message>
<location line="+3"/>
<source>Enter tumincoin signature</source>
<translation>Entrer une signature tumincoin</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>L'adresse entrée est invalide.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Veuillez vérifier l'adresse et réessayez.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>L'adresse entrée ne fait pas référence à une clef.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Le déverrouillage du porte-monnaie a été annulé.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>La clef privée pour l'adresse indiquée n'est pas disponible.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>La signature du message a échoué.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Le message a été signé.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>La signature n'a pu être décodée.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Veuillez vérifier la signature et réessayez.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>La signature ne correspond pas au hachage du message.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Échec de la vérification du message.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Message vérifié.</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The tumincoin developers</source>
<translation>Les développeurs tumincoin</translation>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>Ouvert jusqu'à %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1/hors ligne</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/non confirmée</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 confirmations</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>État</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, diffusée à travers %n nœud</numerusform><numerusform>, diffusée à travers %n nœuds</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Source</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Génération</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>De</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>À</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>votre propre adresse</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>étiquette</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Crédit</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>arrive à maturité dans %n bloc de plus</numerusform><numerusform>arrive à maturité dans %n blocs de plus</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>non accepté</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Débit</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Frais de transaction</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Montant net</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Message</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Commentaire</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>ID de la transaction</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Les pièces générées doivent mûrir pendant 120 blocs avant de pouvoir être dépensées. Lorsque vous avez généré ce bloc, il a été diffusé sur le réseau pour être ajouté à la chaîne de blocs. S'il échoue à intégrer la chaîne, son état sera modifié en « non accepté » et il ne sera pas possible de le dépenser. Cela peut arriver occasionnellement si un autre nœud génère un bloc quelques secondes avant ou après vous.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Informations de débogage</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transaction</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>Entrées</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Montant</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>vrai</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>faux</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, n’a pas encore été diffusée avec succès</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Ouvert pour %n bloc de plus</numerusform><numerusform>Ouvert pour %n blocs de plus</numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>inconnu</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Détails de la transaction</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Ce panneau affiche une description détaillée de la transaction</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Montant</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Ouvert pour %n bloc de plus</numerusform><numerusform>Ouvert pour %n blocs de plus</numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>Ouvert jusqu'à %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>Hors ligne (%1 confirmations)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>Non confirmée (%1 confirmations sur un total de %2)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Confirmée (%1 confirmations)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>Le solde généré sera disponible quand il aura mûri dans %n bloc de plus</numerusform><numerusform>Le solde généré sera disponible quand il aura mûri dans %n blocs de plus</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Ce bloc n’a été reçu par aucun autre nœud et ne sera probablement pas accepté !</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Généré mais pas accepté</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>Reçue avec</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Reçue de</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Envoyée à</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Paiement à vous-même</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Extraction</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(indisponible)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>État de la transaction. Laissez le pointeur de la souris sur ce champ pour voir le nombre de confirmations.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Date et heure de réception de la transaction.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Type de transaction.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>L’adresse de destination de la transaction.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Montant ajouté au, ou enlevé du, solde.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>Toutes</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Aujourd’hui</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Cette semaine</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Ce mois-ci</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Mois dernier</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Cette année</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Intervalle…</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Reçues avec</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Envoyées à</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>À vous-même</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Extraction</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Autres</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Entrez une adresse ou une étiquette à rechercher</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Montant min</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Copier l’adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Copier l’étiquette</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Copier le montant</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Copier l'ID de la transaction</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Éditer l’étiquette</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Afficher les détails de la transaction</translation>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>Exporter les données des transactions</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Valeurs séparées par des virgules (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Confirmée</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Étiquette</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Montant</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Erreur lors de l’exportation</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Impossible d'écrire dans le fichier %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Intervalle :</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>à</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>Envoyer des pièces</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation>&Exporter</translation>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>Exporter les données de l'onglet courant vers un fichier</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation>Sauvegarder le porte-monnaie</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>Données de porte-monnaie (*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Échec de la sauvegarde</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>Une erreur est survenue lors de l'enregistrement des données de porte-monnaie vers le nouvel emplacement.</translation>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation>Sauvegarde réussie</translation>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation>Les données de porte-monnaie ont été enregistrées avec succès sur le nouvel emplacement.</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>tumincoin version</source>
<translation>Version de tumincoin</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>Utilisation :</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or tumincoind</source>
<translation>Envoyer une commande à -server ou à tumincoind</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>Lister les commandes</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>Obtenir de l’aide pour une commande</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>Options :</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: tumincoin.conf)</source>
<translation>Spécifier le fichier de configuration (par défaut : tumincoin.conf)</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: tumincoind.pid)</source>
<translation>Spécifier le fichier PID (par défaut : tumincoind.pid)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Spécifier le répertoire de données</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Définir la taille du cache de la base de données en mégaoctets (par défaut : 25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 9333 or testnet: 19333)</source>
<translation>Écouter les connexions sur le <port> (par défaut : 9333 ou testnet : 19333)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Garder au plus <n> connexions avec les pairs (par défaut : 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Se connecter à un nœud pour obtenir des adresses de pairs puis se déconnecter</translation>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation>Spécifier votre propre adresse publique</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Seuil de déconnexion des pairs de mauvaise qualité (par défaut : 100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Délai en secondes de refus de reconnexion aux pairs de mauvaise qualité (par défaut : 86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Une erreur est survenue lors de la mise en place du port RPC %u pour écouter sur IPv4 : %s</translation>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 9332 or testnet: 19332)</source>
<translation>Écouter les connexions JSON-RPC sur le <port> (par défaut : 9332 ou testnet : 19332)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Accepter les commandes de JSON-RPC et de la ligne de commande</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Fonctionner en arrière-plan en tant que démon et accepter les commandes</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>Utiliser le réseau de test</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Accepter les connexions entrantes (par défaut : 1 si -proxy ou -connect ne sont pas présents)</translation>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=tumincoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "tumincoin Alert" [email protected]
</source>
<translation>%s, vous devez définir un mot de passe rpc dans le fichier de configuration :
%s
Il vous est conseillé d'utiliser le mot de passe aléatoire suivant :
rpcuser=tumincoinrpc
rpcpassword=%s
(vous n'avez pas besoin de retenir ce mot de passe)
Le nom d'utilisateur et le mot de passe NE DOIVENT PAS être identiques.
Si le fichier n'existe pas, créez-le avec les droits de lecture accordés au propriétaire.
Il est aussi conseillé de régler alertnotify pour être prévenu des problèmes ;
par exemple : alertnotify=echo %%s | mail -s "tumincoin Alert" [email protected]
</translation>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Une erreur est survenue lors de la mise en place du port RPC %u pour écouter sur IPv6, retour à IPv4 : %s</translation>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>Se lier à l'adresse donnée et toujours l'écouter. Utilisez la notation [host]:port pour l'IPv6</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. tumincoin is probably already running.</source>
<translation>Impossible d’obtenir un verrou sur le répertoire de données %s. tumincoin fonctionne probablement déjà.</translation>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Erreur : la transaction a été rejetée ! Cela peut arriver si certaines pièces de votre porte-monnaie étaient déjà dépensées, par exemple si vous avez utilisé une copie de wallet.dat et les pièces ont été dépensées avec cette copie sans être marquées comme telles ici.</translation>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation>Erreur : cette transaction nécessite des frais de transaction d'au moins %s en raison de son montant, de sa complexité ou parce que des fonds reçus récemment sont utilisés !</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>Exécuter une commande lorsqu'une alerte correspondante est reçue (%s dans la commande sera remplacé par le message)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Exécuter une commande lorsqu'une transaction de porte-monnaie change (%s dans la commande est remplacé par TxID)</translation>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Définir la taille maximale en octets des transactions prioritaires/à frais modiques (par défaut : 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>Ceci est une pré-version de test - utilisez à vos risques et périls - ne l'utilisez pas pour miner ou pour des applications marchandes</translation>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Attention : -paytxfee est réglée sur un montant très élevé ! Il s'agit des frais de transaction que vous payerez si vous émettez une transaction.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>Avertissement : les transactions affichées pourraient être incorrectes ! Vous ou d'autres nœuds du réseau pourriez avoir besoin d'effectuer une mise à jour.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong tumincoin will not work properly.</source>
<translation>Attention : veuillez vérifier que l'heure et la date de votre ordinateur sont correctes ! Si votre horloge n'est pas à l'heure, tumincoin ne fonctionnera pas correctement.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Avertissement : une erreur est survenue lors de la lecture de wallet.dat ! Toutes les clefs ont été lues correctement mais les données de transaction ou les entrées du carnet d'adresses pourraient être incorrectes ou manquantes.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Avertissement : wallet.dat corrompu, données récupérées ! Le fichier wallet.dat original a été enregistré en tant que wallet.{horodatage}.bak dans %s ; si votre solde ou vos transactions sont incorrects, vous devriez effectuer une restauration depuis une sauvegarde.</translation>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Tenter de récupérer les clefs privées d'un wallet.dat corrompu</translation>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>Options de création des blocs :</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>Ne se connecter qu'au(x) nœud(s) spécifié(s)</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation>Base de données des blocs corrompue détectée</translation>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Découvrir sa propre adresse IP (par défaut : 1 lors de l'écoute et si -externalip n'est pas présent)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation>Voulez-vous reconstruire la base de données des blocs maintenant ?</translation>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation>Erreur lors de l'initialisation de la base de données des blocs</translation>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation>Erreur lors de l'initialisation de l'environnement de la base de données du porte-monnaie %s !</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation>Erreur du chargement de la base de données des blocs</translation>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation>Erreur lors de l'ouverture de la base de données des blocs</translation>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation>Erreur : l'espace disque est faible !</translation>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation>Erreur : Porte-monnaie verrouillé, impossible de créer la transaction !</translation>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation>Erreur : erreur système :</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Échec de l'écoute sur un port quelconque. Utilisez -listen=0 si c'est ce que vous souhaitez.</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation>La lecture des informations de bloc a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation>La lecture du bloc a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation>La synchronisation de l'index des blocs a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation>L'écriture de l'index des blocs a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation>L'écriture des informations du bloc a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation>L'écriture du bloc a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation>L'écriture des informations de fichier a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation>L'écriture dans la base de données des pièces a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation>L'écriture de l'index des transactions a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation>L'écriture des données d'annulation a échoué</translation>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>Trouver des pairs en utilisant la recherche DNS (par défaut : 1 sauf si -connect est utilisé)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation>Générer des pièces (par défaut : 0)</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation>Nombre de blocs à vérifier au démarrage (par défaut : 288, 0 = tout)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation>Niveau d'approfondissement de la vérification des blocs (0-4, par défaut : 3)</translation>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation>Pas assez de descripteurs de fichiers disponibles.</translation>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation>Reconstruire l'index de la chaîne des blocs à partir des fichiers blk000??.dat actuels</translation>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation>Définir le nombre de fils d'exécution pour desservir les appels RPC (par défaut : 4)</translation>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation>Vérification des blocs...</translation>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation>Vérification du porte-monnaie...</translation>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation>Importe des blocs depuis un fichier blk000??.dat externe</translation>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation>Définir le nombre de fils d’exécution pour la vérification des scripts (maximum 16, 0 = auto, < 0 = laisser ce nombre de cœurs libres, par défaut : 0)</translation>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation>Informations</translation>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>Adresse -tor invalide : « %s »</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation>Montant invalide pour -minrelaytxfee=<montant> : « %s »</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation>Montant invalide pour -mintxfee=<montant> : « %s »</translation>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation>Maintenir un index complet des transactions (par défaut : 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Tampon maximal de réception par connexion, <n>*1000 octets (par défaut : 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Tampon maximal d'envoi par connexion, <n>*1000 octets (par défaut : 1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation>N'accepter que la chaîne de blocs correspondant aux points de vérification internes (par défaut : 1)</translation>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Se connecter uniquement aux nœuds du réseau <net> (IPv4, IPv6 ou Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Afficher des informations de débogage supplémentaires. Implique toutes les autres options -debug*</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Afficher des informations de débogage réseau supplémentaires</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>Faire précéder les données de débogage par un horodatage</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the tumincoin Wiki for SSL setup instructions)</source>
<translation>Options SSL : (cf. le wiki de tumincoin pour les instructions de configuration du SSL)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Sélectionner la version du proxy socks à utiliser (4-5, par défaut : 5)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Envoyer les informations de débogage/trace à la console au lieu du fichier debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Envoyer les informations de débogage/trace au débogueur</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Définir la taille maximale des blocs en octets (par défaut : 250000)</translation>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Définir la taille minimale des blocs en octets (par défaut : 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Réduire le fichier debug.log lors du démarrage du client (par défaut : 1 lorsque -debug n'est pas présent)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation>La signature de la transaction a échoué</translation>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Spécifier le délai d'expiration de la connexion en millisecondes (par défaut : 5000)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation>Erreur système :</translation>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation>Montant de la transaction trop bas</translation>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation>Les montants de la transaction doivent être positifs</translation>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation>Transaction trop volumineuse</translation>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Utiliser l'UPnP pour rediriger le port d'écoute (par défaut : 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Utiliser l'UPnP pour rediriger le port d'écoute (par défaut : 1 lors de l'écoute)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Utiliser un proxy pour atteindre les services cachés de Tor (par défaut : même valeur que -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>Nom d'utilisateur pour les connexions JSON-RPC</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation>Avertissement</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Avertissement : cette version est obsolète, une mise à jour est nécessaire !</translation>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation>Vous devez reconstruire les bases de données avec -reindex pour modifier -txindex</translation>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat corrompu, la récupération a échoué</translation>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>Mot de passe pour les connexions JSON-RPC</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Autoriser les connexions JSON-RPC depuis l'adresse IP spécifiée</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Envoyer des commandes au nœud fonctionnant sur <ip> (par défaut : 127.0.0.1)</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Exécuter la commande lorsque le meilleur bloc change (%s dans cmd est remplacé par le hachage du bloc)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>Mettre à niveau le porte-monnaie vers le format le plus récent</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Régler la taille de la réserve de clefs sur <n> (par défaut : 100)</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Réanalyser la chaîne de blocs pour les transactions de porte-monnaie manquantes</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Utiliser OpenSSL (https) pour les connexions JSON-RPC</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Fichier de certificat serveur (par défaut : server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Clef privée du serveur (par défaut : server.pem)</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Chiffrements acceptables (par défaut : TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>Ce message d'aide</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Impossible de se lier à %s sur cet ordinateur (bind a retourné l'erreur %d, %s)</translation>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation>Se connecter via un proxy socks</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Autoriser les recherches DNS pour -addnode, -seednode et -connect</translation>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>Chargement des adresses…</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Erreur lors du chargement de wallet.dat : porte-monnaie corrompu</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of tumincoin</source>
<translation>Erreur lors du chargement de wallet.dat : le porte-monnaie nécessite une version plus récente de tumincoin</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart tumincoin to complete</source>
<translation>Le porte-monnaie nécessitait une réécriture : veuillez redémarrer tumincoin pour terminer l'opération</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>Erreur lors du chargement de wallet.dat</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Adresse -proxy invalide : « %s »</translation>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Réseau inconnu spécifié dans -onlynet : « %s »</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Version inconnue de proxy -socks demandée : %i</translation>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Impossible de résoudre l'adresse -bind : « %s »</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Impossible de résoudre l'adresse -externalip : « %s »</translation>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Montant invalide pour -paytxfee=<montant> : « %s »</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>Montant invalide</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>Fonds insuffisants</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>Chargement de l’index des blocs…</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Ajouter un nœud auquel se connecter et tenter de garder la connexion ouverte</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. tumincoin is probably already running.</source>
<translation>Impossible de se lier à %s sur cet ordinateur. tumincoin fonctionne probablement déjà.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Frais par Ko à ajouter aux transactions que vous enverrez</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>Chargement du porte-monnaie…</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>Impossible de revenir à une version antérieure du porte-monnaie</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>Impossible d'écrire l'adresse par défaut</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>Nouvelle analyse…</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>Chargement terminé</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>Pour utiliser l'option %s</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>Erreur</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Vous devez ajouter la ligne rpcpassword=<mot-de-passe> au fichier de configuration :
%s
Si le fichier n'existe pas, créez-le avec les droits de lecture seule accordés au propriétaire.</translation>
</message>
</context>
</TS><|fim▁end|> | <source>Balance:</source>
<translation>Solde :</translation>
</message> |
<|file_name|>leafmt.rs<|end_file_name|><|fim▁begin|>//! Lea code formatter.
// FIXME This requires the parser to preserve comments to work properly
// This is blocked on an upstream issue: https://github.com/kevinmehall/rust-peg/issues/84
<|fim▁hole|>#[macro_use]
extern crate clap;
extern crate term;
extern crate rustc_serialize;
extern crate lea_parser as parser;
extern crate lea;
use parser::span::DummyTerm;
use parser::prettyprint::PrettyPrinter;
use std::io::{self, stdin, stderr, stdout, Read, Write};
use std::fs::File;
use std::path::Path;
/// Opens a terminal that writes to stderr. If stderr couldn't be opened as a terminal, creates a
/// `DummyTerm` that writes to stderr instead.
fn stderr_term() -> Box<term::StderrTerminal> {
term::stderr().unwrap_or_else(|| Box::new(DummyTerm(io::stderr())))
}
/// Parses the given source code and pretty-prints it
fn prettyprint<W: Write>(code: &str, source_name: &str, mut target: W) -> io::Result<()> {
match parser::block(code) {
Ok(main) => {
let mut pp = PrettyPrinter::new(&mut target);
try!(pp.print_block(&main));
}
Err(e) => {
try!(e.format(code, source_name, &mut *stderr_term()));
}
}
Ok(())
}
fn read_file(filename: &str) -> io::Result<String> {
let mut s = String::new();
let mut file = try!(File::open(&Path::new(filename)));
try!(file.read_to_string(&mut s));
Ok(s)
}
fn main() {
let matches = clap_app!(leafmt =>
(version: lea::version_str())
(about: "Lea source code formatter / pretty printer")
(@arg FILE: +required "The file to format (`-` to read from stdin)")
(@arg out: -o --out +takes_value "Write output to <out> (`-` to write to stdout).")
(after_help: "By default, leafmt will write the formatted code to stdout.\n")
).get_matches();
let file = matches.value_of("FILE").unwrap();
// Read input
let mut code = String::new();
let source_name;
if file == "-" {
stdin().read_to_string(&mut code).unwrap();
source_name = "<stdin>";
} else {
source_name = file;
code = match read_file(&source_name) {
Ok(content) => content,
Err(e) => {
writeln!(stderr(), "{}", e).unwrap();
return;
}
}
}
// Open output
let writer: Box<Write>;
match matches.value_of("out") {
None | Some("-") => {
writer = Box::new(stdout()) as Box<Write>;
}
Some(s) => {
let f = match File::create(&Path::new(s)) {
Ok(f) => f,
Err(e) => {
writeln!(stderr(), "{}", e).unwrap();
return;
}
};
writer = Box::new(f) as Box<Write>;
}
}
prettyprint(&code, &source_name, writer).unwrap();
}<|fim▁end|> | |
<|file_name|>CWE78_OS_Command_Injection__char_console_w32_spawnvp_82_goodG2B.cpp<|end_file_name|><|fim▁begin|>/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE78_OS_Command_Injection__char_console_w32_spawnvp_82_goodG2B.cpp
Label Definition File: CWE78_OS_Command_Injection.strings.label.xml
Template File: sources-sink-82_goodG2B.tmpl.cpp
*/
/*
* @description
* CWE: 78 OS Command Injection
* BadSource: console Read input from the console
* GoodSource: Fixed string
<|fim▁hole|> * Flow Variant: 82 Data flow: data passed in a parameter to a virtual method called via a pointer
*
* */
#ifndef OMITGOOD
#include "std_testcase.h"
#include "CWE78_OS_Command_Injection__char_console_w32_spawnvp_82.h"
#include <process.h>
namespace CWE78_OS_Command_Injection__char_console_w32_spawnvp_82
{
void CWE78_OS_Command_Injection__char_console_w32_spawnvp_82_goodG2B::action(char * data)
{
{
char *args[] = {COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG3, NULL};
/* spawnvp - searches for the location of the command among
* the directories specified by the PATH environment variable */
/* POTENTIAL FLAW: Execute command without validating input possibly leading to command injection */
_spawnvp(_P_WAIT, COMMAND_INT, args);
}
}
}
#endif /* OMITGOOD */<|fim▁end|> | * Sinks: w32_spawnvp
* BadSink : execute command with spawnvp
|