repo_name (string, lengths 5-92) | path (string, lengths 4-232) | copies (string, 19 classes) | size (string, lengths 4-7) | content (string, lengths 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
natematias/pond-hopper | pond-hopper.py | 1 | 8692 | import re
import json
import sys
import os
import requests
import datetime
from bs4 import BeautifulSoup
from feedgen.feed import FeedGenerator
import dateutil.parser
from pytz import timezone
import pytz
import flask
from flask import Flask
from flask import render_template
from gender_detector import GenderDetector
import nltk
import string
from collections import defaultdict
import codecs
from mediameter.cliff import Cliff
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
sex_detector = GenderDetector('us')
my_cliff = Cliff('http://civicprod.media.mit.edu',8080)
app = Flask(__name__)
class Article:
def __init__(self, section, author=None, social=False):
self.author = author
def append_feedgen(self, fe):
fe.title(self.title)
for byline in self.bylines:
fe.author({"name":byline["name"]})
fe.link([{"href":self.url},{"href":self.image}])
fe.id(self.url)
fe.updated(self.date)
fe.pubdate(self.date)
fe.description(self.subtitle)
class AtlanticArticle(Article):
def __init__(self, section, author=None, social=False):
#extract information from the page
title_link = section.findAll("a")[0]#findAll(attrs={"class":"article"})[0].findAll("a")[0]
self.title = title_link.findAll("h2")[0].get_text()
self.url = re.sub(r'\#.*', '', title_link['href'])
eastern = timezone('US/Eastern')
self.article_text = None
self.cliff = None
# some dates come in as July/August 2014, so strip the first month field from it
datefield = section.findAll("time")[0]#re.sub(r'.*\/?','',section.findAll(attrs={"class":"date"})[0].get_text())
#import pdb;pdb.set_trace()
self.date = eastern.localize(dateutil.parser.parse(datefield.text.strip()))
self.subtitle = section.findAll(attrs={"class":"dek"})[0].get_text()
self.bylines = []
if author is None:
for auth in section.findAll(attrs={"class":"author"}):
self.bylines.append({"name": auth.get_text(), "url": auth['href']})
else:
self.bylines.append({"name":author})
self.image = None
thumb = section.findAll("figure")
if(len(thumb)>0):
img = thumb[0].findAll("img")
if(len(img)>0):
self.image = img[0]['src']
#print self.image
#TODO: download the first paragraph from the article
print self.title.encode('ascii', 'ignore')
self.get_article_text()
self.query_cliff()
self.get_gender_counts()
#TODO: download social media metrics for the article
if(social):
self.facebook = facebook("http://theatlantic.com/" + self.url)
#self.twitter = twitter(self.url)
def get_article_text(self):
res = requests.get("http://theatlantic.com" + self.url)
soup = BeautifulSoup(res.text)
body_tag = soup.findAll(attrs={"class":"article-body"})
self.article_text = body_tag[0].text.replace("\n", " \n")
self.sentences = len(sent_detector.tokenize(self.article_text))
return self.article_text
def query_cliff(self):
#cliff_url = "http://cliff.mediameter.org/process"
a_text = self.article_text#.encode('ascii', 'ignore')
#res = requests.post(cliff_url, data={"demonyms":"false", "text":a_text})
self.cliff = my_cliff.parseText(a_text)#json.loads(res.text)
#f = codecs.open("articletext.log", "a", encoding='utf_8')
#f.write(a_text)
#f.write("\n\n ---------------\n\n")
#f.write(self.cliff)
#f.write("\n\n ---------------\n\n")
#f.close()
self.cliff['results']['mentions']=None
self.cliff['results']['places']=None
return self.cliff
def person_list(self):
return {"names":set(),"first":None, "gender":"unknown", "count":0}
def get_gender_counts(self):
if(self.cliff is None):
return None
people_list = defaultdict(self.person_list)
for person in self.cliff['results']['people']:
fullname = person['name']
nametokens = string.split(fullname.strip(), ' ')
surname = nametokens[-1]
if(len(nametokens)==0):
continue
## ASSUMPTION: SINGLE NAME IS A SURNAME SITUATION
people_list[surname]['names'].add(fullname)
people_list[surname]['count'] += person['count']
if(len(nametokens)>1):
people_list[surname]['first'] = nametokens[0]
counts = {"male":0, "female":0, "unknown":0}
for key in people_list.keys():
person = people_list[key]
if(person['first'] is None):
counts['unknown'] += person['count']
continue
gender = sex_detector.guess(person['first'])
counts[gender] += person['count']
people_list[gender] = gender
self.people_list = people_list
self.gender_counts = counts
@app.route("/metrics/byline/<byline>")
def byline_metrics(byline):
url = "http://www.theatlantic.com/author/" + byline.replace("/","") + "/"
fg, articles = get_fg(url,social=True)
#twitter = [str(x.twitter) for x in articles]
twitter = []
facebook = [str(x.facebook['data'][0]['total_count']) for x in articles]
labels = ['"' + x.date.strftime('%b %d %Y') + '"' for x in articles]
labels.reverse()
data = {"twitter":twitter,"facebook":facebook}
data['twitter'].reverse()
data['facebook'].reverse()
return render_template("metrics.html", fg = fg, articles=articles, byline=byline, twitter = twitter, facebook=facebook, labels=labels, data=data)
# get a feed for a byline
@app.route("/byline/<byline>")
def byline(byline):
url = "http://www.theatlantic.com/" + byline.replace("/","") + "/"
#print url
return get_feed_for_url(url)
# get a feed for a section
@app.route("/section/<sectiona>/<sectionb>/<sectionc>/")
def section(sectiona,sectionb,sectionc):
url = "http://www.theatlantic.com/{0}/{1}/{2}".format(sectiona,sectionb,sectionc)
return get_feed_for_url(url)
def get_fg(url, social=False):
res = requests.get(url)
soup = BeautifulSoup(res.text)
#load the articles into classes
articles = []
author_tag = soup.findAll("div", attrs={"class":"author-header"})
#at = author_tag.findAll("div", attrs={"class":"name"})
author = None
if len(author_tag)>0:
at = author_tag[0].findAll(attrs={"class":"name"})
#author = ' '.join(author_tag[0].get_text().split())
author = at[0].text.strip()
for article in soup.findAll(attrs={"class":"article"}):
articles.append(AtlanticArticle(article, author=author,social=social))
#import pdb; pdb.set_trace()
#set up the feed, with basic metadata
fg = FeedGenerator()
fg.link(href=url)
if(author is None and len(articles)>0):
fg.author(name=articles[0].bylines[0])
else:
fg.author(name=author)
title_tag = soup.findAll(attrs={"class":"display-category"})
#set the title if there's not a category -- e.g. it's a person's page
if(len(title_tag)>0):
title = ' '.join(title_tag[0].get_text().split())
else:
title = "Atlantic posts by {0}".format(author.encode('ascii', 'ignore'))
fg.title(title)
#set the description
description = soup.findAll(attrs={"class":"bio"})
if len(description)>0:
fg.description(' '.join(description[0].get_text().split()))
else:
fg.description("RSS Feed for {0}, generated by Pond Hopper 0.1".format(title))
#add each article to the feed
for article in articles:
article.append_feedgen(fg.add_entry())
return fg, articles
#return a feed for a url
def get_feed_for_url(url):
fg = get_fg(url)[0]
return flask.Response(fg.rss_str(pretty=True), mimetype='application/rss+xml')
#get facebook data for a url
def facebook(url):
#res = requests.get("http://graph.facebook.com/" + url)
res = requests.get("https://graph.facebook.com/fql?q=SELECT%20like_count,%20total_count,%20share_count,%20click_count,%20comment_count%20FROM%20link_stat%20WHERE%20url%20=%20%22{0}%22".format(url.replace("http://","")))
j = json.loads(res.text)
if "data" in j.keys() and len(j['data'])>0:
return j
else:
return {"data":[{"total_count":0}]}
#def twitter(url):
# res = requests.get("http://urls.api.twitter.com/1/urls/count.json?url=" + url)
# return json.loads(res.text)['count']
def reddit(url):
reddit_url = "http://buttons.reddit.com/button_info.json?url={0}".format(url)
res = requests.get(reddit_url)
#import pdb; pdb.set_trace()
j = json.loads(res.text)
if not "data" in j:
print "REDDIT ERROR WITH {0}".format(reddit_url)
return {"ups":"0", "num_comments":"0"}
else:
data = j['data']
if "children" in data and len(data["children"]) > 0 and "data" in data["children"][0]:
child = data["children"][0]
return {"ups":child["data"]["ups"],"num_comments":child["data"]["num_comments"]}
return {"ups":"0", "num_comments":"0"}
if __name__ == "__main__":
app.debug = True
app.run(host='0.0.0.0',port=5050)
| mit | -5,024,372,724,159,938,000 | 33.220472 | 221 | 0.652094 | false |
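A brief, hypothetical client-side sketch of the Flask routes defined in pond-hopper.py above; the base URL and author slug are assumptions for illustration (only the port comes from the app.run() call), not values taken from the repository.

```python
# Hypothetical client for the feed endpoints above; assumes the app is already
# running locally (app.run(host='0.0.0.0', port=5050)) and that the author slug exists.
import requests

BASE_URL = "http://localhost:5050"
AUTHOR_SLUG = "example-author"  # made-up slug for illustration

# /byline/<byline> returns the RSS document built by get_feed_for_url()
resp = requests.get("{0}/byline/{1}".format(BASE_URL, AUTHOR_SLUG), timeout=60)
print(resp.status_code)                  # 200 when the author page scrape succeeds
print(resp.headers.get("Content-Type"))  # expected: application/rss+xml
print(resp.text[:300])                   # start of the generated feed
```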
dc-atlas/bcml | doc/conf.py | 1 | 7251 | #
#
# Copyright (C) 2010 Razvan Popovici <[email protected]>
# Copyright (C) 2010 Luca Beltrame <[email protected]>
# Copyright (C) 2010 Enrica Calura <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
#
# BCML documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 10 10:09:27 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BCML'
copyright = u'2010, Luca Beltrame, Enrica Calura, Razvan Popovici'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'BCMLdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'BCML.tex', u'BCML Documentation',
u'Luca Beltrame, Enrica Calura, Razvan Popovici', 'manual'),
]
latex_elements = {
"papersize" : "a4paper"
}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| lgpl-2.1 | 9,100,167,073,289,095,000 | 32.109589 | 80 | 0.717005 | false |
tensorflow/federated | tensorflow_federated/python/core/impl/computation/function_utils_test.py | 1 | 17117 | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import inspect
import itertools
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.impl.computation import function_utils
from tensorflow_federated.python.core.impl.context_stack import context_base
from tensorflow_federated.python.core.impl.context_stack import context_stack_base
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_analysis
class NoopIngestContextForTest(context_base.Context):
def ingest(self, val, type_spec):
type_analysis.check_type(val, type_spec)
return val
def invoke(self, comp, arg):
raise NotImplementedError
class FunctionUtilsTest(test_case.TestCase, parameterized.TestCase):
def test_get_defun_argspec_with_typed_non_eager_defun(self):
# In a tf.function with a defined input signature, **kwargs or default
# values are not allowed, but *args are, and the input signature may overlap
# with *args.
fn = tf.function(lambda x, y, *z: None, (
tf.TensorSpec(None, tf.int32),
tf.TensorSpec(None, tf.bool),
tf.TensorSpec(None, tf.float32),
tf.TensorSpec(None, tf.float32),
))
self.assertEqual(
collections.OrderedDict(function_utils.get_signature(fn).parameters),
collections.OrderedDict(
x=inspect.Parameter('x', inspect.Parameter.POSITIONAL_OR_KEYWORD),
y=inspect.Parameter('y', inspect.Parameter.POSITIONAL_OR_KEYWORD),
z=inspect.Parameter('z', inspect.Parameter.VAR_POSITIONAL),
))
def test_get_defun_argspec_with_untyped_non_eager_defun(self):
# In a tf.function with no input signature, the same restrictions as in a
# typed eager function apply.
fn = tf.function(lambda x, y, *z: None)
self.assertEqual(
collections.OrderedDict(function_utils.get_signature(fn).parameters),
collections.OrderedDict(
x=inspect.Parameter('x', inspect.Parameter.POSITIONAL_OR_KEYWORD),
y=inspect.Parameter('y', inspect.Parameter.POSITIONAL_OR_KEYWORD),
z=inspect.Parameter('z', inspect.Parameter.VAR_POSITIONAL),
))
def test_get_signature_with_class_instance_method(self):
class C:
def __init__(self, x):
self._x = x
def foo(self, y):
return self._x * y
c = C(5)
signature = function_utils.get_signature(c.foo)
self.assertEqual(
signature.parameters,
collections.OrderedDict(
y=inspect.Parameter('y', inspect.Parameter.POSITIONAL_OR_KEYWORD)))
def test_get_signature_with_class_property(self):
class C:
@property
def x(self):
return 99
c = C()
with self.assertRaises(TypeError):
function_utils.get_signature(c.x)
def test_as_wrapper_with_classmethod(self):
class C:
@classmethod
def foo(cls, x):
return x * 2
signature = function_utils.get_signature(C.foo)
self.assertEqual(
signature.parameters,
collections.OrderedDict(
x=inspect.Parameter('x', inspect.Parameter.POSITIONAL_OR_KEYWORD)))
# pyformat: disable
@parameterized.parameters(
itertools.product(
# Values of 'fn' to test.
[lambda: None,
lambda a: None,
lambda a, b: None,
lambda *a: None,
lambda **a: None,
lambda *a, **b: None,
lambda a, *b: None,
lambda a, **b: None,
lambda a, b, **c: None,
lambda a, b=10: None,
lambda a, b=10, c=20: None,
lambda a, b=10, *c: None,
lambda a, b=10, **c: None,
lambda a, b=10, *c, **d: None,
lambda a, b, c=10, *d: None,
lambda a=10, b=20, c=30, **d: None],
# Values of 'args' to test.
[[], [1], [1, 2], [1, 2, 3], [1, 2, 3, 4]],
# Values of 'kwargs' to test.
[{}, {'b': 100}, {'name': 'foo'}, {'b': 100, 'name': 'foo'}]))
# pyformat: enable
def test_get_callargs_for_signature(self, fn, args, kwargs):
signature = function_utils.get_signature(fn)
expected_error = None
try:
signature = inspect.signature(fn)
bound_arguments = signature.bind(*args, **kwargs)
expected_callargs = bound_arguments.arguments
except TypeError as e:
expected_error = e
expected_callargs = None
result_callargs = None
if expected_error is None:
try:
bound_args = signature.bind(*args, **kwargs).arguments
self.assertEqual(bound_args, expected_callargs)
except (TypeError, AssertionError) as test_err:
raise AssertionError(
'With signature `{!s}`, args {!s}, kwargs {!s}, expected bound '
'args {!s} and error {!s}, tested function returned {!s} and the '
'test has failed with message: {!s}'.format(signature, args, kwargs,
expected_callargs,
expected_error,
result_callargs,
test_err))
else:
with self.assertRaises(TypeError):
_ = signature.bind(*args, **kwargs)
# pyformat: disable
@parameterized.named_parameters(
('args_only',
function_utils.get_signature(lambda a: None),
[tf.int32],
collections.OrderedDict()),
('args_and_kwargs_unnamed',
function_utils.get_signature(lambda a, b=True: None),
[tf.int32, tf.bool],
collections.OrderedDict()),
('args_and_kwargs_named',
function_utils.get_signature(lambda a, b=True: None),
[tf.int32],
collections.OrderedDict(b=tf.bool)),
('args_and_kwargs_default_int',
function_utils.get_signature(lambda a=10, b=True: None),
[tf.int32],
collections.OrderedDict(b=tf.bool)),
)
# pyformat: enable
def test_is_signature_compatible_with_types_true(self, signature, *args,
**kwargs):
self.assertTrue(
function_utils.is_signature_compatible_with_types(
signature, *args, **kwargs))
# pyformat: disable
@parameterized.named_parameters(
('args_only',
function_utils.get_signature(lambda a=True: None),
[tf.int32],
collections.OrderedDict()),
('args_and_kwargs',
function_utils.get_signature(lambda a=10, b=True: None),
[tf.bool],
collections.OrderedDict(b=tf.bool)),
)
# pyformat: enable
def test_is_signature_compatible_with_types_false(self, signature, *args,
**kwargs):
self.assertFalse(
function_utils.is_signature_compatible_with_types(
signature, *args, **kwargs))
# pyformat: disable
@parameterized.named_parameters(
('int', tf.int32, False),
('tuple_unnamed', [tf.int32, tf.int32], True),
('tuple_partially_named', [tf.int32, ('b', tf.int32)], True),
('tuple_named', [('a', tf.int32), ('b', tf.int32)], True),
('tuple_partially_named_kwargs_first', [('a', tf.int32), tf.int32],
False),
('struct', structure.Struct([(None, 1), ('a', 2)]), True),
('struct_kwargs_first', structure.Struct([('a', 1), (None, 2)]), False))
# pyformat: enable
def test_is_argument_struct(self, arg, expected_result):
self.assertEqual(function_utils.is_argument_struct(arg), expected_result)
# pyformat: disable
@parameterized.named_parameters(
('tuple_unnamed', structure.Struct([(None, 1)]), [1], {}),
('tuple_partially_named', structure.Struct([(None, 1), ('a', 2)]),
[1], {'a': 2}),
)
# pyformat: enable
def test_unpack_args_from_structure(self, tuple_with_args, expected_args,
expected_kwargs):
self.assertEqual(
function_utils.unpack_args_from_struct(tuple_with_args),
(expected_args, expected_kwargs))
# pyformat: disable
@parameterized.named_parameters(
('tuple_unnamed_1', [tf.int32], [tf.int32], {}),
('tuple_named_1', [('a', tf.int32)], [], {'a': tf.int32}),
('tuple_unnamed_2', [tf.int32, tf.bool], [tf.int32, tf.bool], {}),
('tuple_partially_named',
[tf.int32, ('b', tf.bool)], [tf.int32], {'b': tf.bool}),
('tuple_named_2',
[('a', tf.int32), ('b', tf.bool)], [], {'a': tf.int32, 'b': tf.bool}),
)
# pyformat: enable
def test_unpack_args_from_struct_type(self, tuple_with_args, expected_args,
expected_kwargs):
args, kwargs = function_utils.unpack_args_from_struct(tuple_with_args)
self.assertEqual(len(args), len(expected_args))
for idx, arg in enumerate(args):
self.assertTrue(
arg.is_equivalent_to(computation_types.to_type(expected_args[idx])))
self.assertEqual(set(kwargs.keys()), set(expected_kwargs.keys()))
for k, v in kwargs.items():
self.assertTrue(
v.is_equivalent_to(computation_types.to_type(expected_kwargs[k])))
def test_pack_args_into_struct_without_type_spec(self):
self.assertEqual(
function_utils.pack_args_into_struct([1], {'a': 10}),
structure.Struct([(None, 1), ('a', 10)]))
self.assertIn(
function_utils.pack_args_into_struct([1, 2], {
'a': 10,
'b': 20
}), [
structure.Struct([
(None, 1),
(None, 2),
('a', 10),
('b', 20),
]),
structure.Struct([
(None, 1),
(None, 2),
('b', 20),
('a', 10),
])
])
self.assertIn(
function_utils.pack_args_into_struct([], {
'a': 10,
'b': 20
}), [
structure.Struct([('a', 10), ('b', 20)]),
structure.Struct([('b', 20), ('a', 10)])
])
self.assertEqual(
function_utils.pack_args_into_struct([1], {}),
structure.Struct([(None, 1)]))
# pyformat: disable
@parameterized.named_parameters(
('int', [1], {}, [tf.int32], [(None, 1)]),
('tuple_unnamed_with_args',
[1, True], {}, [tf.int32, tf.bool], [(None, 1), (None, True)]),
('tuple_named_with_args', [1, True], {},
[('x', tf.int32), ('y', tf.bool)], [('x', 1), ('y', True)]),
('tuple_named_with_args_and_kwargs', [1], {'y': True},
[('x', tf.int32), ('y', tf.bool)], [('x', 1), ('y', True)]),
('tuple_with_kwargs', [], {'x': 1, 'y': True},
[('x', tf.int32), ('y', tf.bool)], [('x', 1), ('y', True)]),
('tuple_with_args_odict', [], collections.OrderedDict([('y', True), ('x', 1)]),
[('x', tf.int32), ('y', tf.bool)], [('x', 1), ('y', True)]))
# pyformat: enable
def test_pack_args_into_struct_with_type_spec_expect_success(
self, args, kwargs, type_spec, elements):
self.assertEqual(
function_utils.pack_args_into_struct(args, kwargs, type_spec,
NoopIngestContextForTest()),
structure.Struct(elements))
# pyformat: disable
@parameterized.named_parameters(
('wrong_type', [1], {}, [(tf.bool)]),
('wrong_structure', [], {'x': 1, 'y': True}, [(tf.int32), (tf.bool)]),
)
# pyformat: enable
def test_pack_args_into_struct_with_type_spec_expect_failure(
self, args, kwargs, type_spec):
with self.assertRaises(TypeError):
function_utils.pack_args_into_struct(args, kwargs, type_spec,
NoopIngestContextForTest())
# pyformat: disable
@parameterized.named_parameters(
('none', None, [], {}, 'None'),
('int', tf.int32, [1], {}, '1'),
('tuple_unnamed', [tf.int32, tf.bool], [1, True], {}, '<1,True>'),
('tuple_named_with_args', [('x', tf.int32), ('y', tf.bool)], [1, True],
{}, '<x=1,y=True>'),
('tuple_named_with_kwargs', [('x', tf.int32), ('y', tf.bool)], [1],
{'y': True}, '<x=1,y=True>'),
('tuple_with_args_struct', [tf.int32, tf.bool],
[structure.Struct([(None, 1), (None, True)])], {}, '<1,True>'))
# pyformat: enable
def test_pack_args(self, parameter_type, args, kwargs, expected_value_string):
self.assertEqual(
str(
function_utils.pack_args(parameter_type, args, kwargs,
NoopIngestContextForTest())),
expected_value_string)
# pyformat: disable
@parameterized.named_parameters(
('const', lambda: 10, None, None, None, 10),
('add_const', lambda x=1: x + 10, None, None, None, 11),
('add_const_with_type', lambda x=1: x + 10, tf.int32, None, 20, 30),
('add', lambda x, y: x + y, [tf.int32, tf.int32], None,
structure.Struct([('x', 5), ('y', 6)]), 11),
('str_tuple', lambda *args: str(args), [tf.int32, tf.int32], True,
structure.Struct([(None, 5), (None, 6)]), '(5, 6)'),
('str_tuple_with_named_type', lambda *args: str(args),
[('x', tf.int32), ('y', tf.int32)], False,
structure.Struct([('x', 5), ('y', 6)]),
'(Struct([(\'x\', 5), (\'y\', 6)]),)'),
('str_ing', lambda x: str(x), # pylint: disable=unnecessary-lambda
[tf.int32], None, structure.Struct([(None, 10)]), '[10]'),
)
# pyformat: enable
def test_wrap_as_zero_or_one_arg_callable(self, fn, parameter_type, unpack,
arg, expected_result):
parameter_type = computation_types.to_type(parameter_type)
unpack_arguments = function_utils.create_argument_unpacking_fn(
fn, parameter_type, unpack)
args, kwargs = unpack_arguments(arg)
actual_result = fn(*args, **kwargs)
self.assertEqual(actual_result, expected_result)
class PolymorphicFunctionTest(test_case.TestCase):
def test_call_returns_result(self):
class TestContext(context_base.Context):
def ingest(self, val, type_spec):
return val
def invoke(self, comp, arg):
return 'name={},type={},arg={},unpack={}'.format(
comp.name, comp.type_signature.parameter, arg, comp.unpack)
class TestContextStack(context_stack_base.ContextStack):
def __init__(self):
super().__init__()
self._context = TestContext()
@property
def current(self):
return self._context
def install(self, ctx):
del ctx # Unused
return self._context
context_stack = TestContextStack()
class TestFunction(function_utils.ConcreteFunction):
def __init__(self, name, unpack, parameter_type):
self._name = name
self._unpack = unpack
type_signature = computation_types.FunctionType(parameter_type,
tf.string)
super().__init__(type_signature, context_stack)
@property
def name(self):
return self._name
@property
def unpack(self):
return self._unpack
class TestFunctionFactory(object):
def __init__(self):
self._count = 0
def __call__(self, parameter_type, unpack):
self._count = self._count + 1
return TestFunction(str(self._count), str(unpack), parameter_type)
fn = function_utils.PolymorphicFunction(TestFunctionFactory())
self.assertEqual(fn(10), 'name=1,type=<int32>,arg=<10>,unpack=True')
self.assertEqual(
fn(20, x=True),
'name=2,type=<int32,x=bool>,arg=<20,x=True>,unpack=True')
fn_with_bool_arg = fn.fn_for_argument_type(
computation_types.to_type(tf.bool))
self.assertEqual(
fn_with_bool_arg(True), 'name=3,type=bool,arg=True,unpack=None')
self.assertEqual(
fn(30, x=40), 'name=4,type=<int32,x=int32>,arg=<30,x=40>,unpack=True')
self.assertEqual(fn(50), 'name=1,type=<int32>,arg=<50>,unpack=True')
self.assertEqual(
fn(0, x=False),
'name=2,type=<int32,x=bool>,arg=<0,x=False>,unpack=True')
fn_with_bool_arg = fn.fn_for_argument_type(
computation_types.to_type(tf.bool))
self.assertEqual(
fn_with_bool_arg(False), 'name=3,type=bool,arg=False,unpack=None')
self.assertEqual(
fn(60, x=70), 'name=4,type=<int32,x=int32>,arg=<60,x=70>,unpack=True')
if __name__ == '__main__':
test_case.main()
| apache-2.0 | -6,018,502,755,279,471,000 | 36.455142 | 85 | 0.580768 | false |
lmandres/MeSH-Analysis-Methodology | searchlib/helper.py | 1 | 30718 | '''
Created on Jul 5, 2011
@author: Leo Andres (user)
'''
from datetime import timedelta
import os
import re
import xml.parsers.expat
class TextXMLParser:
element_path = ''
element_dictionary = {}
current_element_data = {}
def __init__(self):
self.element_path = ''
self.element_dictionary = {}
self.current_element_data = {}
def __element_path_to_list(self, element_path_in):
element_indexes = {
'path_list' : [],
'path_indexes' : []}
for match_item in re.finditer('<(.*?)(<\d*?>)?>', element_path_in):
element_indexes['path_list'].append(match_item.group(1))
path_index = None
try:
path_index = int(match_item.group(2).strip()[1:len(match_item.group(2).strip())-1])
except AttributeError:
pass
except ValueError:
pass
element_indexes['path_indexes'].append(path_index)
return element_indexes
def __append_element_data(self, element_path_in, element_attributes_in, element_data_in):
def __insert_item(
path_list_in,
path_indexes_in,
element_dictionary_in,
element_attr_in,
element_cdata_in):
element_dictionary_out = element_dictionary_in
try:
path_index = len(element_dictionary_in[path_list_in[0]])-1
try:
path_index = int(path_indexes_in[0])
except TypeError:
pass
except ValueError:
pass
if len(path_list_in) == 1:
element_dictionary_out[path_list_in[0]][path_index]['attributes'] = element_attr_in
element_dictionary_out[path_list_in[0]][path_index]['character_data'] = element_cdata_in
else:
element_dictionary_out[path_list_in[0]][path_index]['sub_elements'] = __insert_item(
path_list_in[1:],
path_indexes_in[1:],
element_dictionary_out[path_list_in[0]][path_index]['sub_elements'],
element_attr_in,
element_cdata_in)
except IndexError:
return None
except KeyError:
return None
return element_dictionary_out
self.element_dictionary = __insert_item(
self.__element_path_to_list(element_path_in)['path_list'],
self.__element_path_to_list(element_path_in)['path_indexes'],
self.element_dictionary,
element_attributes_in,
element_data_in)
def __append_element_dict(self, element_path_in):
def __insert_sub_element_dict(
path_list_in,
element_dictionary_in):
element_dictionary_out = element_dictionary_in
if path_list_in[0] not in element_dictionary_out.keys():
element_dictionary_out[path_list_in[0]] = []
if len(path_list_in) == 1:
element_dictionary_out[path_list_in[0]].append({
'attributes' : {},
'character_data' : [],
'sub_elements' : {}})
else:
path_index = len(element_dictionary_out[path_list_in[0]])-1
if len(element_dictionary_out[path_list_in[0]]) <= 0 or 'sub_elements' not in element_dictionary_out[path_list_in[0]][path_index].keys():
element_dictionary_out[path_list_in[0]].append({
'attributes' : {},
'character_data' : [],
'sub_elements' : {}})
element_dictionary_out[path_list_in[0]][path_index]['sub_elements'] = __insert_sub_element_dict(
path_list_in[1:],
element_dictionary_out[path_list_in[0]][path_index]['sub_elements'])
return element_dictionary_out
self.element_dictionary = __insert_sub_element_dict(
self.__element_path_to_list(element_path_in)['path_list'],
self.element_dictionary)
def __start_element_handler(self, element_name, element_attributes):
if self.element_path == '':
self.element_dictionary = {}
self.current_element_data = {}
self.element_path += '<' + element_name.strip() + '>'
self.__append_element_dict(self.element_path)
self.current_element_data[self.element_path] = {
'attrs' : {},
'cdata' : []}
if element_attributes:
self.current_element_data[self.element_path]['attrs'] = element_attributes
def __end_element_handler(self, element_name):
if self.current_element_data[self.element_path]['attrs'] or self.current_element_data[self.element_path]['cdata']:
self.__append_element_data(
self.element_path,
self.current_element_data[self.element_path]['attrs'],
self.current_element_data[self.element_path]['cdata'])
del(self.current_element_data[self.element_path])
if self.element_path.endswith('<' + element_name.strip() + '>'):
self.element_path = self.element_path[:self.element_path.rfind('<' + element_name.strip() + '>')]
def __character_data_handler(self, element_data):
if element_data.strip():
self.current_element_data[self.element_path]['cdata'].append(element_data.strip())
def parse_xml_file(self, xml_file_in):
self.element_path = ''
self.element_dictionary = {}
self.current_element_data = {}
parser = xml.parsers.expat.ParserCreate()
parser.StartElementHandler = self.__start_element_handler
parser.EndElementHandler = self.__end_element_handler
parser.CharacterDataHandler = self.__character_data_handler
parser.ParseFile(xml_file_in)
def parse_xml_string(self, xml_string_in, is_final = True):
self.element_path = ''
self.element_dictionary = {}
self.current_element_data = {}
parser = xml.parsers.expat.ParserCreate()
parser.StartElementHandler = self.__start_element_handler
parser.EndElementHandler = self.__end_element_handler
parser.CharacterDataHandler = self.__character_data_handler
parser.Parse(xml_string_in, is_final)
def get_element_item(self, element_path_in):
def __retrieve_item(path_list_in, path_indexes_in, element_dictionary_in):
try:
path_index = len(element_dictionary_in[path_list_in[0]])-1
try:
path_index = int(path_indexes_in[0])
except TypeError:
pass
except ValueError:
pass
if len(path_list_in) == 1:
return element_dictionary_in[path_list_in[0]][path_index]
else:
return __retrieve_item(
path_list_in[1:],
path_indexes_in[1:],
element_dictionary_in[path_list_in[0]][path_index]['sub_elements'])
except IndexError:
return None
except KeyError:
return None
return __retrieve_item(
self.__element_path_to_list(element_path_in)['path_list'],
self.__element_path_to_list(element_path_in)['path_indexes'],
self.element_dictionary)
def get_string_cdata(self, element_path_in):
element_value = None
try:
element_value_list = self.get_element_item(element_path_in)['character_data']
element_value = ''.join(element_value_list)
except TypeError:
pass
except IndexError:
pass
return element_value
def get_integer_cdata(self, element_path_in):
element_value = None
try:
element_value = int(self.get_string_cdata(element_path_in))
except TypeError:
pass
except IndexError:
pass
return element_value
def get_string_attr(self, element_path_in, element_attr_in):
element_value = None
try:
element_value = self.get_element_item(element_path_in)['attributes'][element_attr_in]
except TypeError:
pass
except KeyError:
pass
return element_value
def get_integer_attr(self, element_path_in, element_attr_in):
element_value = None
try:
element_value = int(self.get_string_attr(element_path_in, element_attr_in))
except ValueError:
pass
return element_value
class PubMedSearchSettings:
'''
classdocs
'''
settings_filename = os.path.abspath('pubmed_conf.xml')
search_settings = None
def __init__(self):
'''
Constructor
'''
self.read_settings()
def read_settings(self):
self.search_settings = TextXMLParser()
file_in = open(self.settings_filename, 'rb')
self.search_settings.parse_xml_file(file_in)
file_in.close()
def get_database_connection_type(self):
return self.search_settings.get_string_attr('<BiblioAnalysisSettings><DatabaseConnection>', 'type')
def get_database_connection_properties(self):
database_connection_properties = {}
database_connection_type_case = None
try:
database_connection_type_case = self.get_database_connection_type().upper()
except TypeError:
pass
if database_connection_type_case == 'JDBCODBCDRIVER':
database_odbc_dbq = None
database_odbc_driver = None
try:
database_odbc_driver = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Driver>').strip()
except TypeError:
pass
try:
database_odbc_dbq = os.path.abspath(self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><DBQ>').strip())
except TypeError:
pass
if database_odbc_driver and database_odbc_dbq:
database_connection_properties['driver'] = database_odbc_driver.strip()
database_connection_properties['dbq'] = database_odbc_dbq.strip()
elif database_connection_type_case == 'PYODBCDRIVER':
database_odbc_dbq = None
database_odbc_driver = None
database_odbc_server = None
database_odbc_db = None
database_odbc_trusted_conn = False
database_odbc_uid = None
database_odbc_pwd = None
try:
database_odbc_driver = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Driver>').strip()
except AttributeError:
pass
try:
database_odbc_dbq = os.path.abspath(self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><DBQ>').strip())
except AttributeError:
pass
try:
database_odbc_server = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Server>').strip()
except AttributeError:
pass
try:
database_odbc_db = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Database>').strip()
except AttributeError:
pass
try:
database_odbc_trusted_conn_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><TrustedConnection>').strip().upper()
if database_odbc_trusted_conn_case == 'YES':
database_odbc_trusted_conn = True
elif database_odbc_trusted_conn_case == 'NO':
database_odbc_trusted_conn = False
elif database_odbc_trusted_conn_case == 'TRUE':
database_odbc_trusted_conn = True
elif database_odbc_trusted_conn_case == 'FALSE':
database_odbc_trusted_conn = False
elif database_odbc_trusted_conn_case == '1':
database_odbc_trusted_conn = True
elif database_odbc_trusted_conn_case == '0':
database_odbc_trusted_conn = False
else:
database_odbc_trusted_conn = False
except AttributeError:
pass
try:
database_odbc_uid = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Uid>')
except AttributeError:
pass
try:
database_odbc_pwd = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Pwd>')
except AttributeError:
pass
if database_odbc_driver and database_odbc_dbq:
database_connection_properties['driver'] = database_odbc_driver.strip()
database_connection_properties['dbq'] = database_odbc_dbq.strip()
if database_odbc_driver and database_odbc_server and database_odbc_db:
database_connection_properties['driver'] = database_odbc_driver.strip()
database_connection_properties['server'] = database_odbc_server.strip()
database_connection_properties['database'] = database_odbc_db.strip()
database_connection_properties['trusted_connection'] = 'NO'
if database_odbc_trusted_conn:
database_connection_properties['trusted_connection'] = 'YES'
if database_odbc_uid and database_odbc_pwd:
database_connection_properties['uid'] = database_odbc_uid
database_connection_properties['pwd'] = database_odbc_pwd
elif database_connection_type_case == 'PYPYODBCDRIVER':
database_odbc_dbq = None
database_odbc_driver = None
try:
database_odbc_driver = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Driver>').strip()
except TypeError:
pass
try:
database_odbc_dbq = os.path.abspath(self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><DBQ>').strip())
except TypeError:
pass
if database_odbc_driver and database_odbc_dbq:
database_connection_properties['driver'] = database_odbc_driver.strip()
database_connection_properties['dbq'] = database_odbc_dbq.strip()
elif database_connection_type_case == 'SQLITE3DRIVER':
database_filename = None
try:
database_filename = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><FileName>').strip()
except TypeError:
pass
if database_filename:
database_connection_properties['filename'] = database_filename.strip()
elif database_connection_type_case == 'MYSQLDRIVER':
database_host = None
database_user = None
database_passwd = None
database_db = None
try:
database_host = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Host>').strip()
database_user = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><User>').strip()
database_passwd = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><Passwd>').strip()
database_db = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><DatabaseConnection><DB>').strip()
except TypeError:
pass
if database_host and database_user and database_passwd and database_db:
database_connection_properties['host'] = database_host.strip()
database_connection_properties['user'] = database_user.strip()
database_connection_properties['passwd'] = database_passwd.strip()
database_connection_properties['db'] = database_db.strip()
return database_connection_properties
def get_search_tool_name(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><SearchToolName>')
def get_reset_database_tables(self):
reset_database_tables = False
reset_database_tables_case = None
try:
reset_database_tables_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><ResetDatabaseTables>').strip().upper()
except TypeError:
pass
if reset_database_tables_case == 'YES':
reset_database_tables = True
elif reset_database_tables_case == 'NO':
reset_database_tables = False
elif reset_database_tables_case == 'TRUE':
reset_database_tables = True
elif reset_database_tables_case == 'FALSE':
reset_database_tables = False
elif reset_database_tables_case == '1':
reset_database_tables = True
elif reset_database_tables_case == '0':
reset_database_tables = False
else:
reset_database_tables = False
return reset_database_tables
def get_update_investigator_ids(self):
update_investigator_ids = False
update_investigator_ids_case = None
try:
update_investigator_ids_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><UpdateInvestigatorIDs>').strip().upper()
except TypeError:
pass
if update_investigator_ids_case == 'YES':
update_investigator_ids = True
elif update_investigator_ids_case == 'NO':
update_investigator_ids = False
elif update_investigator_ids_case == 'TRUE':
update_investigator_ids = True
elif update_investigator_ids_case == 'FALSE':
update_investigator_ids = False
elif update_investigator_ids_case == '1':
update_investigator_ids = True
elif update_investigator_ids_case == '0':
update_investigator_ids = False
else:
update_investigator_ids = False
return update_investigator_ids
def get_update_publication_results(self):
update_publication_results = False
update_publication_results_case = None
try:
update_publication_results_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><UpdatePublicationResults>').strip().upper()
except TypeError:
pass
if update_publication_results_case == 'YES':
update_publication_results = True
elif update_publication_results_case == 'NO':
update_publication_results = False
elif update_publication_results_case == 'TRUE':
update_publication_results = True
elif update_publication_results_case == 'FALSE':
update_publication_results = False
elif update_publication_results_case == '1':
update_publication_results = True
elif update_publication_results_case == '0':
update_publication_results = False
else:
update_publication_results = False
return update_publication_results
def get_eutils_address(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><EUtilsAddress>')
def get_email_address(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><EMailAddress>')
def get_http_delay(self):
return self.search_settings.get_integer_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><HTTPDelay>')
def get_weekday_hours_start_time(self):
return_timedelta = None
start_time_hours = None
start_time_minutes = None
start_time_string = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><WeekdayHours><StartTime>')
try:
match = re.match('(\d+):(\d+)', start_time_string)
if match:
start_time_hours = int(match.group(1))
start_time_minutes = int(match.group(2))
return_timedelta = timedelta(hours=start_time_hours, minutes=start_time_minutes)
except TypeError:
pass
return return_timedelta
def get_weekday_hours_end_time(self):
return_timedelta = None
end_time_hours = None
end_time_minutes = None
end_time_string = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><WeekdayHours><EndTime>')
try:
match = re.match('(\d+):(\d+)', end_time_string)
if match:
end_time_hours = int(match.group(1))
end_time_minutes = int(match.group(2))
return_timedelta = timedelta(hours=end_time_hours, minutes=end_time_minutes)
except TypeError:
pass
return return_timedelta
def get_return_maximum(self):
return self.search_settings.get_integer_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><ReturnMaximum>')
def get_minimum_date(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><MinimumDate>')
def get_maximum_date(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><MaximumDate>')
def get_maximum_url_length(self):
return self.search_settings.get_integer_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><MaximumURLLength>')
def get_maximum_tries(self):
return self.search_settings.get_integer_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><MaximumTries>')
def get_eutils_use_history(self):
eutils_use_history = False
eutils_use_history_case = None
try:
eutils_use_history_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><EUtilsUseHistory>').strip().upper()
except TypeError:
pass
if eutils_use_history_case == 'YES':
eutils_use_history = True
elif eutils_use_history_case == 'NO':
eutils_use_history = False
elif eutils_use_history_case == 'TRUE':
eutils_use_history = True
elif eutils_use_history_case == 'FALSE':
eutils_use_history = False
elif eutils_use_history_case == '1':
eutils_use_history = True
elif eutils_use_history_case == '0':
eutils_use_history = False
else:
eutils_use_history = False
return eutils_use_history
def get_search_strategies(self):
PERSON_BY_PERSON = 2**0
PERSON_ORGANIZATION = 2**1
PERSON_GRANT = 2**2
PERSON_COAUTHOR = 2**3
CTSA_GRANT = 2**4
PMCID_CITE_BY_PMCID = 2**5
PUBMED_ID_CITE_BY_PUBMED_ID = 2**6
PUBMED_ID_NEIGHBOR_PUBMED_ID = 2**7
search_strategies_array = [
'PersonByPerson',
'PersonOrganization',
'PersonGrant',
'PersonCoauthor',
'CTSAGrant',
'PMCIDCiteByPMCID',
'PubMedIDCiteByPubMedID',
'PubMedIDNeighborPubMedID']
search_strategies = 0
for strategy_index in range(0, len(search_strategies_array), 1):
search_strategy_case = None
try:
search_strategy_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><SearchStrategies><' + search_strategies_array[strategy_index] + '>').strip().upper()
except TypeError:
pass
if search_strategy_case == 'YES':
search_strategies |= (2**strategy_index)
elif search_strategy_case == 'NO':
search_strategies &= ~(2**strategy_index)
elif search_strategy_case == 'TRUE':
search_strategies |= (2**strategy_index)
elif search_strategy_case == 'FALSE':
search_strategies &= ~(2**strategy_index)
elif search_strategy_case == '1':
search_strategies |= (2**strategy_index)
elif search_strategy_case == '0':
search_strategies &= ~(2**strategy_index)
else:
search_strategies &= ~(2**strategy_index)
return search_strategies
def get_timeout(self):
return self.search_settings.get_integer_cdata('<BiblioAnalysisSettings><SearchPubMedSettings><Timeout>')
class ClassifyMeSHTermsSettings:
'''
classdocs
'''
settings_filename = os.path.abspath('pubmed_conf.xml')
search_settings = None
def __init__(self):
'''
Constructor
'''
self.read_settings()
def read_settings(self):
self.search_settings = TextXMLParser()
file_in = open(self.settings_filename, 'rb')
self.search_settings.parse_xml_file(file_in)
file_in.close()
def get_descriptors_file_name(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><ClassifyMeSHTermsSettings><MeSHDescriptorsFileName>')
def get_qualifiers_file_name(self):
return self.search_settings.get_string_cdata('<BiblioAnalysisSettings><ClassifyMeSHTermsSettings><MeSHQualifiersFileName>')
def get_reset_database_tables(self):
reset_database_tables = False
reset_database_tables_case = None
try:
reset_database_tables_case = self.search_settings.get_string_cdata('<BiblioAnalysisSettings><ClassifyMeSHTermsSettings><ResetDatabaseTables>').strip().upper()
except TypeError:
pass
if reset_database_tables_case == 'YES':
reset_database_tables = True
elif reset_database_tables_case == 'NO':
reset_database_tables = False
elif reset_database_tables_case == 'TRUE':
reset_database_tables = True
elif reset_database_tables_case == 'FALSE':
reset_database_tables = False
elif reset_database_tables_case == '1':
reset_database_tables = True
elif reset_database_tables_case == '0':
reset_database_tables = False
else:
reset_database_tables = False
return reset_database_tables
| gpl-2.0 | 2,402,271,350,444,328,000 | 39.259502 | 209 | 0.521388 | false |
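A minimal usage sketch for the TextXMLParser class defined above; the XML snippet and element paths are invented for illustration and are not part of the project's configuration files.

```python
# Invented example data; only the TextXMLParser API calls mirror the class above.
parser = TextXMLParser()
parser.parse_xml_string('<Settings><Search type="pubmed"><ReturnMaximum>100</ReturnMaximum></Search></Settings>')

# Element paths use the '<Tag><Tag>' syntax expected by get_element_item()
print(parser.get_integer_cdata('<Settings><Search><ReturnMaximum>'))  # -> 100
print(parser.get_string_attr('<Settings><Search>', 'type'))           # -> 'pubmed'
```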
abenton/wgcca | src/wgccaTest.py | 1 | 3225 | '''
Test suite for weighted generalized canonical correlation analysis.
Adrian Benton
8/8/2016
'''
import os
import unittest
import wgcca as WGCCA
import numpy as np
import scipy
import scipy.linalg
class TestWeightedGCCA(unittest.TestCase):
def setUp(self):
### Generate sample data with 3 views ###
self.N = 1000 # Number of examples
self.F1 = 50 # Number of features in view 1
self.F2 = 30
self.F3 = 40
self.k = 5 # Number of latent features
def scale(X):
X_mean = np.mean(X, axis=0)
X -= X_mean
X_std = np.std(X, axis=0)
X_std[X_std==0.] = 1.0
X /= X_std
return X
def orth(X):
''' http://stackoverflow.com/questions/13940056/orthogonalize-matrix-numpy '''
return X.dot( scipy.linalg.inv(scipy.linalg.sqrtm( X.T.dot(X) )))
# Maps to each view
W1 = np.random.normal(size=(self.F1, self.k))
W2 = np.random.normal(size=(self.F2, self.k))
W3 = np.random.normal(size=(self.F3, self.k))
W1 = scale(W1)
W2 = scale(W2)
W3 = scale(W3)
G = np.random.normal(size=(self.N, self.k)) # Latent examples
self.G = orth(G)
# Observations
self.V1 = W1.dot(self.G.T).T # N X F1
self.V2 = W2.dot(self.G.T).T # N X F2
self.V3 = W3.dot(self.G.T).T # N X F3
### Write sample data to test file ###
outFile = open('gcca_test_file.tsv', 'w')
for i in range(self.N):
vStrs = [' '.join([str(val) for val in v[i,:]]) for v in [self.V1, self.V2, self.V3]]
# Assume each view is populated from a single document
outFile.write('%d\t1\t1\t1\t%s\n' % (i, '\t'.join(vStrs)))
outFile.close()
def tearDown(self):
''' Remove sample file '''
if os.path.exists('gcca_test_file.tsv'):
os.remove('gcca_test_file.tsv')
def test_recoverG(self):
'''
Test GCCA implementation by seeing if it can recover G.
'''
eps = 1.e-10
Vs = [self.V1, self.V2, self.V3]
wgcca = WGCCA.WeightedGCCA(3, [self.F1, self.F2, self.F3],
self.k, [eps, eps, eps], verbose=True)
wgcca.learn(Vs)
U1 = wgcca.U[0]
U2 = wgcca.U[1]
U3 = wgcca.U[2]
Gprime = wgcca.G
# Rotate G to minimize norm of difference between G and G'
R, B = scipy.linalg.orthogonal_procrustes(self.G, Gprime)
normDiff = scipy.linalg.norm(self.G.dot(R) - Gprime)
print ('Recovered G up to rotation; difference in norm:', normDiff)
self.assertTrue( normDiff < 1.e-6 )
self.assertTrue( np.allclose(self.G.dot(R), Gprime) )
def test_ldViews(self):
''' Try loading views from file -- ensure they are the same as generated data '''
ids, views = WGCCA.ldViews('gcca_test_file.tsv', [0, 1, 2],
replaceEmpty=False, maxRows=-1)
for V, Vprime in zip([self.V1, self.V2, self.V3], views):
self.assertTrue(np.allclose(V, Vprime))
def test_ldK(self):
''' K should be 1 for each view, each example '''
K = WGCCA.ldK('gcca_test_file.tsv', [0, 1, 2])
self.assertTrue( np.all(K == 1.) )
def main():
unittest.main()
if __name__ == '__main__':
main()
| mit | 4,759,145,673,995,391,000 | 26.330508 | 91 | 0.577054 | false |
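A standalone sketch of typical WeightedGCCA usage mirroring test_recoverG above; the array shapes are invented, and how closely a single view's projection matches G depends on the data and regularization, so the final check is only a rough diagnostic.

```python
# Hedged usage sketch; shapes are invented, API calls mirror the test above.
import numpy as np
import wgcca as WGCCA

views = [np.random.normal(size=(100, f)) for f in (20, 15, 10)]  # three views, 100 examples
model = WGCCA.WeightedGCCA(3, [20, 15, 10], 4, [1e-10] * 3, verbose=False)
model.learn(views)

shared = model.G                            # 100 x 4 shared representation
proj_view1 = views[0].dot(model.U[0])       # view 1 projected toward the shared space
print(np.linalg.norm(shared - proj_view1))  # rough sanity check, not an exact identity
```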
hanjihun/Car | segurarse_com_ar/segurarse_com_ar/middlewares.py | 1 | 1888 | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SegurarseComArSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| mit | 6,918,560,916,649,596,000 | 32.678571 | 78 | 0.664899 | false |
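The middleware above only takes effect once it is registered; a hypothetical settings.py entry is sketched below, following Scrapy's usual convention (the priority value 543 is the project-template default, not something specified in this repository).

```python
# Hypothetical segurarse_com_ar/settings.py snippet enabling the spider middleware.
SPIDER_MIDDLEWARES = {
    'segurarse_com_ar.middlewares.SegurarseComArSpiderMiddleware': 543,
}
```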
grow/grow | grow/cache/object_cache.py | 1 | 2129 | """
Cache for storing and retrieving data in a pod.
Supports arbitrary data based on a cache key.
The contents of the cache should be raw and not internationalized as it will
be shared between locales.
"""
import re
FILE_OBJECT_CACHE = 'objectcache.json'
FILE_OBJECT_SUB_CACHE = 'objectcache.{}.json'
class ObjectCache(object):
"""Object cache for caching arbitrary data in a pod."""
def __contains__(self, key):
return key in self._cache
def __init__(self):
self._cache = {}
self._is_dirty = False
self.reset()
def add(self, key, value):
"""Add a new item to the cache or overwrite an existing value."""
if not self._is_dirty and (key not in self._cache or self._cache[key] != value):
self._is_dirty = True
self._cache[key] = value
def add_all(self, key_to_cached):
"""Update the cache with a preexisting set of data."""
for key, value in key_to_cached.items():
self.add(key, value)
def export(self):
"""Returns the raw cache data."""
return self._cache
def get(self, key):
"""Retrieve the value from the cache."""
return self._cache.get(key, None)
@property
def is_dirty(self):
"""Have the contents of the object cache been modified?"""
return self._is_dirty
def mark_clean(self):
"""Mark that the object cache is clean."""
self._is_dirty = False
def remove(self, key):
"""Removes a single element from the cache."""
self._is_dirty = True
return self._cache.pop(key, None)
def reset(self):
"""Reset the internal cache object."""
self._cache = {}
self._is_dirty = False
def search(self, pattern):
"""Search through the cache and return all the matching elements."""
if type(pattern) is not type(re.compile('.')):
pattern = re.compile(pattern)
results = {}
for key, value in self._cache.items():
if pattern.search(key) is not None:
results[key] = value
return results
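
# Minimal usage sketch, relying only on the ObjectCache API defined above
# (the keys and values below are made-up examples):
#
#     cache = ObjectCache()
#     cache.add('doc:/content/pages/home.yaml', {'title': 'Home'})
#     cache.get('doc:/content/pages/home.yaml')    # -> {'title': 'Home'}
#     cache.is_dirty                               # -> True until mark_clean()
#     cache.search('^doc:')                        # -> matching key/value pairs
#     cache.mark_clean()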
| mit | 8,474,668,111,042,571,000 | 26.649351 | 88 | 0.592767 | false |
rrader/Pomodoro | pomodoro/mainframe.py | 1 | 4124 | #!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
# mainframe.py
# Pomodoro
#
# Created by Roman Rader on 22.06.11.
# New BSD License 2011 Antigluk https://github.com/antigluk/Pomodoro
"""
Contains the main frame of the application.
"""
import wx
from state import PomodoroStateProxy as PomodoroState
from NotificationCenter.NotificationCenter import NotificationCenter
import logging
logging.getLogger('Pomodoro')
class MainFrameController(wx.Frame):
"""Main frame of Pomodoro"""
def __init__(self):
wx.Frame.__init__(
self,
None,
-1,
'Pomodoro it!',
style=wx.BORDER_DEFAULT | wx.STAY_ON_TOP,
size=(220, 120),
)
state = PomodoroState()
self.__state_dict = {
state.StateNoState: {'bs': '...'},
state.StateInPomodoro: {'bs': u"Отменить..."},
state.StateInRest: {'bs': u"Отдыхайте!"},
state.StateWaitingPomodoro: {'bs': u"Начать помидору"},
state.StateWaitingRest: {'bs': u"Начать отдых"},
state.StatePomodoroKilled: {'bs': u"Начать помидору"},
}
self.buildFrame()
self.updateUI()
self.makeMenu()
self.Show(False)
NotificationCenter().addObserver(self,self.onDBUpdate,"dbUpdated")
NotificationCenter().addObserver(self,self.onUpdateUI,"updateUI")
def buildFrame(self):
self.panel = wx.Panel(self)
self.txt = wx.StaticText(self.panel, pos=(10, 10),
label='Pomodoro!')
self.times_l = wx.StaticText(self.panel, pos=(120, 10),
label=u"0 помидор")
self.timer_ctrl = wx.TextCtrl(self.panel, pos=(10, 30),
size=(200, -1), style=wx.TE_READONLY | wx.TE_CENTER)
self.start_button = wx.Button(self.panel, pos=(20, 70), label=''
, size=(170, -1))
self.start_button.Bind(wx.EVT_BUTTON, self.bClick)
def onUpdateUI(self, event):
self.updateUI()
def updateUI(self):
        #TODO: check whether the window is visible; otherwise do not update
#TODO: remove this ugly method
state = PomodoroState()
self.timer_ctrl.SetValue(state.text)
self.start_button.SetLabel(self.__state_dict[state.active]['bs'])
self.txt.SetLabel(state.caption)
self.times_l.SetLabel(u"%d помидор" % state.GetTodayCount())
def bClick(self, m):
logging.debug("Toggle state called from menu")
self.controller.toggleState()
def onExit(self,m):
logging.debug("Quit called from menu")
self.controller.quit()
def makeMenu(self):
self.menuBar = wx.MenuBar()
self.filemenu = wx.Menu()
self.pomodmenu = wx.Menu()
item = self.filemenu.Append(wx.ID_ANY, "Hide")
self.Bind(wx.EVT_MENU, self.hideFrame, item)
item = self.filemenu.Append(wx.ID_ANY, "Toggle pomodoro")
self.Bind(wx.EVT_MENU, self.bClick, item)
self.filemenu.AppendSeparator()
item = self.filemenu.Append(wx.ID_EXIT, "&Quit", "quit")
self.Bind(wx.EVT_MENU, self.onExit, id=wx.ID_EXIT)
item = self.pomodmenu.Append(wx.ID_ANY, "All", "List of pomodoros")
self.Bind(wx.EVT_MENU, self.showListOfPomodoros, item)
item = self.pomodmenu.Append(wx.ID_ANY, "Statistics", "Statistics")
self.Bind(wx.EVT_MENU, self.showStatistics, item)
self.menuBar.Append(self.filemenu, "&File")
self.menuBar.Append(self.pomodmenu, "&Pomodors")
self.SetMenuBar(self.menuBar)
def onDBUpdate(self, obj):
pass
def hideFrame(self, m):
logging.debug("Hide frame called from menu")
self.Show(False)
def showListOfPomodoros(self, m):
logging.debug("Show list of pomodors called from menu")
self.controller.showListOfPomodoros()
def showStatistics(self, m):
logging.debug("Show statistics of pomodors called from menu")
self.controller.showStatistics() | bsd-3-clause | -2,721,271,360,558,041,600 | 32.764706 | 75 | 0.608912 | false |
Fillll/reddit2telegram | reddit2telegram/supplier.py | 1 | 4782 | #encoding:utf-8
import logging
import yaml
import praw
import utils
from reporting_stuff import report_error
from utils.tech import long_sleep, short_sleep
def send_to_channel_from_subreddit(how_to_post, channel_to_post, subreddit, submissions_ranking, submissions_limit, config, **kwargs):
reddit = praw.Reddit(
user_agent=config['reddit']['user_agent'],
client_id=config['reddit']['client_id'],
client_secret=config['reddit']['client_secret'],
username=config['reddit']['username'],
password=config['reddit']['password']
)
if submissions_ranking == 'top':
submissions = reddit.subreddit(subreddit).top(limit=submissions_limit)
elif submissions_ranking == 'hot':
submissions = reddit.subreddit(subreddit).hot(limit=submissions_limit)
elif submissions_ranking == 'new':
submissions = reddit.subreddit(subreddit).new(limit=submissions_limit)
else:
logging.error('Unknown submissions_ranking. {}'.format(submissions_ranking))
r2t = utils.Reddit2TelegramSender(channel_to_post, config)
success = False
for submission in submissions:
link = submission.shortlink
if r2t.was_before(link):
continue
if r2t.too_much_errors(link):
continue
if kwargs.get('extra_args', False):
success = how_to_post(submission, r2t, **kwargs)
else:
success = how_to_post(submission, r2t)
if success == utils.SupplyResult.SUCCESSFULLY:
            # Everything is ok, the post was sent
r2t.mark_as_was_before(link, sent=True)
break
elif success == utils.SupplyResult.DO_NOT_WANT_THIS_SUBMISSION:
# Do not want to send this post
r2t.mark_as_was_before(link, sent=False)
continue
elif success == utils.SupplyResult.SKIP_FOR_NOW:
# Do not want to send now
continue
elif success == utils.SupplyResult.STOP_THIS_SUPPLY:
            # Stop this supply: do not send anything more this time
break
else:
logging.error('Unknown SupplyResult. {}'.format(success))
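
# Each channel submodule only needs to expose what the loop above consumes:
# `subreddit`, `t_channel`, optional `submissions_ranking` / `submissions_limit`,
# and a `send_post(submission, r2t)` callable returning a utils.SupplyResult.
# A minimal sketch of such a submodule (the channel name, the NSFW filter and
# the r2t helper used for sending are illustrative assumptions, not fixed API):
#
#     subreddit = 'aww'
#     t_channel = '@example_channel'
#
#     def send_post(submission, r2t):
#         if submission.over_18:
#             return utils.SupplyResult.DO_NOT_WANT_THIS_SUBMISSION
#         return r2t.send_simple(submission)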
@report_error
def supply(submodule_name, config, is_test=False):
if not is_test:
long_sleep(2)
submodule = utils.channels_stuff.import_submodule(submodule_name)
submissions_ranking_stated = getattr(submodule, 'submissions_ranking', None)
if submissions_ranking_stated not in ['hot', 'new', 'top']:
submissions_ranking = 'hot'
else:
submissions_ranking = submissions_ranking_stated
submissions_limit = getattr(submodule, 'submissions_limit', 100)
channel_to_post = submodule.t_channel if not is_test else '@r_channels_test'
success = send_to_channel_from_subreddit(how_to_post=submodule.send_post,
channel_to_post=channel_to_post,
subreddit=submodule.subreddit,
submissions_ranking=submissions_ranking,
submissions_limit=submissions_limit,
config=config,
extra_args=False
)
if success is False:
logging.info('Nothing to post from {sub} to {channel}.'.format(
sub=submodule.subreddit, channel=submodule.t_channel))
if submissions_ranking_stated is None:
success = send_to_channel_from_subreddit(how_to_post=submodule.send_post,
channel_to_post=channel_to_post,
subreddit=submodule.subreddit,
submissions_ranking='new',
submissions_limit=submissions_limit,
config=config,
extra_args=False
)
if success is False:
success = send_to_channel_from_subreddit(how_to_post=submodule.send_post,
channel_to_post=channel_to_post,
subreddit=submodule.subreddit,
submissions_ranking='top',
submissions_limit=submissions_limit,
config=config,
extra_args=False
)
utils.clean_after_module(channel_to_post)
def main(config_filename, sub, is_test=False):
with open(config_filename) as config_file:
config = yaml.safe_load(config_file.read())
if not is_test:
supply(sub, config, is_test)
else:
for i in range(100):
print('i =', i, '>')
supply(sub, config, is_test)
short_sleep(0.1)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='configs/prod.yml')
parser.add_argument('--test', action='store_true')
parser.add_argument('--sub')
args = parser.parse_args()
main(args.config, args.sub, args.test)
| mit | -1,040,563,936,805,397,800 | 38.180328 | 134 | 0.617782 | false |
ace-han/onedegree | admin/account/api/v1/fields.py | 1 | 1049 | from rest_framework import serializers
from rest_framework.relations import RelatedField
class TaggedItemRelatedField(serializers.PrimaryKeyRelatedField):
"""
A custom field to use for the `tagged_object` generic relationship.
"""
def __init__(self, **kwargs):
self.pk_field = kwargs.pop('pk_field', None)
kwargs.pop('many', None)
kwargs.pop('allow_empty', None)
self.queryset = kwargs.pop('queryset', self.queryset)
super(RelatedField, self).__init__(**kwargs)
def to_internal_value(self, data):
value = serializers.PrimaryKeyRelatedField.to_internal_value(self, data)
# self.root.instance => Profile instance
# relationship = get_attribute(instance, self.source_attrs)
# relationship.set(*value)
return value
# def to_representation(self, value):
# """
# Serialize tagged objects to a simple textual representation.
# """
# raise Exception('Unexpected type of tagged object') | bsd-3-clause | 4,808,190,821,871,013,000 | 38.423077 | 80 | 0.638704 | false |
gjhiggins/electrum | lib/wallet.py | 1 | 75104 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import hashlib
import ast
import threading
import random
import time
import json
import copy
from operator import itemgetter
from util import NotEnoughFunds, PrintError, profiler
from bitcoin import *
from account import *
from version import *
from transaction import Transaction
from plugins import run_hook
import bitcoin
from synchronizer import Synchronizer
from mnemonic import Mnemonic
import paymentrequest
# internal ID for imported account
IMPORTED_ACCOUNT = '/x'
class WalletStorage(PrintError):
def __init__(self, path):
self.lock = threading.RLock()
self.data = {}
self.path = path
self.file_exists = False
self.modified = False
self.print_error("wallet path", self.path)
if self.path:
self.read(self.path)
def read(self, path):
"""Read the contents of the wallet file."""
try:
with open(self.path, "r") as f:
data = f.read()
except IOError:
return
try:
self.data = json.loads(data)
except:
try:
d = ast.literal_eval(data) #parse raw data from reading wallet file
except Exception as e:
raise IOError("Cannot read wallet file '%s'" % self.path)
self.data = {}
            # In old versions of Electrum, labels were latin1 encoded; this fixes the breakage.
labels = d.get('labels', {})
for i, label in labels.items():
try:
unicode(label)
except UnicodeDecodeError:
d['labels'][i] = unicode(label.decode('latin1'))
for key, value in d.items():
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error('Failed to convert label to json format', key)
continue
self.data[key] = value
self.file_exists = True
def get(self, key, default=None):
with self.lock:
v = self.data.get(key)
if v is None:
v = default
else:
v = copy.deepcopy(v)
return v
def put(self, key, value, save = True):
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error("json error: cannot save", key)
return
with self.lock:
if value is not None:
if self.data.get(key) != value:
self.modified = True
self.data[key] = copy.deepcopy(value)
elif key in self.data:
self.modified = True
self.data.pop(key)
if save:
self.write()
def write(self):
assert not threading.currentThread().isDaemon()
if not self.modified:
return
with self.lock:
s = json.dumps(self.data, indent=4, sort_keys=True)
temp_path = "%s.tmp.%s" % (self.path, os.getpid())
with open(temp_path, "w") as f:
f.write(s)
f.flush()
os.fsync(f.fileno())
# perform atomic write on POSIX systems
try:
os.rename(temp_path, self.path)
except:
os.remove(self.path)
os.rename(temp_path, self.path)
if 'ANDROID_DATA' not in os.environ:
import stat
os.chmod(self.path,stat.S_IREAD | stat.S_IWRITE)
self.print_error("saved")
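
# Minimal usage sketch of WalletStorage, relying only on the methods above
# (the path is an example; real wallets live in the Electrum data directory):
#
#     storage = WalletStorage('/tmp/example_wallet')
#     storage.put('gap_limit', 20)            # persisted immediately (save=True)
#     storage.get('gap_limit')                # -> 20
#     storage.get('missing_key', 'fallback')  # -> 'fallback'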
class Abstract_Wallet(PrintError):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
def __init__(self, storage):
self.storage = storage
self.network = None
self.electrum_version = ELECTRUM_VERSION
self.gap_limit_for_change = 6 # constant
# saved fields
self.seed_version = storage.get('seed_version', NEW_SEED_VERSION)
self.use_change = storage.get('use_change',True)
self.use_encryption = storage.get('use_encryption', False)
self.seed = storage.get('seed', '') # encrypted
self.labels = storage.get('labels', {})
self.frozen_addresses = set(storage.get('frozen_addresses',[]))
self.stored_height = storage.get('stored_height', 0) # last known height (for offline mode)
self.history = storage.get('addr_history',{}) # address -> list(txid, height)
# This attribute is set when wallet.start_threads is called.
self.synchronizer = None
# imported_keys is deprecated. The GUI should call convert_imported_keys
self.imported_keys = self.storage.get('imported_keys',{})
self.load_accounts()
self.load_transactions()
self.build_reverse_history()
# load requests
self.receive_requests = self.storage.get('payment_requests', {})
# spv
self.verifier = None
# Transactions pending verification. A map from tx hash to transaction
# height. Access is not contended so no lock is needed.
self.unverified_tx = {}
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3',{})
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
self.lock = threading.Lock()
self.transaction_lock = threading.Lock()
self.tx_event = threading.Event()
self.check_history()
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type, True)
def diagnostic_name(self):
return self.basename()
@profiler
def load_transactions(self):
self.txi = self.storage.get('txi', {})
self.txo = self.storage.get('txo', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None and (tx_hash not in self.pruned_txo.values()):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
@profiler
def save_transactions(self):
with self.transaction_lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
# Flush storage only with the last put
self.storage.put('transactions', tx, False)
self.storage.put('txi', self.txi, False)
self.storage.put('txo', self.txo, False)
self.storage.put('pruned_txo', self.pruned_txo, True)
def clear_history(self):
with self.transaction_lock:
self.txi = {}
self.txo = {}
self.pruned_txo = {}
self.save_transactions()
with self.lock:
self.history = {}
self.tx_addr_hist = {}
self.storage.put('addr_history', self.history, True)
@profiler
def build_reverse_history(self):
self.tx_addr_hist = {}
for addr, hist in self.history.items():
for tx_hash, h in hist:
s = self.tx_addr_hist.get(tx_hash, set())
s.add(addr)
self.tx_addr_hist[tx_hash] = s
@profiler
def check_history(self):
save = False
for addr, hist in self.history.items():
if not self.is_mine(addr):
self.history.pop(addr)
save = True
continue
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo.values() or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
tx.deserialize()
self.add_transaction(tx_hash, tx)
if save:
self.storage.put('addr_history', self.history, True)
# wizard action
def get_action(self):
pass
def basename(self):
return os.path.basename(self.storage.path)
def convert_imported_keys(self, password):
for k, v in self.imported_keys.items():
sec = pw_decode(v, password)
pubkey = public_key_from_private_key(sec)
address = public_key_to_bc_address(pubkey.decode('hex'))
if address != k:
raise InvalidPassword()
self.import_key(sec, password)
self.imported_keys.pop(k)
self.storage.put('imported_keys', self.imported_keys)
def load_accounts(self):
self.accounts = {}
d = self.storage.get('accounts', {})
for k, v in d.items():
if self.wallet_type == 'old' and k in [0, '0']:
v['mpk'] = self.storage.get('master_public_key')
self.accounts['0'] = OldAccount(v)
elif v.get('imported'):
self.accounts[k] = ImportedAccount(v)
elif v.get('xpub'):
self.accounts[k] = BIP32_Account(v)
elif v.get('pending'):
try:
self.accounts[k] = PendingAccount(v)
except:
pass
else:
self.print_error("cannot load account", v)
def synchronize(self):
pass
def can_create_accounts(self):
return False
def set_up_to_date(self,b):
with self.lock: self.up_to_date = b
def is_up_to_date(self):
with self.lock: return self.up_to_date
def update(self):
self.up_to_date = False
while not self.is_up_to_date():
time.sleep(0.1)
def is_imported(self, addr):
account = self.accounts.get(IMPORTED_ACCOUNT)
if account:
return addr in account.get_addresses(0)
else:
return False
def has_imported_keys(self):
account = self.accounts.get(IMPORTED_ACCOUNT)
return account is not None
def import_key(self, sec, password):
assert self.can_import(), 'This wallet cannot import private keys'
try:
pubkey = public_key_from_private_key(sec)
address = public_key_to_bc_address(pubkey.decode('hex'))
except Exception:
raise Exception('Invalid private key')
if self.is_mine(address):
raise Exception('Address already in wallet')
if self.accounts.get(IMPORTED_ACCOUNT) is None:
self.accounts[IMPORTED_ACCOUNT] = ImportedAccount({'imported':{}})
self.accounts[IMPORTED_ACCOUNT].add(address, pubkey, sec, password)
self.save_accounts()
# force resynchronization, because we need to re-run add_transaction
if address in self.history:
self.history.pop(address)
if self.synchronizer:
self.synchronizer.add(address)
return address
def delete_imported_key(self, addr):
account = self.accounts[IMPORTED_ACCOUNT]
account.remove(addr)
if not account.get_addresses(0):
self.accounts.pop(IMPORTED_ACCOUNT)
self.save_accounts()
def set_label(self, name, text = None):
changed = False
old_text = self.labels.get(name)
if text:
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels, True)
return changed
def addresses(self, include_change = True):
return list(addr for acc in self.accounts for addr in self.get_account_addresses(acc, include_change))
def is_mine(self, address):
return address in self.addresses(True)
def is_change(self, address):
if not self.is_mine(address): return False
acct, s = self.get_address_index(address)
if s is None: return False
return s[0] == 1
def get_address_index(self, address):
for acc_id in self.accounts:
for for_change in [0,1]:
addresses = self.accounts[acc_id].get_addresses(for_change)
if address in addresses:
return acc_id, (for_change, addresses.index(address))
raise Exception("Address not found", address)
def get_private_key(self, address, password):
if self.is_watching_only():
return []
account_id, sequence = self.get_address_index(address)
return self.accounts[account_id].get_private_key(sequence, self, password)
def get_public_keys(self, address):
account_id, sequence = self.get_address_index(address)
return self.accounts[account_id].get_pubkeys(*sequence)
def sign_message(self, address, message, password):
keys = self.get_private_key(address, password)
assert len(keys) == 1
sec = keys[0]
key = regenerate_key(sec)
compressed = is_compressed(sec)
return key.sign_message(message, compressed, address)
def decrypt_message(self, pubkey, message, password):
address = public_key_to_bc_address(pubkey.decode('hex'))
keys = self.get_private_key(address, password)
secret = keys[0]
ec = regenerate_key(secret)
decrypted = ec.decrypt_message(message)
return decrypted
def add_unverified_tx(self, tx_hash, tx_height):
# Only add if confirmed and not verified
if tx_height > 0 and tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
def add_verified_tx(self, tx_hash, info):
# Remove from the unverified map and add to the verified map and
self.unverified_tx.pop(tx_hash, None)
with self.lock:
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
self.storage.put('verified_tx3', self.verified_tx, True)
conf, timestamp = self.get_confirmations(tx_hash)
self.network.trigger_callback('verified', (tx_hash, conf, timestamp))
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
return self.unverified_tx
def undo_verifications(self, height):
'''Used by the verifier when a reorg has happened'''
txs = []
with self.lock:
for tx_hash, item in self.verified_tx:
tx_height, timestamp, pos = item
if tx_height >= height:
self.verified_tx.pop(tx_hash, None)
txs.append(tx_hash)
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.stored_height
def get_confirmations(self, tx):
""" return the number of confirmations of a monitored transaction. """
with self.lock:
if tx in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx]
conf = (self.get_local_height() - height + 1)
if conf <= 0: timestamp = None
elif tx in self.unverified_tx:
conf = -1
timestamp = None
else:
conf = 0
timestamp = None
return conf, timestamp
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
x = self.verified_tx.get(tx_hash)
y = self.unverified_tx.get(tx_hash)
if x:
height, timestamp, pos = x
return height, pos
elif y:
return y, 0
else:
return 1e12, 0
def is_found(self):
return self.history.values() != [[]] * len(self.history)
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.history.get(address, []))
def get_tx_delta(self, tx_hash, address):
"effect of tx on address"
# pruned
if tx_hash in self.pruned_txo.values():
return None
delta = 0
        # subtract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
def get_wallet_delta(self, tx):
""" effect of tx on wallet """
addresses = self.addresses(True)
is_relevant = False
is_send = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
for item in tx.inputs:
addr = item.get('address')
if addr in addresses:
is_send = True
is_relevant = True
d = self.txo.get(item['prevout_hash'], {}).get(addr, [])
for n, v, cb in d:
if n == item['prevout_n']:
value = v
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_send:
is_partial = False
for addr, value in tx.get_outputs():
v_out += value
if addr in addresses:
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_send:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
is_send = v < 0
else:
# all inputs are mine
fee = v_out - v_in
return is_relevant, is_send, v, fee
def get_addr_io(self, address):
h = self.history.get(address, [])
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
return coins
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
# return the balance of a vcoin address: confirmed and matured, unconfirmed, unmatured
def get_addr_balance(self, address):
received, sent = self.get_addr_io(address)
c = u = x = 0
for txo, (tx_height, v, is_cb) in received.items():
if is_cb and tx_height + COINBASE_MATURITY > self.get_local_height():
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
return c, u, x
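    # Worked example of the tuple above, on made-up numbers: an address that
    # received a confirmed 1.0 coin output, an unconfirmed 0.5 coin output and
    # a not-yet-matured 25 coin coinbase, and spent nothing, yields
    # (c, u, x) = (100000000, 50000000, 2500000000) in base units:
    # confirmed-and-matured, unconfirmed, unmatured.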
def get_spendable_coins(self, domain = None, exclude_frozen = True):
coins = []
if domain is None:
domain = self.addresses(True)
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
c = self.get_addr_utxo(addr)
for txo, v in c.items():
tx_height, value, is_cb = v
if is_cb and tx_height + COINBASE_MATURITY > self.get_local_height():
continue
prevout_hash, prevout_n = txo.split(':')
output = {
'address':addr,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb
}
coins.append((tx_height, output))
continue
# sort by age
if coins:
coins = sorted(coins)
if coins[-1][0] != 0:
while coins[0][0] == 0:
coins = coins[1:] + [ coins[0] ]
return [value for height, value in coins]
def get_account_name(self, k):
return self.labels.get(k, self.accounts[k].get_name(k))
def get_account_names(self):
account_names = {}
for k in self.accounts.keys():
account_names[k] = self.get_account_name(k)
return account_names
def get_account_addresses(self, acc_id, include_change=True):
if acc_id is None:
addr_list = self.addresses(include_change)
elif acc_id in self.accounts:
acc = self.accounts[acc_id]
addr_list = acc.get_addresses(0)
if include_change:
addr_list += acc.get_addresses(1)
return addr_list
def get_account_from_address(self, addr):
"Returns the account that contains this address, or None"
for acc_id in self.accounts: # similar to get_address_index but simpler
if addr in self.get_account_addresses(acc_id):
return acc_id
return None
def get_account_balance(self, account):
return self.get_balance(self.get_account_addresses(account))
def get_frozen_balance(self):
return self.get_balance(self.frozen_addresses)
def get_balance(self, domain=None):
if domain is None:
domain = self.addresses(True)
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
with self.lock:
return self.history.get(address, [])
def get_status(self, h):
if not h:
return None
status = ''
for tx_hash, height in h:
status += tx_hash + ':%d:' % height
return hashlib.sha256( status ).digest().encode('hex')
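    # Illustrative example of the preimage built above: a history of
    # [('aa..11', 120), ('bb..22', 0)] produces the string 'aa..11:120:bb..22:0:'
    # and the status is its sha256 digest, hex encoded; this is the status
    # string the synchronizer compares against the server's answer.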
def find_pay_to_pubkey_address(self, prevout_hash, prevout_n):
dd = self.txo.get(prevout_hash, {})
for addr, l in dd.items():
for n, v, is_cb in l:
if n == prevout_n:
self.print_error("found pay-to-pubkey address:", addr)
return addr
def add_transaction(self, tx_hash, tx):
is_coinbase = tx.inputs[0].get('is_coinbase') == True
with self.transaction_lock:
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs:
addr = txi.get('address')
if not txi.get('is_coinbase'):
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = prevout_hash + ':%d'%prevout_n
if addr == "(pubkey)":
addr = self.find_pay_to_pubkey_address(prevout_hash, prevout_n)
# find value from prev output
if addr and self.is_mine(addr):
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
if d.get(addr) is None:
d[addr] = []
d[addr].append((ser, v))
break
else:
self.pruned_txo[ser] = tx_hash
# add outputs
self.txo[tx_hash] = d = {}
for n, txo in enumerate(tx.outputs):
ser = tx_hash + ':%d'%n
_type, x, v = txo
if _type == 'address':
addr = x
elif _type == 'pubkey':
addr = public_key_to_bc_address(x.decode('hex'))
else:
addr = None
if addr and self.is_mine(addr):
if d.get(addr) is None:
d[addr] = []
d[addr].append((n, v, is_coinbase))
# give v to txi that spends me
next_tx = self.pruned_txo.get(ser)
if next_tx is not None:
self.pruned_txo.pop(ser)
dd = self.txi.get(next_tx, {})
if dd.get(addr) is None:
dd[addr] = []
dd[addr].append((ser, v))
# save
self.transactions[tx_hash] = tx
def remove_transaction(self, tx_hash):
with self.transaction_lock:
self.print_error("removing tx from history", tx_hash)
#tx = self.transactions.pop(tx_hash)
for ser, hh in self.pruned_txo.items():
if hh == tx_hash:
self.pruned_txo.pop(ser)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in dd.items():
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
l.remove(item)
self.pruned_txo[ser] = next_tx
if l == []:
dd.pop(addr)
else:
dd[addr] = l
self.txi.pop(tx_hash)
self.txo.pop(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
def receive_history_callback(self, addr, hist):
with self.lock:
old_hist = self.history.get(addr, [])
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
# remove tx if it's not referenced in histories
self.tx_addr_hist[tx_hash].remove(addr)
if not self.tx_addr_hist[tx_hash]:
self.remove_transaction(tx_hash)
self.history[addr] = hist
self.storage.put('addr_history', self.history, True)
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
s = self.tx_addr_hist.get(tx_hash, set())
s.add(addr)
self.tx_addr_hist[tx_hash] = s
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
tx.deserialize()
self.add_transaction(tx_hash, tx)
def get_history(self, domain=None):
from collections import defaultdict
# get domain
if domain is None:
domain = self.get_account_addresses(None)
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash, delta in tx_deltas.items():
conf, timestamp = self.get_confirmations(tx_hash)
history.append((tx_hash, conf, delta, timestamp))
history.sort(key = lambda x: self.get_txpos(x[0]))
history.reverse()
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for item in history:
tx_hash, conf, delta, timestamp = item
h2.append((tx_hash, conf, delta, timestamp, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
h2.reverse()
# fixme: this may happen if history is incomplete
if balance not in [None, 0]:
self.print_error("Error: history not synchronized")
return []
return h2
def get_label(self, tx_hash):
label = self.labels.get(tx_hash)
is_default = (label == '') or (label is None)
if is_default:
label = self.get_default_label(tx_hash)
return label, is_default
def get_default_label(self, tx_hash):
if self.txi.get(tx_hash) == {}:
d = self.txo.get(tx_hash, {})
labels = []
for addr in d.keys():
label = self.labels.get(addr)
if label:
labels.append(label)
return ', '.join(labels)
return ''
def fee_per_kb(self, config):
b = config.get('dynamic_fees')
f = config.get('fee_factor', 50)
F = config.get('fee_per_kb', bitcoin.RECOMMENDED_FEE)
return min(F, self.network.fee*(50 + f)/100) if b and self.network and self.network.fee else F
def get_tx_fee(self, tx):
# this method can be overloaded
return tx.get_fee()
@profiler
def estimated_fee(self, tx, fee_per_kb):
estimated_size = len(tx.serialize(-1))/2
fee = int(fee_per_kb * estimated_size / 1000.)
if fee < MIN_RELAY_TX_FEE: # and tx.requires_fee(self):
fee = MIN_RELAY_TX_FEE
return fee
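    # Worked example of the estimate above (numbers are illustrative): with
    # fee_per_kb = 10000 and a serialized size of 250 bytes,
    # fee = int(10000 * 250 / 1000.) = 2500, and if that falls below
    # MIN_RELAY_TX_FEE the minimum relay fee is charged instead.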
def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None, change_addr=None):
# check outputs
for type, data, value in outputs:
if type == 'address':
assert is_address(data), "Address " + data + " is invalid!"
fee_per_kb = self.fee_per_kb(config)
amount = sum(map(lambda x:x[2], outputs))
total = 0
inputs = []
tx = Transaction.from_io(inputs, outputs)
fee = fixed_fee if fixed_fee is not None else 0
# add inputs, sorted by age
for item in coins:
v = item.get('value')
total += v
self.add_input_info(item)
tx.add_input(item)
# no need to estimate fee until we have reached desired amount
if total < amount + fee:
continue
fee = fixed_fee if fixed_fee is not None else self.estimated_fee(tx, fee_per_kb)
if total >= amount + fee:
break
else:
raise NotEnoughFunds()
# remove unneeded inputs.
removed = False
for item in sorted(tx.inputs, key=itemgetter('value')):
v = item.get('value')
if total - v >= amount + fee:
tx.inputs.remove(item)
total -= v
removed = True
continue
else:
break
if removed:
fee = fixed_fee if fixed_fee is not None else self.estimated_fee(tx, fee_per_kb)
for item in sorted(tx.inputs, key=itemgetter('value')):
v = item.get('value')
if total - v >= amount + fee:
tx.inputs.remove(item)
total -= v
fee = fixed_fee if fixed_fee is not None else self.estimated_fee(tx, fee_per_kb)
continue
break
self.print_error("using %d inputs"%len(tx.inputs))
# change address
if not change_addr:
# send change to one of the accounts involved in the tx
address = inputs[0].get('address')
account, _ = self.get_address_index(address)
if self.use_change and self.accounts[account].has_change():
# New change addresses are created only after a few confirmations.
# Choose an unused change address if any, otherwise take one at random
change_addrs = self.accounts[account].get_addresses(1)[-self.gap_limit_for_change:]
for change_addr in change_addrs:
if self.get_num_tx(change_addr) == 0:
break
else:
change_addr = random.choice(change_addrs)
else:
change_addr = address
# if change is above dust threshold, add a change output.
change_amount = total - ( amount + fee )
if fixed_fee is not None and change_amount > 0:
tx.outputs.append(('address', change_addr, change_amount))
elif change_amount > DUST_THRESHOLD:
tx.outputs.append(('address', change_addr, change_amount))
# recompute fee including change output
fee = self.estimated_fee(tx, fee_per_kb)
# remove change output
tx.outputs.pop()
# if change is still above dust threshold, re-add change output.
change_amount = total - ( amount + fee )
if change_amount > DUST_THRESHOLD:
tx.outputs.append(('address', change_addr, change_amount))
self.print_error('change', change_amount)
else:
self.print_error('not keeping dust', change_amount)
else:
self.print_error('not keeping dust', change_amount)
# Sort the inputs and outputs deterministically
tx.BIP_LI01_sort()
run_hook('make_unsigned_transaction', self, tx)
return tx
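    # Rough shape of the selection above, on made-up numbers: to send 1.0 with
    # coins of 0.4, 0.7 and 2.0 (oldest first), inputs are appended until
    # total >= amount + fee (here 0.4 + 0.7 = 1.1), the fee is re-estimated
    # from the serialized size, inputs that turn out to be unnecessary are
    # dropped again, and the remainder (total - amount - fee) is sent to a
    # change address unless it is below DUST_THRESHOLD, in which case it is
    # simply left as extra fee.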
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None):
coins = self.get_spendable_coins(domain)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr)
self.sign_transaction(tx, password)
return tx
def add_input_info(self, txin):
address = txin['address']
account_id, sequence = self.get_address_index(address)
account = self.accounts[account_id]
redeemScript = account.redeem_script(*sequence)
pubkeys = account.get_pubkeys(*sequence)
x_pubkeys = account.get_xpubkeys(*sequence)
# sort pubkeys and x_pubkeys, using the order of pubkeys
pubkeys, x_pubkeys = zip( *sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = list(pubkeys)
txin['x_pubkeys'] = list(x_pubkeys)
txin['signatures'] = [None] * len(pubkeys)
if redeemScript:
txin['redeemScript'] = redeemScript
txin['num_sig'] = account.m
else:
txin['redeemPubkey'] = account.get_pubkey(*sequence)
txin['num_sig'] = 1
def sign_transaction(self, tx, password):
if self.is_watching_only():
return
# Raise if password is not correct.
self.check_password(password)
# Add derivation for utxo in wallets
for i, addr in self.utxo_can_sign(tx):
txin = tx.inputs[i]
txin['address'] = addr
self.add_input_info(txin)
# Add private keys
keypairs = {}
for x in self.xkeys_can_sign(tx):
sec = self.get_private_key_from_xpubkey(x, password)
if sec:
keypairs[x] = sec
# Sign
if keypairs:
tx.sign(keypairs)
# Run hook, and raise if error
tx.error = None
run_hook('sign_transaction', self, tx, password)
if tx.error:
raise BaseException(tx.error)
def sendtx(self, tx):
# synchronous
h = self.send_tx(tx)
self.tx_event.wait()
return self.receive_tx(h, tx)
def send_tx(self, tx):
# asynchronous
self.tx_event.clear()
self.network.send([('blockchain.transaction.broadcast', [str(tx)])], self.on_broadcast)
return tx.hash()
def on_broadcast(self, r):
self.tx_result = r.get('result')
self.tx_event.set()
def receive_tx(self, tx_hash, tx):
out = self.tx_result
if out != tx_hash:
return False, "error: " + out
run_hook('receive_tx', tx, self)
return True, out
def update_password(self, old_password, new_password):
if new_password == '':
new_password = None
if self.has_seed():
decoded = self.get_seed(old_password)
self.seed = pw_encode( decoded, new_password)
self.storage.put('seed', self.seed, True)
imported_account = self.accounts.get(IMPORTED_ACCOUNT)
if imported_account:
imported_account.update_password(old_password, new_password)
self.save_accounts()
if hasattr(self, 'master_private_keys'):
for k, v in self.master_private_keys.items():
b = pw_decode(v, old_password)
c = pw_encode(b, new_password)
self.master_private_keys[k] = c
self.storage.put('master_private_keys', self.master_private_keys, True)
self.use_encryption = (new_password != None)
self.storage.put('use_encryption', self.use_encryption,True)
def is_frozen(self, addr):
return addr in self.frozen_addresses
def set_frozen_state(self, addrs, freeze):
'''Set frozen state of the addresses to FREEZE, True or False'''
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
self.storage.put('frozen_addresses', list(self.frozen_addresses), True)
return True
return False
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self.history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx (tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = self.verified_tx.keys() + self.unverified_tx.keys()
for tx_hash in self.transactions.keys():
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def start_threads(self, network):
from verifier import SPV
self.network = network
if self.network is not None:
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
network.add_jobs([self.verifier, self.synchronizer])
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
self.network.remove_jobs([self.synchronizer, self.verifier])
self.synchronizer = None
self.verifier = None
self.storage.put('stored_height', self.get_local_height(), True)
def restore(self, cb):
pass
def get_accounts(self):
return self.accounts
def add_account(self, account_id, account):
self.accounts[account_id] = account
self.save_accounts()
def save_accounts(self):
d = {}
for k, v in self.accounts.items():
d[k] = v.dump()
self.storage.put('accounts', d, True)
def can_import(self):
return not self.is_watching_only()
def can_export(self):
return not self.is_watching_only()
def is_used(self, address):
h = self.history.get(address,[])
c, u, x = self.get_addr_balance(address)
return len(h) > 0 and c + u + x == 0
def is_empty(self, address):
c, u, x = self.get_addr_balance(address)
return c+u+x == 0
def address_is_old(self, address, age_limit=2):
age = -1
h = self.history.get(address, [])
for tx_hash, tx_height in h:
if tx_height == 0:
tx_age = 0
else:
tx_age = self.get_local_height() - tx_height + 1
if tx_age > age:
age = tx_age
return age > age_limit
def can_sign(self, tx):
if self.is_watching_only():
return False
if tx.is_complete():
return False
if self.xkeys_can_sign(tx):
return True
if self.utxo_can_sign(tx):
return True
return False
def utxo_can_sign(self, tx):
out = set()
coins = self.get_spendable_coins()
for i in tx.inputs_without_script():
txin = tx.inputs[i]
for item in coins:
if txin.get('prevout_hash') == item.get('prevout_hash') and txin.get('prevout_n') == item.get('prevout_n'):
out.add((i, item.get('address')))
return out
def xkeys_can_sign(self, tx):
out = set()
for x in tx.inputs_to_sign():
if self.can_sign_xpubkey(x):
out.add(x)
return out
def get_private_key_from_xpubkey(self, x_pubkey, password):
if x_pubkey[0:2] in ['02','03','04']:
addr = bitcoin.public_key_to_bc_address(x_pubkey.decode('hex'))
if self.is_mine(addr):
return self.get_private_key(addr, password)[0]
elif x_pubkey[0:2] == 'ff':
xpub, sequence = BIP32_Account.parse_xpubkey(x_pubkey)
for k, v in self.master_public_keys.items():
if v == xpub:
xprv = self.get_master_private_key(k, password)
if xprv:
_, _, _, c, k = deserialize_xkey(xprv)
return bip32_private_key(sequence, k, c)
elif x_pubkey[0:2] == 'fe':
xpub, sequence = OldAccount.parse_xpubkey(x_pubkey)
for k, account in self.accounts.items():
if xpub in account.get_master_pubkeys():
pk = account.get_private_key(sequence, self, password)
return pk[0]
elif x_pubkey[0:2] == 'fd':
addrtype = ord(x_pubkey[2:4].decode('hex'))
addr = hash_160_to_bc_address(x_pubkey[4:].decode('hex'), addrtype)
if self.is_mine(addr):
return self.get_private_key(addr, password)[0]
else:
raise BaseException("z")
def can_sign_xpubkey(self, x_pubkey):
if x_pubkey[0:2] in ['02','03','04']:
addr = bitcoin.public_key_to_bc_address(x_pubkey.decode('hex'))
return self.is_mine(addr)
elif x_pubkey[0:2] == 'ff':
if not isinstance(self, BIP32_Wallet): return False
xpub, sequence = BIP32_Account.parse_xpubkey(x_pubkey)
return xpub in [ self.master_public_keys[k] for k in self.master_private_keys.keys() ]
elif x_pubkey[0:2] == 'fe':
if not isinstance(self, OldWallet): return False
xpub, sequence = OldAccount.parse_xpubkey(x_pubkey)
return xpub == self.get_master_public_key()
elif x_pubkey[0:2] == 'fd':
addrtype = ord(x_pubkey[2:4].decode('hex'))
addr = hash_160_to_bc_address(x_pubkey[4:].decode('hex'), addrtype)
return self.is_mine(addr)
else:
raise BaseException("z")
def is_watching_only(self):
        return False
def can_change_password(self):
return not self.is_watching_only()
def get_unused_address(self, account):
# fixme: use slots from expired requests
domain = self.get_account_addresses(account, include_change=False)
for addr in domain:
if not self.history.get(addr) and addr not in self.receive_requests.keys():
return addr
def get_payment_request(self, addr, config):
import util
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
out['URI'] = 'vcoin:' + addr + '?amount=' + util.format_satoshis(out.get('amount'))
out['status'] = self.get_request_status(addr)
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr)
path = os.path.join(rdir, key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, key)
out['URI'] += '&r=' + out['request_url']
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
return out
def get_request_status(self, key):
from paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
r = self.receive_requests[key]
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
if amount:
if self.up_to_date:
paid = amount <= self.get_addr_received(address)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
else:
status = PR_UNKNOWN
return status
def make_payment_request(self, addr, amount, message, expiration):
timestamp = int(time.time())
_id = Hash(addr + "%d"%timestamp).encode('hex')[0:10]
r = {'time':timestamp, 'amount':amount, 'exp':expiration, 'address':addr, 'memo':message, 'id':_id}
return r
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.get_private_key(alias_addr, password)[0]
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = pr.pki_data
req['sig'] = pr.signature.encode('hex')
self.receive_requests[key] = req
self.storage.put('payment_requests', self.receive_requests)
def add_payment_request(self, req, config):
import os
addr = req['address']
amount = req.get('amount')
message = req.get('memo')
self.receive_requests[addr] = req
self.storage.put('payment_requests', self.receive_requests)
self.set_label(addr, message) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, key)
with open(path, 'w') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
with open(os.path.join(rdir, key + '.json'), 'w') as f:
f.write(json.dumps(req))
return req
def remove_payment_request(self, addr, config):
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr)
for s in ['.json', '']:
n = os.path.join(rdir, key + s)
if os.path.exists(n):
os.unlink(n)
self.storage.put('payment_requests', self.receive_requests)
return True
def get_sorted_requests(self, config):
return sorted(map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys()), key=lambda x: x.get('time', 0))
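
# Minimal sketch of the receive-request flow built from the methods above (the
# `wallet` and `config` objects, the amount and the expiration are assumed):
#
#     addr = wallet.get_unused_address(None)
#     req = wallet.make_payment_request(addr, 100000, 'invoice #1', 3600)
#     wallet.add_payment_request(req, config)
#     wallet.get_request_status(addr)   # PR_UNPAID until enough coins arrive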
class Imported_Wallet(Abstract_Wallet):
wallet_type = 'imported'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
a = self.accounts.get(IMPORTED_ACCOUNT)
if not a:
self.accounts[IMPORTED_ACCOUNT] = ImportedAccount({'imported':{}})
def is_watching_only(self):
acc = self.accounts[IMPORTED_ACCOUNT]
n = acc.keypairs.values()
return len(n) > 0 and n == [[None, None]] * len(n)
def has_seed(self):
return False
def is_deterministic(self):
return False
def check_password(self, password):
self.accounts[IMPORTED_ACCOUNT].get_private_key((0,0), self, password)
def is_used(self, address):
return False
def get_master_public_keys(self):
return {}
def is_beyond_limit(self, address, account, is_change):
return False
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
def has_seed(self):
return self.seed != ''
def is_deterministic(self):
return True
def is_watching_only(self):
return not self.has_seed()
def add_seed(self, seed, password):
if self.seed:
raise Exception("a seed exists")
self.seed_version, self.seed = self.format_seed(seed)
if password:
self.seed = pw_encode( self.seed, password)
self.use_encryption = True
else:
self.use_encryption = False
self.storage.put('seed', self.seed, False)
self.storage.put('seed_version', self.seed_version, False)
self.storage.put('use_encryption', self.use_encryption,True)
def get_seed(self, password):
return pw_decode(self.seed, password)
def get_mnemonic(self, password):
return self.get_seed(password)
def change_gap_limit(self, value):
assert isinstance(value, int), 'gap limit must be of type int, not of %s'%type(value)
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit, True)
return True
elif value >= self.min_acceptable_gap():
for key, account in self.accounts.items():
addresses = account.get_addresses(False)
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
account.receiving_pubkeys = account.receiving_pubkeys[0:n]
account.receiving_addresses = account.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit, True)
self.save_accounts()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
k = 0
for a in addresses[::-1]:
if self.history.get(a):break
k = k + 1
return k
def min_acceptable_gap(self):
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
for account in self.accounts.values():
addresses = account.get_addresses(0)
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if self.history.get(a):
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def default_account(self):
return self.accounts['0']
def create_new_address(self, account=None, for_change=0):
if account is None:
account = self.default_account()
address = account.create_new_address(for_change)
self.add_address(address)
return address
def add_address(self, address):
if address not in self.history:
self.history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.save_accounts()
def synchronize(self):
with self.lock:
for account in self.accounts.values():
account.synchronize(self)
def restore(self, callback):
from i18n import _
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
apply(callback, (msg,))
time.sleep(0.1)
def wait_for_network():
while not self.network.is_connected():
msg = "%s \n" % (_("Connecting..."))
apply(callback, (msg,))
time.sleep(0.1)
# wait until we are connected, because the user might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def is_beyond_limit(self, address, account, is_change):
if type(account) == ImportedAccount:
return False
addr_list = account.get_addresses(is_change)
i = addr_list.index(address)
prev_addresses = addr_list[:max(0, i)]
limit = self.gap_limit_for_change if is_change else self.gap_limit
if len(prev_addresses) < limit:
return False
prev_addresses = prev_addresses[max(0, i - limit):]
for addr in prev_addresses:
if self.history.get(addr):
return False
return True
def get_action(self):
if not self.get_master_public_key():
return 'create_seed'
if not self.accounts:
return 'create_accounts'
def get_master_public_keys(self):
out = {}
for k, account in self.accounts.items():
if type(account) == ImportedAccount:
continue
name = self.get_account_name(k)
mpk_text = '\n\n'.join(account.get_master_pubkeys())
out[name] = mpk_text
return out
class BIP32_Wallet(Deterministic_Wallet):
# abstract class, bip32 logic
root_name = 'x/'
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
self.master_public_keys = storage.get('master_public_keys', {})
self.master_private_keys = storage.get('master_private_keys', {})
self.gap_limit = storage.get('gap_limit', 20)
def is_watching_only(self):
return not bool(self.master_private_keys)
def can_import(self):
return False
def get_master_public_key(self):
return self.master_public_keys.get(self.root_name)
def get_master_private_key(self, account, password):
k = self.master_private_keys.get(account)
if not k: return
xprv = pw_decode(k, password)
try:
deserialize_xkey(xprv)
except:
raise InvalidPassword()
return xprv
def check_password(self, password):
xpriv = self.get_master_private_key(self.root_name, password)
xpub = self.master_public_keys[self.root_name]
if deserialize_xkey(xpriv)[3] != deserialize_xkey(xpub)[3]:
raise InvalidPassword()
def add_master_public_key(self, name, xpub):
if xpub in self.master_public_keys.values():
raise BaseException('Duplicate master public key')
self.master_public_keys[name] = xpub
self.storage.put('master_public_keys', self.master_public_keys, True)
def add_master_private_key(self, name, xpriv, password):
self.master_private_keys[name] = pw_encode(xpriv, password)
self.storage.put('master_private_keys', self.master_private_keys, True)
def derive_xkeys(self, root, derivation, password):
x = self.master_private_keys[root]
root_xprv = pw_decode(x, password)
xprv, xpub = bip32_private_derivation(root_xprv, root, derivation)
return xpub, xprv
def create_master_keys(self, password):
seed = self.get_seed(password)
self.add_cosigner_seed(seed, self.root_name, password)
def add_cosigner_seed(self, seed, name, password, passphrase=''):
# we don't store the seed, only the master xpriv
xprv, xpub = bip32_root(self.mnemonic_to_seed(seed, passphrase))
xprv, xpub = bip32_private_derivation(xprv, "m/", self.root_derivation)
self.add_master_public_key(name, xpub)
self.add_master_private_key(name, xprv, password)
def add_cosigner_xpub(self, seed, name):
# store only master xpub
xprv, xpub = bip32_root(self.mnemonic_to_seed(seed,''))
xprv, xpub = bip32_private_derivation(xprv, "m/", self.root_derivation)
self.add_master_public_key(name, xpub)
def mnemonic_to_seed(self, seed, password):
return Mnemonic.mnemonic_to_seed(seed, password)
def make_seed(self, lang=None):
return Mnemonic(lang).make_seed()
def format_seed(self, seed):
return NEW_SEED_VERSION, ' '.join(seed.split())
class BIP32_Simple_Wallet(BIP32_Wallet):
# Wallet with a single BIP32 account, no seed
# gap limit 20
wallet_type = 'xpub'
def create_xprv_wallet(self, xprv, password):
xpub = bitcoin.xpub_from_xprv(xprv)
account = BIP32_Account({'xpub':xpub})
self.storage.put('seed_version', self.seed_version, True)
self.add_master_private_key(self.root_name, xprv, password)
self.add_master_public_key(self.root_name, xpub)
self.add_account('0', account)
self.use_encryption = (password != None)
self.storage.put('use_encryption', self.use_encryption,True)
def create_xpub_wallet(self, xpub):
account = BIP32_Account({'xpub':xpub})
self.storage.put('seed_version', self.seed_version, True)
self.add_master_public_key(self.root_name, xpub)
self.add_account('0', account)
class BIP32_HD_Wallet(BIP32_Wallet):
# wallet that can create accounts
def __init__(self, storage):
self.next_account = storage.get('next_account2', None)
BIP32_Wallet.__init__(self, storage)
def can_create_accounts(self):
return self.root_name in self.master_private_keys.keys()
def addresses(self, b=True):
l = BIP32_Wallet.addresses(self, b)
if self.next_account:
_, _, _, next_address = self.next_account
if next_address not in l:
l.append(next_address)
return l
def get_address_index(self, address):
if self.next_account:
next_id, next_xpub, next_pubkey, next_address = self.next_account
if address == next_address:
return next_id, (0,0)
return BIP32_Wallet.get_address_index(self, address)
def num_accounts(self):
keys = []
for k, v in self.accounts.items():
if type(v) != BIP32_Account:
continue
keys.append(k)
i = 0
while True:
account_id = '%d'%i
if account_id not in keys:
break
i += 1
return i
def get_next_account(self, password):
account_id = '%d'%self.num_accounts()
derivation = self.root_name + "%d'"%int(account_id)
xpub, xprv = self.derive_xkeys(self.root_name, derivation, password)
self.add_master_public_key(derivation, xpub)
if xprv:
self.add_master_private_key(derivation, xprv, password)
account = BIP32_Account({'xpub':xpub})
addr, pubkey = account.first_address()
self.add_address(addr)
return account_id, xpub, pubkey, addr
def create_main_account(self, password):
# First check the password is valid (this raises if it isn't).
self.check_password(password)
assert self.num_accounts() == 0
self.create_account('Main account', password)
def create_account(self, name, password):
account_id, xpub, _, _ = self.get_next_account(password)
account = BIP32_Account({'xpub':xpub})
self.add_account(account_id, account)
self.set_label(account_id, name)
# add address of the next account
self.next_account = self.get_next_account(password)
self.storage.put('next_account2', self.next_account)
def account_is_pending(self, k):
return type(self.accounts.get(k)) == PendingAccount
def delete_pending_account(self, k):
assert type(self.accounts.get(k)) == PendingAccount
self.accounts.pop(k)
self.save_accounts()
def create_pending_account(self, name, password):
if self.next_account is None:
self.next_account = self.get_next_account(password)
self.storage.put('next_account2', self.next_account)
next_id, next_xpub, next_pubkey, next_address = self.next_account
if name:
self.set_label(next_id, name)
self.accounts[next_id] = PendingAccount({'pending':True, 'address':next_address, 'pubkey':next_pubkey})
self.save_accounts()
def synchronize(self):
# synchronize existing accounts
BIP32_Wallet.synchronize(self)
if self.next_account is None and not self.use_encryption:
try:
self.next_account = self.get_next_account(None)
self.storage.put('next_account2', self.next_account)
except:
self.print_error('cannot get next account')
# check pending account
if self.next_account is not None:
next_id, next_xpub, next_pubkey, next_address = self.next_account
if self.address_is_old(next_address):
self.print_error("creating account", next_id)
self.add_account(next_id, BIP32_Account({'xpub':next_xpub}))
# here the user should get a notification
self.next_account = None
self.storage.put('next_account2', self.next_account)
elif self.history.get(next_address, []):
if next_id not in self.accounts:
self.print_error("create pending account", next_id)
self.accounts[next_id] = PendingAccount({'pending':True, 'address':next_address, 'pubkey':next_pubkey})
self.save_accounts()
class NewWallet(BIP32_Wallet, Mnemonic):
# Standard wallet
root_derivation = "m/"
wallet_type = 'standard'
def create_main_account(self, password):
xpub = self.master_public_keys.get("x/")
account = BIP32_Account({'xpub':xpub})
self.add_account('0', account)
class Multisig_Wallet(BIP32_Wallet, Mnemonic):
# generic m of n
root_name = "x1/"
root_derivation = "m/"
def __init__(self, storage):
BIP32_Wallet.__init__(self, storage)
self.wallet_type = storage.get('wallet_type')
m = re.match('(\d+)of(\d+)', self.wallet_type)
self.m = int(m.group(1))
self.n = int(m.group(2))
def load_accounts(self):
self.accounts = {}
d = self.storage.get('accounts', {})
v = d.get('0')
if v:
if v.get('xpub3'):
v['xpubs'] = [v['xpub'], v['xpub2'], v['xpub3']]
elif v.get('xpub2'):
v['xpubs'] = [v['xpub'], v['xpub2']]
self.accounts = {'0': Multisig_Account(v)}
def create_main_account(self, password):
account = Multisig_Account({'xpubs': self.master_public_keys.values(), 'm': self.m})
self.add_account('0', account)
def get_master_public_keys(self):
return self.master_public_keys
def get_action(self):
for i in range(self.n):
if self.master_public_keys.get("x%d/"%(i+1)) is None:
return 'create_seed' if i == 0 else 'add_cosigners'
if not self.accounts:
return 'create_accounts'
class OldWallet(Deterministic_Wallet):
wallet_type = 'old'
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 5)
def make_seed(self):
import old_mnemonic
seed = random_seed(128)
return ' '.join(old_mnemonic.mn_encode(seed))
def format_seed(self, seed):
import old_mnemonic
# see if seed was entered as hex
seed = seed.strip()
try:
assert seed
seed.decode('hex')
return OLD_SEED_VERSION, str(seed)
except Exception:
pass
words = seed.split()
seed = old_mnemonic.mn_decode(words)
if not seed:
raise Exception("Invalid seed")
return OLD_SEED_VERSION, seed
def create_master_keys(self, password):
seed = self.get_seed(password)
mpk = OldAccount.mpk_from_seed(seed)
self.storage.put('master_public_key', mpk, True)
def get_master_public_key(self):
return self.storage.get("master_public_key")
def get_master_public_keys(self):
return {'Main Account':self.get_master_public_key()}
def create_main_account(self, password):
mpk = self.storage.get("master_public_key")
self.create_account(mpk)
def create_account(self, mpk):
self.accounts['0'] = OldAccount({'mpk':mpk, 0:[], 1:[]})
self.save_accounts()
def create_watching_only_wallet(self, mpk):
self.seed_version = OLD_SEED_VERSION
self.storage.put('seed_version', self.seed_version, False)
self.storage.put('master_public_key', mpk, True)
self.create_account(mpk)
def get_seed(self, password):
seed = pw_decode(self.seed, password).encode('utf8')
return seed
def check_password(self, password):
seed = self.get_seed(password)
self.accounts['0'].check_seed(seed)
def get_mnemonic(self, password):
import old_mnemonic
s = self.get_seed(password)
return ' '.join(old_mnemonic.mn_encode(s))
wallet_types = [
# category type description constructor
('standard', 'old', ("Old wallet"), OldWallet),
('standard', 'xpub', ("BIP32 Import"), BIP32_Simple_Wallet),
('standard', 'standard', ("Standard wallet"), NewWallet),
('standard', 'imported', ("Imported wallet"), Imported_Wallet),
('multisig', '2of2', ("Multisig wallet (2 of 2)"), Multisig_Wallet),
('multisig', '2of3', ("Multisig wallet (2 of 3)"), Multisig_Wallet)
]
# former WalletFactory
class Wallet(object):
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
seed_version = storage.get('seed_version')
if not seed_version:
seed_version = OLD_SEED_VERSION if len(storage.get('master_public_key','')) == 128 else NEW_SEED_VERSION
if seed_version not in [OLD_SEED_VERSION, NEW_SEED_VERSION]:
msg = "Your wallet has an unsupported seed version."
msg += '\n\nWallet file: %s' % os.path.abspath(storage.path)
if seed_version in [5, 7, 8, 9, 10]:
msg += "\n\nTo open this wallet, try 'git checkout seed_v%d'"%seed_version
if seed_version == 6:
# version 1.9.8 created v6 wallets when an incorrect seed was entered in the restore dialog
msg += '\n\nThis file was created because of a bug in version 1.9.8.'
if storage.get('master_public_keys') is None and storage.get('master_private_keys') is None and storage.get('imported_keys') is None:
# pbkdf2 was not included with the binaries, and wallet creation aborted.
msg += "\nIt does not contain any keys, and can safely be removed."
else:
# creation was complete if electrum was run from source
msg += "\nPlease open this file with Electrum 1.9.8, and move your coins to a new wallet."
raise BaseException(msg)
wallet_type = storage.get('wallet_type')
if wallet_type:
for cat, t, name, loader in wallet_types:
if t == wallet_type:
if cat in ['hardware', 'twofactor']:
WalletClass = lambda storage: apply(loader().constructor, (storage,))
else:
WalletClass = loader
break
else:
if re.match('(\d+)of(\d+)', wallet_type):
WalletClass = Multisig_Wallet
else:
raise BaseException('unknown wallet type', wallet_type)
else:
if seed_version == OLD_SEED_VERSION:
WalletClass = OldWallet
else:
WalletClass = NewWallet
return WalletClass(storage)
@classmethod
def is_seed(self, seed):
if not seed:
return False
elif is_old_seed(seed):
return True
elif is_new_seed(seed):
return True
else:
return False
@classmethod
def is_old_mpk(self, mpk):
try:
int(mpk, 16)
assert len(mpk) == 128
return True
except:
return False
@classmethod
def is_xpub(self, text):
try:
assert text[0:4] == 'xpub'
deserialize_xkey(text)
return True
except:
return False
@classmethod
def is_xprv(self, text):
try:
assert text[0:4] == 'xprv'
deserialize_xkey(text)
return True
except:
return False
@classmethod
def is_address(self, text):
if not text:
return False
for x in text.split():
if not bitcoin.is_address(x):
return False
return True
@classmethod
def is_private_key(self, text):
if not text:
return False
for x in text.split():
if not bitcoin.is_private_key(x):
return False
return True
@classmethod
def from_seed(self, seed, password, storage):
if is_old_seed(seed):
klass = OldWallet
elif is_new_seed(seed):
klass = NewWallet
w = klass(storage)
w.add_seed(seed, password)
w.create_master_keys(password)
w.create_main_account(password)
return w
@classmethod
def from_address(self, text, storage):
w = Imported_Wallet(storage)
for x in text.split():
w.accounts[IMPORTED_ACCOUNT].add(x, None, None, None)
w.save_accounts()
return w
@classmethod
def from_private_key(self, text, password, storage):
w = Imported_Wallet(storage)
w.update_password(None, password)
for x in text.split():
w.import_key(x, password)
return w
@classmethod
def from_old_mpk(self, mpk, storage):
w = OldWallet(storage)
w.seed = ''
w.create_watching_only_wallet(mpk)
return w
@classmethod
def from_xpub(self, xpub, storage):
w = BIP32_Simple_Wallet(storage)
w.create_xpub_wallet(xpub)
return w
@classmethod
def from_xprv(self, xprv, password, storage):
w = BIP32_Simple_Wallet(storage)
w.create_xprv_wallet(xprv, password)
return w
@classmethod
def from_multisig(klass, key_list, password, storage, wallet_type):
storage.put('wallet_type', wallet_type, True)
self = Multisig_Wallet(storage)
key_list = sorted(key_list, key = lambda x: klass.is_xpub(x))
for i, text in enumerate(key_list):
assert klass.is_seed(text) or klass.is_xprv(text) or klass.is_xpub(text)
name = "x%d/"%(i+1)
if klass.is_xprv(text):
xpub = bitcoin.xpub_from_xprv(text)
self.add_master_public_key(name, xpub)
self.add_master_private_key(name, text, password)
elif klass.is_xpub(text):
self.add_master_public_key(name, text)
elif klass.is_seed(text):
if name == 'x1/':
self.add_seed(text, password)
self.create_master_keys(password)
else:
self.add_cosigner_seed(text, name, password)
self.use_encryption = (password != None)
self.storage.put('use_encryption', self.use_encryption, True)
self.create_main_account(password)
return self
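# Illustrative sketch, not part of the original module: how the classmethod
# constructors and the factory above are typically combined. `storage` is
# assumed to be a WalletStorage instance (defined elsewhere in Electrum) and
# `text` is user-supplied input.
#
#     if Wallet.is_seed(text):
#         wallet = Wallet.from_seed(text, password, storage)
#     elif Wallet.is_xpub(text):
#         wallet = Wallet.from_xpub(text, storage)
#     elif Wallet.is_xprv(text):
#         wallet = Wallet.from_xprv(text, password, storage)
#     else:
#         wallet = Wallet(storage)  # dispatches on storage.get('wallet_type')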
| gpl-3.0 | 9,193,974,612,486,709,000 | 35.142445 | 149 | 0.551622 | false |
python-technopark/MoneyMoney | src/main.py | 1 | 9184 | from mmhandler import MmHandler
import telebot
import argparse
import re
import logging
import yaml
import os
TOKEN = ''
bot = telebot.TeleBot(TOKEN)
handler = MmHandler(0)  # default user_id = 0
help_file = open('help.txt', 'r')
help_message = help_file.read()
help_file.close()
report_periods = {'день', 'неделю', 'месяц', 'год'}
category_mods = {'расходов', 'доходов'}
format_error = Exception('Неправильный формат команды!')
@bot.message_handler(commands=['start'])
def start(message):
handler.user_id = message.chat.id
handler_message = handler.start()
bot.send_message(message.chat.id, handler_message)
@bot.message_handler(commands=['help'])
def help(message):
handler.user_id = message.chat.id
bot.send_message(message.chat.id, 'Привет! Список моих команд:')
bot.send_message(message.chat.id, help_message)
@bot.message_handler(content_types=["text"])
def parse(message):
handler.user_id = message.chat.id
try:
str_array = message.text.lower().split()
length = len(str_array)
# if empty line
if length == 0:
bot.send_message(message.chat.id, 'Забыл список команд? Держи:')
bot.send_message(message.chat.id, help_message)
elif length == 2 and (str_array[0] == "удалить" or str_array[0] == "удали") and str_array[1] == "другое":
            str_array[1] = "other"
handler_message = handler.del_category(str_array[1])
bot.send_message(message.chat.id, handler_message)
# if format +/-....
elif str_array[0][0] == '+' or str_array[0][0] == '-':
if length == 1:
if re.match('[+]\d+', str_array[0]) or re.match('[-]\d+', str_array[0]):
handler_message = handler.add_operation(int(str_array[0]))
bot.send_message(message.chat.id, handler_message)
else:
raise format_error
elif length >= 2 and re.match('[а-яa-zА-ЯA-Z]+', str_array[1]):
if re.match('[+]\d+', str_array[0]) or re.match('[-]\d+', str_array[0]):
if length >= 3:
description_buf = ' '.join(str_array[2:length])
handler_message = handler.add_operation(int(str_array[0]), str_array[1], description_buf)
else:
handler_message = handler.add_operation(int(str_array[0]), str_array[1])
bot.send_message(message.chat.id, handler_message)
else:
raise format_error
elif length >= 2 and (str_array[0] == 'показать' or str_array[0] == "покажи") and str_array[1] == 'категории':
if length == 3:
if str_array[2] in category_mods:
handler_message = handler.show_categories(str_array[2])
bot.send_message(message.chat.id, handler_message)
else:
raise Exception('Неправильный формат команды!')
else:
handler_message = handler.show_categories()
bot.send_message(message.chat.id, handler_message)
elif length == 3 and str_array[1] == 'категорию' and re.match('[а-яa-zA-ZА-Я]+', str_array[2]):
if str_array[0] == 'удалить' or str_array[0] == "удали":
if str_array[2] == "другое":
bot.send_message(message.chat.id, "Для того чтобы удалить категорию другое и все операции связанные с ней, введите команду: удалить другое")
return
handler_message = handler.del_category(str_array[2])
bot.send_message(message.chat.id, handler_message)
elif str_array[0] == 'добавить' or str_array[0] == 'добавь':
handler_message = handler.add_category(str_array[2])
bot.send_message(message.chat.id, handler_message)
else:
raise format_error
elif str_array[0] == 'отчет':
if length >= 3:
if str_array[1] == 'за' and (str_array[2] in report_periods):
handler_message = handler.view_report(str_array[2])
bot.send_message(message.chat.id, handler_message)
bot.send_chat_action(message.chat.id, 'typing')
image_file = open('tmp/income' + str(handler.user_id) + '.png', 'rb')
bot.send_photo(message.chat.id, image_file)
os.remove('tmp/income' + str(handler.user_id) + '.png')
image_file = open('tmp/expense' + str(handler.user_id) + '.png', 'rb')
bot.send_photo(message.chat.id, image_file)
os.remove('tmp/expense' + str(handler.user_id) + '.png')
elif str_array[1] == 'с' and re.match('\d{1,2}-\d{1,2}-\d{4}', str_array[2]):
date_from_split_reverse = str_array[2].split('-')[::-1]
date_from = '-'.join(date_from_split_reverse)
if length >= 4 and str_array[3] == 'по' and re.match('\d{1,2}-\d{1,2}-\d{4}', str_array[4]):
date_to_split_reverse = str_array[4].split('-')[::-1]
date_to = '-'.join(date_to_split_reverse)
handler_message = handler.view_custom_report(date_from, date_to)
else:
handler_message = handler.view_custom_report(date_from)
bot.send_message(message.chat.id, handler_message)
else:
raise format_error
elif length == 1:
keyboard = telebot.types.InlineKeyboardMarkup()
button_month = telebot.types.InlineKeyboardButton(text="месяц", callback_data="месяц")
button_day = telebot.types.InlineKeyboardButton(text="день", callback_data="день")
button_week = telebot.types.InlineKeyboardButton(text="неделя", callback_data="неделю")
button_year = telebot.types.InlineKeyboardButton(text="год", callback_data="год")
keyboard.add(button_day)
keyboard.add(button_week)
keyboard.add(button_month)
keyboard.add(button_year)
bot.send_message(message.chat.id, "Выбери период", reply_markup=keyboard)
else:
raise format_error
else:
bot.send_message(message.chat.id, 'Не знаю такой команды! Список моих команд:')
bot.send_message(message.chat.id, help_message)
except Exception as e:
handler_message = 'Ошибка: {} '.format(e)
bot.send_message(message.chat.id, handler_message)
# handler for inline-keyboard
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
if call.message:
if call.data in report_periods:
handler.user_id = call.message.chat.id
handler_message = handler.view_report(call.data)
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
text=handler_message)
bot.send_chat_action(call.message.chat.id, 'typing')
image_file = open('tmp/income' + str(handler.user_id) + '.png', 'rb')
bot.send_photo(call.message.chat.id, image_file)
os.remove('tmp/income' + str(handler.user_id) + '.png')
image_file = open('tmp/expense' + str(handler.user_id) + '.png', 'rb')
bot.send_photo(call.message.chat.id, image_file)
os.remove('tmp/expense' + str(handler.user_id) + '.png')
# infinite polling loop
if __name__ == '__main__':
token = ""
parser = argparse.ArgumentParser(description='Process some flags.')
# parser.add_argument('-o', '--output')
# parser.add_argument('-v', dest='verbose', action='store_true')
group = parser.add_mutually_exclusive_group()
group.add_argument('--develop', help='Develop dirs', action="store_true")
group.add_argument('--production', help='Production dirs', action="store_true")
args = parser.parse_args()
if args.develop:
yaml_config = open('../config/config.yaml', 'r')
    elif args.production:
        yaml_config = open('/etc/moneymoney.d/config.yaml', 'r')
    else:
        parser.error("You should specify either --develop or --production option!")
config = yaml.load(yaml_config)
print(config)
TOKEN = config['token']
print(TOKEN)
logger = telebot.logger
telebot.logger.setLevel(logging.DEBUG)
bot.token = TOKEN
bot.polling(none_stop=True)
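# Illustrative sketch (an assumption, not taken from the repository): the only
# key this script reads from the YAML file is `token`, so a minimal
# config/config.yaml could look like:
#
#     token: "123456789:AAE-hypothetical-telegram-bot-token"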
| mit | -8,096,265,834,219,133,000 | 43.642857 | 160 | 0.5728 | false |
gizela/gizela | gizela/stat/PointLocalGamaDisplTest.py | 1 | 3147 | # gizela
#
# Copyright (C) 2010 Michal Seidl, Tomas Kubin
# Author: Tomas Kubin <[email protected]>
# URL: <http://slon.fsv.cvut.cz/gizela>
#
# $Id$
from gizela.data.PointLocalGama import PointLocalGama
from gizela.stat.PointDisplBase import PointDisplBase
from gizela.stat.TestResult import TestResult
from gizela.data.GamaCoordStatus import GamaCoordStatus
from gizela.stat.DisplacementTestType import DisplacementTestType
from gizela.stat.displ_test_text_table import displ_test_text_table
class PointLocalGamaDisplTest(PointLocalGama, PointDisplBase, TestResult):
"""
class for gama-local point with displacement and test results
point: x, y, z, status, covmat
displ: dx, dy, dz, dcovmat
test: testStat, testPassed, testPValue, testReliability, testType
"""
def __init__(self, id,
x=None, y=None, z=None,
status=GamaCoordStatus.unused,
covmat=None, index=None,
dx=None, dy=None, dz=None,
dcovmat=None, dindex=None,
testStat=None,
testPassed=None,
testPValue=None,
testReliability=None,
testType=DisplacementTestType.none,
textTable=None,
epochIndex=None):
if isinstance(id, PointLocalGama):
p = id
id = p.id; x = p.x; y = p.y; z = p.z;
status = p.status; covmat = p.covmat; index = p.index
if textTable == None:
textTable = displ_test_text_table()
PointLocalGama.__init__(self, id=id, x=x, y=y, z=z,
status=status, covmat=covmat, index=index)
PointDisplBase.__init__(self, id=id, dx=dx, dy=dy, dz=dz,
covmat=dcovmat, index=dindex)
TestResult.__init__(self, testStat=testStat, testPassed=testPassed,
testPValue=testPValue,
testReliability=testReliability,
testType=testType, textTable=textTable)
self.epochIndex = epochIndex
def make_table_row(self):
row = [self.id, self.x, self.y, self.z]
row.append(self.get_status_string())
row.extend(self.var)
row.extend([self.displ.x, self.displ.y, self.displ.z])
row.extend(self.displ.var)
row.extend([self.testStat, self.testPassed, self.testPValue,
self.testReliability, self.get_test_type_string(),
self.get_test_dim()])
return self.textTable.make_table_row(row)
if __name__ == "__main__":
p0 = PointLocalGama(id="AB", x=1e6, y=2e6, z=3e6)
p = PointLocalGamaDisplTest(id="AB", x=1e6, y=2e6, z=3e6)
p = PointLocalGamaDisplTest(p0)
p.set_test_result(testPValue=0.97, testType=DisplacementTestType.xyz,
testPassed=True)
from gizela.data.PointCartCovMat import PointCartCovMat
p.set_displacement(PointCartCovMat(id=None, x=0.1, y=0.2, z=0.3))
print p
from gizela.data.PointCart import PointCart
print isinstance(p, PointCart)
| gpl-3.0 | -1,544,846,842,559,281,000 | 33.966667 | 75 | 0.60089 | false |
nextgis/nextgisweb_compulink | nextgisweb_compulink/compulink_admin/__init__.py | 1 | 1255 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os import path
from nextgisweb.component import Component, require
from .model import Base, PROJECT_STATUS_PROJECT
from .ident import COMP_ID
from nextgisweb_compulink.init_data.command import DBInit
from .view import get_regions_from_resource, get_districts_from_resource, get_project_statuses
BASE_PATH = path.abspath(path.dirname(__file__))
class CompulinkAdminComponent(Component):
identity = COMP_ID
metadata = Base.metadata
@require('lookup_table')
def initialize(self):
pass
@require('lookup_table')
def initialize_db(self):
return
#TODO: fake! Need refactoring
args = self
args.action = 'all'
args.force = False
DBInit.execute(args, make_transaction=False)
@require('lookup_table')
def setup_pyramid(self, config):
from . import view
view.setup_pyramid(self, config)
def client_settings(self, request):
return dict(
regions_dict=get_regions_from_resource(sort=True),
districts_dict=get_districts_from_resource(sort=True),
statuses_dict=get_project_statuses(),
def_status=PROJECT_STATUS_PROJECT
)
| gpl-2.0 | 5,916,443,532,301,515,000 | 27.522727 | 94 | 0.667729 | false |
katembu/world-cup | world_cup/user_management/urls.py | 1 | 1522 | from django.conf.urls import patterns, url
from django.contrib.auth.views import password_reset, password_reset_done, password_reset_confirm, password_reset_complete
urlpatterns = patterns('user_management.views',
url(r'^$', 'index'),
url(r'^accounts/login/$', 'login_user'),
url(r'^accounts/logout/$', 'logout_user'),
url(r'^accounts/create/$', 'create_user'),
url(r'^accounts/password/reset/$', password_reset,
{'post_reset_redirect': '/accounts/password/reset/done/'}, name="password_reset"),
url(r'^accounts/password/reset/done/$', password_reset_done),
url(r'^accounts/password/reset/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$',
password_reset_confirm, {'post_reset_redirect': '/accounts/password/done/'},
name="password_reset_confirm"),
url(r'^accounts/password/done/$', password_reset_complete),
url(r'^messages/$', 'messages'),
url(r'^message/form/$', 'message_form'),
url(r'^message/send/$', 'message_send'),
url(r'^message/delete/$', 'message_delete'),
url(r'^userlist/$', 'user_list'),
url(r'^profile/$', 'user_profile'),
url(r'^unsubscribe/$', 'unsubscribe'),
)
| mit | -1,450,905,086,284,081,200 | 65.173913 | 122 | 0.492773 | false |
techtonik/readthedocs.org | readthedocs/projects/tasks.py | 1 | 31592 | """Tasks related to projects
This includes fetching repository code, cleaning ``conf.py`` files, and
rebuilding documentation.
"""
import os
import shutil
import json
import logging
import socket
import requests
import hashlib
from collections import defaultdict
from celery import task, Task
from djcelery import celery as celery_app
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from readthedocs.builds.constants import (LATEST,
BUILD_STATE_CLONING,
BUILD_STATE_INSTALLING,
BUILD_STATE_BUILDING)
from readthedocs.builds.models import Build, Version
from readthedocs.core.utils import send_email, run_on_app_servers
from readthedocs.cdn.purge import purge
from readthedocs.doc_builder.loader import get_builder_class
from readthedocs.doc_builder.environments import (LocalEnvironment,
DockerEnvironment)
from readthedocs.doc_builder.exceptions import BuildEnvironmentError
from readthedocs.projects.exceptions import ProjectImportError
from readthedocs.projects.models import ImportedFile, Project
from readthedocs.projects.utils import make_api_version, make_api_project, symlink
from readthedocs.projects.constants import LOG_TEMPLATE
from readthedocs.privacy.loader import Syncer
from readthedocs.search.parse_json import process_all_json_files
from readthedocs.search.utils import process_mkdocs_json
from readthedocs.restapi.utils import index_search_request
from readthedocs.vcs_support import utils as vcs_support_utils
from readthedocs.api.client import api as api_v1
from readthedocs.restapi.client import api as api_v2
from readthedocs.projects.signals import before_vcs, after_vcs, before_build, after_build
from readthedocs.core.resolver import resolve_path
log = logging.getLogger(__name__)
HTML_ONLY = getattr(settings, 'HTML_ONLY_PROJECTS', ())
class UpdateDocsTask(Task):
"""
The main entry point for updating documentation.
It handles all of the logic around whether a project is imported or we
created it. Then it will build the html docs and other requested parts.
`pk`
Primary key of the project to update
`record`
Whether or not to keep a record of the update in the database. Useful
for preventing changes visible to the end-user when running commands
from the shell, for example.
"""
max_retries = 5
default_retry_delay = (7 * 60)
name = 'update_docs'
def __init__(self, build_env=None, force=False, search=True, localmedia=True,
build=None, project=None, version=None):
self.build_env = build_env
self.build_force = force
self.build_search = search
self.build_localmedia = localmedia
self.build = {}
if build is not None:
self.build = build
self.version = {}
if version is not None:
self.version = version
self.project = {}
if project is not None:
self.project = project
def run(self, pk, version_pk=None, build_pk=None, record=True, docker=False,
search=True, force=False, localmedia=True, **kwargs):
env_cls = LocalEnvironment
if docker or settings.DOCKER_ENABLE:
env_cls = DockerEnvironment
self.project = self.get_project(pk)
self.version = self.get_version(self.project, version_pk)
self.build = self.get_build(build_pk)
self.build_search = search
self.build_localmedia = localmedia
self.build_force = force
self.build_env = env_cls(project=self.project, version=self.version,
build=self.build, record=record)
with self.build_env:
if self.project.skip:
raise BuildEnvironmentError(
_('Builds for this project are temporarily disabled'))
try:
self.setup_vcs()
except vcs_support_utils.LockTimeout, e:
self.retry(exc=e, throw=False)
raise BuildEnvironmentError(
'Version locked, retrying in 5 minutes.',
status_code=423
)
if self.project.documentation_type == 'auto':
self.update_documentation_type()
self.setup_environment()
# TODO the build object should have an idea of these states, extend
# the model to include an idea of these outcomes
outcomes = self.build_docs()
build_id = self.build.get('id')
# Web Server Tasks
if build_id:
finish_build.delay(
version_pk=self.version.pk,
build_pk=build_id,
hostname=socket.gethostname(),
html=outcomes['html'],
search=outcomes['search'],
localmedia=outcomes['localmedia'],
pdf=outcomes['pdf'],
epub=outcomes['epub'],
)
if self.build_env.failed:
self.send_notifications()
@staticmethod
def get_project(project_pk):
"""Get project from API"""
project_data = api_v1.project(project_pk).get()
project = make_api_project(project_data)
return project
@staticmethod
def get_version(project, version_pk):
"""Ensure we're using a sane version"""
if version_pk:
version_data = api_v1.version(version_pk).get()
else:
version_data = (api_v1
.version(project.slug)
.get(slug=LATEST)['objects'][0])
return make_api_version(version_data)
@staticmethod
def get_build(build_pk):
"""
Retrieve build object from API
:param build_pk: Build primary key
"""
build = {}
if build_pk:
build = api_v2.build(build_pk).get()
return dict((key, val) for (key, val) in build.items()
if key not in ['project', 'version', 'resource_uri',
'absolute_uri'])
def update_documentation_type(self):
"""
Force Sphinx for 'auto' documentation type
This used to determine the type and automatically set the documentation
type to Sphinx for rST and Mkdocs for markdown. It now just forces
Sphinx, due to markdown support.
"""
ret = 'sphinx'
project_data = api_v2.project(self.project.pk).get()
project_data['documentation_type'] = ret
api_v2.project(self.project.pk).put(project_data)
self.project.documentation_type = ret
def setup_vcs(self):
"""
Update the checkout of the repo to make sure it's the latest.
This also syncs versions in the DB.
:param build_env: Build environment
"""
self.build_env.update_build(state=BUILD_STATE_CLONING)
log.info(LOG_TEMPLATE
.format(project=self.project.slug,
version=self.version.slug,
msg='Updating docs from VCS'))
try:
update_imported_docs(self.version.pk)
commit = self.project.vcs_repo(self.version.slug).commit
if commit:
self.build['commit'] = commit
except ProjectImportError:
raise BuildEnvironmentError('Failed to import project',
status_code=404)
def setup_environment(self):
"""
Build the virtualenv and install the project into it.
Always build projects with a virtualenv.
:param build_env: Build environment to pass commands and execution through.
"""
build_dir = os.path.join(
self.project.venv_path(version=self.version.slug),
'build')
self.build_env.update_build(state=BUILD_STATE_INSTALLING)
if os.path.exists(build_dir):
log.info(LOG_TEMPLATE
.format(project=self.project.slug,
version=self.version.slug,
msg='Removing existing build directory'))
shutil.rmtree(build_dir)
site_packages = '--no-site-packages'
if self.project.use_system_packages:
site_packages = '--system-site-packages'
self.build_env.run(
self.project.python_interpreter,
'-mvirtualenv',
site_packages,
self.project.venv_path(version=self.version.slug)
)
# Install requirements
requirements = [
'sphinx==1.3.1',
'Pygments==2.0.2',
'virtualenv==13.1.0',
'setuptools==18.0.1',
'docutils==0.11',
'mkdocs==0.14.0',
'mock==1.0.1',
'pillow==2.6.1',
'readthedocs-sphinx-ext==0.5.4',
'sphinx-rtd-theme==0.1.9',
'alabaster>=0.7,<0.8,!=0.7.5',
'recommonmark==0.1.1',
]
cmd = [
'python',
self.project.venv_bin(version=self.version.slug, filename='pip'),
'install',
'--use-wheel',
'-U',
]
if self.project.use_system_packages:
# Other code expects sphinx-build to be installed inside the
# virtualenv. Using the -I option makes sure it gets installed
# even if it is already installed system-wide (and
# --system-site-packages is used)
cmd.append('-I')
cmd.extend(requirements)
self.build_env.run(
*cmd,
bin_path=self.project.venv_bin(version=self.version.slug)
)
# Handle requirements
requirements_file_path = self.project.requirements_file
checkout_path = self.project.checkout_path(self.version.slug)
if not requirements_file_path:
builder_class = get_builder_class(self.project.documentation_type)
docs_dir = (builder_class(self.build_env)
.docs_dir())
for path in [docs_dir, '']:
for req_file in ['pip_requirements.txt', 'requirements.txt']:
test_path = os.path.join(checkout_path, path, req_file)
if os.path.exists(test_path):
requirements_file_path = test_path
break
if requirements_file_path:
self.build_env.run(
'python',
self.project.venv_bin(version=self.version.slug, filename='pip'),
'install',
'--exists-action=w',
'-r{0}'.format(requirements_file_path),
cwd=checkout_path,
bin_path=self.project.venv_bin(version=self.version.slug)
)
# Handle setup.py
checkout_path = self.project.checkout_path(self.version.slug)
setup_path = os.path.join(checkout_path, 'setup.py')
if os.path.isfile(setup_path) and self.project.use_virtualenv:
if getattr(settings, 'USE_PIP_INSTALL', False):
self.build_env.run(
'python',
self.project.venv_bin(version=self.version.slug, filename='pip'),
'install',
'--ignore-installed',
'.',
cwd=checkout_path,
bin_path=self.project.venv_bin(version=self.version.slug)
)
else:
self.build_env.run(
'python',
'setup.py',
'install',
'--force',
cwd=checkout_path,
bin_path=self.project.venv_bin(version=self.version.slug)
)
def build_docs(self):
"""Wrapper to all build functions
Executes the necessary builds for this task and returns whether the
build was successful or not.
:returns: Build outcomes with keys for html, search, localmedia, pdf,
and epub
:rtype: dict
"""
self.build_env.update_build(state=BUILD_STATE_BUILDING)
before_build.send(sender=self.version)
outcomes = defaultdict(lambda: False)
with self.project.repo_nonblockinglock(
version=self.version,
max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
outcomes['html'] = self.build_docs_html()
outcomes['search'] = self.build_docs_search()
outcomes['localmedia'] = self.build_docs_localmedia()
outcomes['pdf'] = self.build_docs_pdf()
outcomes['epub'] = self.build_docs_epub()
after_build.send(sender=self.version)
return outcomes
def build_docs_html(self):
"""Build HTML docs"""
html_builder = get_builder_class(self.project.documentation_type)(
self.build_env
)
if self.build_force:
html_builder.force()
html_builder.append_conf()
success = html_builder.build()
if success:
html_builder.move()
# Gracefully attempt to move files via task on web workers.
try:
move_files.delay(
version_pk=self.version.pk,
html=True,
hostname=socket.gethostname(),
)
except socket.error:
# TODO do something here
pass
return success
def build_docs_search(self):
"""Build search data with separate build"""
if self.build_search:
if self.project.is_type_mkdocs:
return self.build_docs_class('mkdocs_json')
if self.project.is_type_sphinx:
return self.build_docs_class('sphinx_search')
return False
def build_docs_localmedia(self):
"""Get local media files with separate build"""
if self.build_localmedia:
if self.project.is_type_sphinx:
return self.build_docs_class('sphinx_singlehtmllocalmedia')
return False
def build_docs_pdf(self):
"""Build PDF docs"""
if (self.project.slug in HTML_ONLY or
not self.project.is_type_sphinx or
not self.project.enable_pdf_build):
return False
return self.build_docs_class('sphinx_pdf')
def build_docs_epub(self):
"""Build ePub docs"""
if (self.project.slug in HTML_ONLY or
not self.project.is_type_sphinx or
not self.project.enable_epub_build):
return False
return self.build_docs_class('sphinx_epub')
def build_docs_class(self, builder_class):
"""Build docs with additional doc backends
These steps are not necessarily required for the build to halt, so we
only raise a warning exception here. A hard error will halt the build
process.
"""
builder = get_builder_class(builder_class)(self.build_env)
success = builder.build()
builder.move()
return success
def send_notifications(self):
"""Send notifications on build failure"""
send_notifications.delay(self.version.pk, build_pk=self.build['id'])
update_docs = celery_app.tasks[UpdateDocsTask.name]
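# Illustrative sketch (an assumption, not part of this module): web-server code
# would typically queue the registered task roughly like this, with `project`,
# `version` and `build` being the usual ORM objects:
#
#     update_docs.delay(project.pk, version_pk=version.pk, build_pk=build.pk,
#                       record=True, force=False)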
@task()
def update_imported_docs(version_pk):
"""
Check out or update the given project's repository
:param version_pk: Version id to update
"""
version_data = api_v1.version(version_pk).get()
version = make_api_version(version_data)
project = version.project
ret_dict = {}
# Make Dirs
if not os.path.exists(project.doc_path):
os.makedirs(project.doc_path)
if not project.vcs_repo():
raise ProjectImportError(("Repo type '{0}' unknown".format(project.repo_type)))
with project.repo_nonblockinglock(
version=version,
max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
# Get the actual code on disk
try:
before_vcs.send(sender=version)
if version:
log.info(
LOG_TEMPLATE.format(
project=project.slug,
version=version.slug,
msg='Checking out version {slug}: {identifier}'.format(
slug=version.slug,
identifier=version.identifier
)
)
)
version_slug = version.slug
version_repo = project.vcs_repo(version_slug)
ret_dict['checkout'] = version_repo.checkout(version.identifier)
else:
# Does this ever get called?
log.info(LOG_TEMPLATE.format(
project=project.slug, version=version.slug, msg='Updating to latest revision'))
version_slug = LATEST
version_repo = project.vcs_repo(version_slug)
ret_dict['checkout'] = version_repo.update()
except Exception:
raise
finally:
after_vcs.send(sender=version)
# Update tags/version
version_post_data = {'repo': version_repo.repo_url}
if version_repo.supports_tags:
version_post_data['tags'] = [
{'identifier': v.identifier,
'verbose_name': v.verbose_name,
} for v in version_repo.tags
]
if version_repo.supports_branches:
version_post_data['branches'] = [
{'identifier': v.identifier,
'verbose_name': v.verbose_name,
} for v in version_repo.branches
]
try:
api_v2.project(project.pk).sync_versions.post(version_post_data)
except Exception, e:
print "Sync Versions Exception: %s" % e.message
return ret_dict
# Web tasks
@task(queue='web')
def finish_build(version_pk, build_pk, hostname=None, html=False,
localmedia=False, search=False, pdf=False, epub=False):
"""Build Finished, do house keeping bits"""
version = Version.objects.get(pk=version_pk)
build = Build.objects.get(pk=build_pk)
if html:
version.active = True
version.built = True
version.save()
if not pdf:
clear_pdf_artifacts(version)
if not epub:
clear_epub_artifacts(version)
move_files(
version_pk=version_pk,
hostname=hostname,
html=html,
localmedia=localmedia,
search=search,
pdf=pdf,
epub=epub,
)
symlink(project=version.project)
# Delayed tasks
update_static_metadata.delay(version.project.pk)
fileify.delay(version.pk, commit=build.commit)
update_search.delay(version.pk, commit=build.commit)
@task(queue='web')
def move_files(version_pk, hostname, html=False, localmedia=False, search=False,
pdf=False, epub=False):
"""Task to move built documentation to web servers
:param version_pk: Version id to sync files for
:param hostname: Hostname to sync to
:param html: Sync HTML
:type html: bool
:param localmedia: Sync local media files
:type localmedia: bool
:param search: Sync search files
:type search: bool
:param pdf: Sync PDF files
:type pdf: bool
:param epub: Sync ePub files
:type epub: bool
"""
version = Version.objects.get(pk=version_pk)
if html:
from_path = version.project.artifact_path(
version=version.slug, type_=version.project.documentation_type)
target = version.project.rtd_build_path(version.slug)
Syncer.copy(from_path, target, host=hostname)
if 'sphinx' in version.project.documentation_type:
if localmedia:
from_path = version.project.artifact_path(
version=version.slug, type_='sphinx_localmedia')
to_path = version.project.get_production_media_path(
type_='htmlzip', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
if search:
from_path = version.project.artifact_path(
version=version.slug, type_='sphinx_search')
to_path = version.project.get_production_media_path(
type_='json', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
# Always move PDF's because the return code lies.
if pdf:
from_path = version.project.artifact_path(version=version.slug,
type_='sphinx_pdf')
to_path = version.project.get_production_media_path(
type_='pdf', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
if epub:
from_path = version.project.artifact_path(version=version.slug,
type_='sphinx_epub')
to_path = version.project.get_production_media_path(
type_='epub', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
if 'mkdocs' in version.project.documentation_type:
if search:
from_path = version.project.artifact_path(version=version.slug,
type_='mkdocs_json')
to_path = version.project.get_production_media_path(
type_='json', version_slug=version.slug, include_file=False)
Syncer.copy(from_path, to_path, host=hostname)
@task(queue='web')
def update_search(version_pk, commit, delete_non_commit_files=True):
"""Task to update search indexes
:param version_pk: Version id to update
:param commit: Commit that updated index
:param delete_non_commit_files: Delete files not in commit from index
"""
version = Version.objects.get(pk=version_pk)
if version.project.is_type_sphinx:
page_list = process_all_json_files(version, build_dir=False)
elif version.project.is_type_mkdocs:
page_list = process_mkdocs_json(version, build_dir=False)
else:
log.error('Unknown documentation type: %s',
version.project.documentation_type)
return
log_msg = ' '.join([page['path'] for page in page_list])
log.info("(Search Index) Sending Data: %s [%s]", version.project.slug,
log_msg)
index_search_request(
version=version,
page_list=page_list,
commit=commit,
project_scale=0,
page_scale=0,
# Don't index sections to speed up indexing.
# They aren't currently exposed anywhere.
section=False,
delete=delete_non_commit_files,
)
@task(queue='web')
def fileify(version_pk, commit):
"""
Create ImportedFile objects for all of a version's files.
This is a prereq for indexing the docs for search.
It also causes celery-haystack to kick off an index of the file.
"""
version = Version.objects.get(pk=version_pk)
project = version.project
if not project.cdn_enabled:
return
    if not commit:
        log.info(LOG_TEMPLATE
                 .format(project=project.slug, version=version.slug,
                         msg=('Imported File not being built because no commit '
                              'information')))
        return
path = project.rtd_build_path(version.slug)
if path:
log.info(LOG_TEMPLATE
.format(project=version.project.slug, version=version.slug,
msg='Creating ImportedFiles'))
_manage_imported_files(version, path, commit)
else:
log.info(LOG_TEMPLATE
.format(project=project.slug, version=version.slug,
msg='No ImportedFile files'))
def _manage_imported_files(version, path, commit):
"""Update imported files for version
:param version: Version instance
:param path: Path to search
:param commit: Commit that updated path
"""
changed_files = set()
for root, __, filenames in os.walk(path):
for filename in filenames:
dirpath = os.path.join(root.replace(path, '').lstrip('/'),
filename.lstrip('/'))
full_path = os.path.join(root, filename)
md5 = hashlib.md5(open(full_path, 'rb').read()).hexdigest()
try:
obj, __ = ImportedFile.objects.get_or_create(
project=version.project,
version=version,
path=dirpath,
name=filename,
)
except ImportedFile.MultipleObjectsReturned:
log.exception('Error creating ImportedFile')
continue
if obj.md5 != md5:
obj.md5 = md5
changed_files.add(dirpath)
if obj.commit != commit:
obj.commit = commit
obj.save()
# Delete ImportedFiles from previous versions
ImportedFile.objects.filter(project=version.project,
version=version
).exclude(commit=commit).delete()
# Purge Cache
changed_files = [resolve_path(
version.project, filename=file, version_slug=version.slug,
) for file in changed_files]
cdn_ids = getattr(settings, 'CDN_IDS', None)
if cdn_ids:
if version.project.slug in cdn_ids:
purge(cdn_ids[version.project.slug], changed_files)
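# Illustrative example (an assumption): the CDN_IDS setting consulted above is
# expected to map a project slug to the CDN zone identifier handed to `purge`,
# e.g.
#
#     CDN_IDS = {'pip': 'hypothetical-cdn-zone-id'}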
@task(queue='web')
def send_notifications(version_pk, build_pk):
version = Version.objects.get(pk=version_pk)
build = Build.objects.get(pk=build_pk)
for hook in version.project.webhook_notifications.all():
webhook_notification(version, build, hook.url)
for email in version.project.emailhook_notifications.all().values_list('email', flat=True):
email_notification(version, build, email)
def email_notification(version, build, email):
"""Send email notifications for build failure
    :param version: :py:class:`Version` instance that failed
    :param build: :py:class:`Build` instance that failed
:param email: Email recipient address
"""
log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
msg='sending email to: %s' % email))
context = {'version': version,
'project': version.project,
'build': build,
'build_url': 'https://{0}{1}'.format(
getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
build.get_absolute_url()),
'unsub_url': 'https://{0}{1}'.format(
getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
reverse('projects_notifications', args=[version.project.slug])),
}
if build.commit:
title = _('Failed: {project.name} ({commit})').format(commit=build.commit[:8], **context)
else:
title = _('Failed: {project.name} ({version.verbose_name})').format(**context)
send_email(
email,
title,
template='projects/email/build_failed.txt',
template_html='projects/email/build_failed.html',
context=context
)
def webhook_notification(version, build, hook_url):
"""Send webhook notification for project webhook
:param version: Version instance to send hook for
:param build: Build instance that failed
:param hook_url: Hook URL to send to
"""
project = version.project
data = json.dumps({
'name': project.name,
'slug': project.slug,
'build': {
'id': build.id,
'success': build.success,
'date': build.date.strftime('%Y-%m-%d %H:%M:%S'),
}
})
log.debug(LOG_TEMPLATE
.format(project=project.slug, version='',
msg='sending notification to: %s' % hook_url))
requests.post(hook_url, data=data)
@task(queue='web')
def update_static_metadata(project_pk, path=None):
"""Update static metadata JSON file
Metadata settings include the following project settings:
version
The default version for the project, default: `latest`
language
The default language for the project, default: `en`
languages
List of languages built by linked translation projects.
"""
project = Project.objects.get(pk=project_pk)
if not path:
path = project.static_metadata_path()
log.info(LOG_TEMPLATE.format(
project=project.slug,
version='',
msg='Updating static metadata',
))
translations = [trans.language for trans in project.translations.all()]
languages = set(translations)
# Convert to JSON safe types
metadata = {
'version': project.default_version,
'language': project.language,
'languages': list(languages),
'single_version': project.single_version,
}
try:
fh = open(path, 'w+')
json.dump(metadata, fh)
fh.close()
Syncer.copy(path, path, host=socket.gethostname(), file=True)
except (AttributeError, IOError) as e:
log.debug(LOG_TEMPLATE.format(
project=project.slug,
version='',
msg='Cannot write to metadata.json: {0}'.format(e)
))
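# Illustrative example (values made up) of the metadata.json written above:
#
#     {"version": "latest", "language": "en",
#      "languages": ["es", "ja"], "single_version": false}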
# Random Tasks
@task()
def remove_dir(path):
"""
Remove a directory on the build/celery server.
This is mainly a wrapper around shutil.rmtree so that app servers
can kill things on the build server.
"""
log.info("Removing %s", path)
shutil.rmtree(path)
@task(queue='web')
def clear_artifacts(version_pk):
"""Remove artifacts from the web servers"""
version = Version.objects.get(pk=version_pk)
clear_pdf_artifacts(version)
clear_epub_artifacts(version)
clear_htmlzip_artifacts(version)
clear_html_artifacts(version)
def clear_pdf_artifacts(version):
run_on_app_servers('rm -rf %s'
% version.project.get_production_media_path(
type_='pdf', version_slug=version.slug))
def clear_epub_artifacts(version):
run_on_app_servers('rm -rf %s'
% version.project.get_production_media_path(
type_='epub', version_slug=version.slug))
def clear_htmlzip_artifacts(version):
run_on_app_servers('rm -rf %s'
% version.project.get_production_media_path(
type_='htmlzip', version_slug=version.slug))
def clear_html_artifacts(version):
run_on_app_servers('rm -rf %s' % version.project.rtd_build_path(version=version.slug))
@task(queue='web')
def remove_path_from_web(path):
"""
Remove the given path from the web servers file system.
"""
    # Sanity check for spaces in the path since spaces would result in
# deleting unpredictable paths with "rm -rf".
assert ' ' not in path, "No spaces allowed in path"
# TODO: We need some proper escaping here for the given path.
run_on_app_servers('rm -rf {path}'.format(path=path))
| mit | 5,030,569,804,307,383,000 | 34.576577 | 99 | 0.584167 | false |
XiaoJianfeng/ibl | util/txt2xlsx.py | 1 | 1634 | #!/usr/bin/env python
import sys
import argparse
import csv
import xlsxwriter
''' convert txt file[s] to .xlsx file
usage: $0 [txt1 txt2 txt...] xlsx_file
multiple txt files will be added as separate excel sheets
'''
#-----------------------------------------------------------------------------
def correct_data_type(v):
""" convert v to int if possible, else float if possible, else just return as it is"""
try:
return int(v)
except:
try:
return float(v)
except:
return v
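# A few illustrative conversions for the helper above:
#     correct_data_type("3")    -> 3
#     correct_data_type("3.5")  -> 3.5
#     correct_data_type("abc")  -> "abc"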
def txt2xlsx(f_out, *f_in, **kwds):
sep = kwds['sep'] if ('sep' in kwds) else '\t'
workbook = xlsxwriter.Workbook(f_out)
for n, fn in enumerate(f_in):
fobj_in = sys.stdin if fn == '-' else open(fn, 'r')
f = csv.reader(fobj_in, delimiter=sep)
short_fn = "Sheet {}".format(n+1)
worksheet = workbook.add_worksheet(short_fn)
for i, row in enumerate(f):
worksheet.write_row(i, 0, map(correct_data_type, row))
sys.stderr.write("{num} lines in total for {name}\n".format(num=i+1, name=fn))
workbook.close()
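# Illustrative usage (an assumption): merge two tab-separated files into one
# workbook, one sheet per input file:
#
#     txt2xlsx('out.xlsx', 'a.txt', 'b.txt', sep='\t')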
#-----------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='convert txt to excel xlsx')
parser.add_argument('-d', '--delimiter', default='\t', help="delimiter for the txt file")
parser.add_argument('input', nargs='*', default=['-'], help="input file[s], blank for stdin")
parser.add_argument('output', help="output file")
args = parser.parse_args()
txt2xlsx(args.output, *args.input, sep=args.delimiter)
| lgpl-3.0 | -6,901,755,302,760,224,000 | 32.346939 | 97 | 0.55814 | false |
jseabold/statsmodels | statsmodels/tsa/statespace/tests/test_impulse_responses.py | 3 | 26272 | """
Tests for impulse responses of time series
Author: Chad Fulton
License: Simplified-BSD
"""
import warnings
import numpy as np
import pandas as pd
from scipy.stats import ortho_group
import pytest
from numpy.testing import assert_, assert_allclose
from statsmodels.tools.sm_exceptions import EstimationWarning
from statsmodels.tsa.statespace import (mlemodel, sarimax, structural, varmax,
dynamic_factor)
def test_sarimax():
# AR(1)
mod = sarimax.SARIMAX([0], order=(1, 0, 0))
phi = 0.5
actual = mod.impulse_responses([phi, 1], steps=10)
desired = np.r_[[phi**i for i in range(11)]]
assert_allclose(actual, desired)
# MA(1)
mod = sarimax.SARIMAX([0], order=(0, 0, 1))
theta = 0.5
actual = mod.impulse_responses([theta, 1], steps=10)
desired = np.r_[1, theta, [0]*9]
assert_allclose(actual, desired)
# ARMA(2, 2) + constant
# Stata:
# webuse lutkepohl2
# arima dln_inc, arima(2, 0, 2)
# irf create irf1, set(irf1) step(10)
# irf table irf
params = [.01928228, -.03656216, .7588994,
.27070341, -.72928328, .01122177**0.5]
mod = sarimax.SARIMAX([0], order=(2, 0, 2), trend='c')
actual = mod.impulse_responses(params, steps=10)
desired = [1, .234141, .021055, .17692, .00951, .133917, .002321, .101544,
-.001951, .077133, -.004301]
assert_allclose(actual, desired, atol=1e-6)
# SARIMAX(1,1,1)x(1,0,1,4) + constant + exog
# Stata:
# webuse lutkepohl2
# gen exog = _n^2
# arima inc exog, arima(1,1,1) sarima(1,0,1,4)
# irf create irf2, set(irf2) step(10)
# irf table irf
params = [.12853289, 12.207156, .86384742, -.71463236,
.81878967, -.9533955, 14.043884**0.5]
exog = np.arange(1, 92)**2
mod = sarimax.SARIMAX(np.zeros(91), order=(1, 1, 1),
seasonal_order=(1, 0, 1, 4), trend='c', exog=exog,
simple_differencing=True)
actual = mod.impulse_responses(params, steps=10)
desired = [1, .149215, .128899, .111349, -.038417, .063007, .054429,
.047018, -.069598, .018641, .016103]
assert_allclose(actual, desired, atol=1e-6)
def test_structural():
steps = 10
# AR(1)
mod = structural.UnobservedComponents([0], autoregressive=1)
phi = 0.5
actual = mod.impulse_responses([1, phi], steps)
desired = np.r_[[phi**i for i in range(steps + 1)]]
assert_allclose(actual, desired)
# ARX(1)
# This is adequately tested in test_simulate.py, since in the time-varying
# case `impulse_responses` just calls `simulate`
# Irregular
mod = structural.UnobservedComponents([0], 'irregular')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Fixed intercept
# (in practice this is a deterministic constant, because an irregular
# component must be added)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod = structural.UnobservedComponents([0], 'fixed intercept')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Deterministic constant
mod = structural.UnobservedComponents([0], 'deterministic constant')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Local level
mod = structural.UnobservedComponents([0], 'local level')
actual = mod.impulse_responses([1., 1.], steps)
assert_allclose(actual, 1)
# Random walk
mod = structural.UnobservedComponents([0], 'random walk')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 1)
# Fixed slope
# (in practice this is a deterministic trend, because an irregular
# component must be added)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod = structural.UnobservedComponents([0], 'fixed slope')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Deterministic trend
mod = structural.UnobservedComponents([0], 'deterministic trend')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Local linear deterministic trend
mod = structural.UnobservedComponents(
[0], 'local linear deterministic trend')
actual = mod.impulse_responses([1., 1.], steps)
assert_allclose(actual, 1)
# Random walk with drift
mod = structural.UnobservedComponents([0], 'random walk with drift')
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 1)
# Local linear trend
mod = structural.UnobservedComponents([0], 'local linear trend')
# - shock the level
actual = mod.impulse_responses([1., 1., 1.], steps)
assert_allclose(actual, 1)
# - shock the trend
actual = mod.impulse_responses([1., 1., 1.], steps, impulse=1)
assert_allclose(actual, np.arange(steps + 1))
# Smooth trend
mod = structural.UnobservedComponents([0], 'smooth trend')
actual = mod.impulse_responses([1., 1.], steps)
assert_allclose(actual, np.arange(steps + 1))
# Random trend
mod = structural.UnobservedComponents([0], 'random trend')
actual = mod.impulse_responses([1., 1.], steps)
assert_allclose(actual, np.arange(steps + 1))
# Seasonal (deterministic)
mod = structural.UnobservedComponents([0], 'irregular', seasonal=2,
stochastic_seasonal=False)
actual = mod.impulse_responses([1.], steps)
assert_allclose(actual, 0)
# Seasonal (stochastic)
mod = structural.UnobservedComponents([0], 'irregular', seasonal=2)
actual = mod.impulse_responses([1., 1.], steps)
desired = np.r_[1, np.tile([-1, 1], steps // 2)]
assert_allclose(actual, desired)
# Cycle (deterministic)
mod = structural.UnobservedComponents([0], 'irregular', cycle=True)
actual = mod.impulse_responses([1., 1.2], steps)
assert_allclose(actual, 0)
# Cycle (stochastic)
mod = structural.UnobservedComponents([0], 'irregular', cycle=True,
stochastic_cycle=True)
actual = mod.impulse_responses([1., 1., 1.2], steps=10)
x1 = [np.cos(1.2), np.sin(1.2)]
x2 = [-np.sin(1.2), np.cos(1.2)]
T = np.array([x1, x2])
desired = np.zeros(steps + 1)
states = [1, 0]
for i in range(steps + 1):
desired[i] += states[0]
states = np.dot(T, states)
assert_allclose(actual, desired)
def test_varmax():
steps = 10
# Clear warnings
varmax.__warningregistry__ = {}
# VAR(2) - single series
mod1 = varmax.VARMAX([[0]], order=(2, 0), trend='n')
mod2 = sarimax.SARIMAX([0], order=(2, 0, 0))
actual = mod1.impulse_responses([0.5, 0.2, 1], steps)
desired = mod2.impulse_responses([0.5, 0.2, 1], steps)
assert_allclose(actual, desired)
# VMA(2) - single series
mod1 = varmax.VARMAX([[0]], order=(0, 2), trend='n')
mod2 = sarimax.SARIMAX([0], order=(0, 0, 2))
actual = mod1.impulse_responses([0.5, 0.2, 1], steps)
desired = mod2.impulse_responses([0.5, 0.2, 1], steps)
assert_allclose(actual, desired)
# VARMA(2, 2) - single series
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod1 = varmax.VARMAX([[0]], order=(2, 2), trend='n')
mod2 = sarimax.SARIMAX([0], order=(2, 0, 2))
actual = mod1.impulse_responses([0.5, 0.2, 0.1, -0.2, 1], steps)
desired = mod2.impulse_responses([0.5, 0.2, 0.1, -0.2, 1], steps)
assert_allclose(actual, desired)
# VARMA(2, 2) + trend - single series
warning = EstimationWarning
match = r'VARMA\(p,q\) models is not'
with pytest.warns(warning, match=match):
mod1 = varmax.VARMAX([[0]], order=(2, 2), trend='c')
mod2 = sarimax.SARIMAX([0], order=(2, 0, 2), trend='c')
actual = mod1.impulse_responses([10, 0.5, 0.2, 0.1, -0.2, 1], steps)
desired = mod2.impulse_responses([10, 0.5, 0.2, 0.1, -0.2, 1], steps)
assert_allclose(actual, desired)
# VAR(2) + constant
# Stata:
# webuse lutkepohl2
# var dln_inv dln_inc, lags(1/2)
# irf create irf3, set(irf3) step(10)
# irf table irf
# irf table oirf
params = [-.00122728, .01503679,
-.22741923, .71030531, -.11596357, .51494891,
.05974659, .02094608, .05635125, .08332519,
.04297918, .00159473, .01096298]
irf_00 = [1, -.227419, -.021806, .093362, -.001875, -.00906, .009605,
.001323, -.001041, .000769, .00032]
irf_01 = [0, .059747, .044015, -.008218, .007845, .004629, .000104,
.000451, .000638, .000063, .000042]
irf_10 = [0, .710305, .36829, -.065697, .084398, .043038, .000533,
.005755, .006051, .000548, .000526]
irf_11 = [1, .020946, .126202, .066419, .028735, .007477, .009878,
.003287, .001266, .000986, .0005]
oirf_00 = [0.042979, -0.008642, -0.00035, 0.003908, 0.000054, -0.000321,
0.000414, 0.000066, -0.000035, 0.000034, 0.000015]
oirf_01 = [0.001595, 0.002601, 0.002093, -0.000247, 0.000383, 0.000211,
0.00002, 0.000025, 0.000029, 4.30E-06, 2.60E-06]
oirf_10 = [0, 0.007787, 0.004037, -0.00072, 0.000925, 0.000472, 5.80E-06,
0.000063, 0.000066, 6.00E-06, 5.80E-06]
oirf_11 = [0.010963, 0.00023, 0.001384, 0.000728, 0.000315, 0.000082,
0.000108, 0.000036, 0.000014, 0.000011, 5.50E-06]
mod = varmax.VARMAX([[0, 0]], order=(2, 0), trend='c')
# IRFs
actual = mod.impulse_responses(params, steps, impulse=0)
assert_allclose(actual, np.c_[irf_00, irf_01], atol=1e-6)
actual = mod.impulse_responses(params, steps, impulse=1)
assert_allclose(actual, np.c_[irf_10, irf_11], atol=1e-6)
# Orthogonalized IRFs
actual = mod.impulse_responses(params, steps, impulse=0,
orthogonalized=True)
assert_allclose(actual, np.c_[oirf_00, oirf_01], atol=1e-6)
actual = mod.impulse_responses(params, steps, impulse=1,
orthogonalized=True)
assert_allclose(actual, np.c_[oirf_10, oirf_11], atol=1e-6)
# VARMA(2, 2) + trend + exog
# TODO: This is just a smoke test
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod = varmax.VARMAX(
np.random.normal(size=(steps, 2)), order=(2, 2), trend='c',
exog=np.ones(steps), enforce_stationarity=False,
enforce_invertibility=False)
mod.impulse_responses(mod.start_params, steps)
def test_dynamic_factor():
steps = 10
exog = np.random.normal(size=steps)
# DFM: 2 series, AR(2) factor
mod1 = dynamic_factor.DynamicFactor([[0, 0]], k_factors=1, factor_order=2)
mod2 = sarimax.SARIMAX([0], order=(2, 0, 0))
actual = mod1.impulse_responses([-0.9, 0.8, 1., 1., 0.5, 0.2], steps)
desired = mod2.impulse_responses([0.5, 0.2, 1], steps)
assert_allclose(actual[:, 0], -0.9 * desired)
assert_allclose(actual[:, 1], 0.8 * desired)
# DFM: 2 series, AR(2) factor, exog
mod1 = dynamic_factor.DynamicFactor(np.zeros((steps, 2)), k_factors=1,
factor_order=2, exog=exog)
mod2 = sarimax.SARIMAX([0], order=(2, 0, 0))
actual = mod1.impulse_responses(
[-0.9, 0.8, 5, -2, 1., 1., 0.5, 0.2], steps)
desired = mod2.impulse_responses([0.5, 0.2, 1], steps)
assert_allclose(actual[:, 0], -0.9 * desired)
assert_allclose(actual[:, 1], 0.8 * desired)
# DFM, 3 series, VAR(2) factor, exog, error VAR
# TODO: This is just a smoke test
mod = dynamic_factor.DynamicFactor(np.random.normal(size=(steps, 3)),
k_factors=2, factor_order=2, exog=exog,
error_order=2, error_var=True,
enforce_stationarity=False)
mod.impulse_responses(mod.start_params, steps)
def test_time_varying_ssm():
# Create an ad-hoc time-varying model
mod = sarimax.SARIMAX([0] * 11, order=(1, 0, 0))
mod.update([0.5, 1.0])
T = np.zeros((1, 1, 11))
T[..., :5] = 0.5
T[..., 5:] = 0.2
mod['transition'] = T
irfs = mod.ssm.impulse_responses()
desired = np.cumprod(np.r_[1, [0.5] * 4, [0.2] * 5]).reshape(10, 1)
assert_allclose(irfs, desired)
class TVSS(mlemodel.MLEModel):
"""
Time-varying state space model for testing
This creates a state space model with randomly generated time-varying
system matrices. When used in a test, that test should use
`reset_randomstate` to ensure consistent test runs.
"""
def __init__(self, endog, _k_states=None):
k_states = 2
k_posdef = 2
        # Allow subclasses to add additional states
if _k_states is None:
_k_states = k_states
super(TVSS, self).__init__(endog, k_states=_k_states,
k_posdef=k_posdef, initialization='diffuse')
self['obs_intercept'] = np.random.normal(
size=(self.k_endog, self.nobs))
self['design'] = np.zeros((self.k_endog, self.k_states, self.nobs))
self['transition'] = np.zeros(
(self.k_states, self.k_states, self.nobs))
self['selection'] = np.zeros(
(self.k_states, self.ssm.k_posdef, self.nobs))
self['design', :, :k_states, :] = np.random.normal(
size=(self.k_endog, k_states, self.nobs))
# For the transition matrices, enforce eigenvalues not too far outside
# unit circle. Otherwise, the random draws will often lead to large
# eigenvalues that cause the covariance matrices to blow up to huge
# values during long periods of missing data, which leads to numerical
# problems and essentially spurious test failures
D = [np.diag(d)
for d in np.random.uniform(-1.1, 1.1, size=(self.nobs, k_states))]
Q = ortho_group.rvs(k_states, size=self.nobs)
self['transition', :k_states, :k_states, :] = (
Q @ D @ Q.transpose(0, 2, 1)).transpose(1, 2, 0)
self['selection', :k_states, :, :] = np.random.normal(
size=(k_states, self.ssm.k_posdef, self.nobs))
# Need to make sure the covariances are positive definite
H05 = np.random.normal(size=(self.k_endog, self.k_endog, self.nobs))
Q05 = np.random.normal(
size=(self.ssm.k_posdef, self.ssm.k_posdef, self.nobs))
H = np.zeros_like(H05)
Q = np.zeros_like(Q05)
for t in range(self.nobs):
H[..., t] = np.dot(H05[..., t], H05[..., t].T)
Q[..., t] = np.dot(Q05[..., t], Q05[..., t].T)
self['obs_cov'] = H
self['state_cov'] = Q
def clone(self, endog, exog=None, **kwargs):
mod = self.__class__(endog, **kwargs)
for key in self.ssm.shapes.keys():
if key in ['obs', 'state_intercept']:
continue
n = min(self.nobs, mod.nobs)
mod[key, ..., :n] = self.ssm[key, ..., :n]
return mod
def test_time_varying_in_sample(reset_randomstate):
mod = TVSS(np.zeros((10, 2)))
# Compute the max number of in-sample IRFs
irfs = mod.impulse_responses([], steps=mod.nobs - 1)
# Compute the same thing, but now with explicit anchor argument
irfs_anchor = mod.impulse_responses([], steps=mod.nobs - 1, anchor=0)
# Cumulative IRFs
cirfs = mod.impulse_responses([], steps=mod.nobs - 1, cumulative=True)
# Orthogonalized IRFs
oirfs = mod.impulse_responses([], steps=mod.nobs - 1, orthogonalized=True)
# Cumulative, orthogonalized IRFs
coirfs = mod.impulse_responses([], steps=mod.nobs - 1, cumulative=True,
orthogonalized=True)
# Compute IRFs manually
Z = mod['design']
T = mod['transition']
R = mod['selection']
Q = mod['state_cov', ..., 0]
L = np.linalg.cholesky(Q)
desired_irfs = np.zeros((mod.nobs - 1, 2)) * np.nan
desired_oirfs = np.zeros((mod.nobs - 1, 2)) * np.nan
tmp = R[..., 0]
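    # tmp accumulates T_{i-1} ... T_1 @ R_0, so irf[i-1] is the first column of
    # Z_i @ tmp; the orthogonalized version post-multiplies by chol(Q) first.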
for i in range(1, mod.nobs):
desired_irfs[i - 1] = Z[:, :, i].dot(tmp)[:, 0]
desired_oirfs[i - 1] = Z[:, :, i].dot(tmp).dot(L)[:, 0]
tmp = T[:, :, i].dot(tmp)
assert_allclose(irfs, desired_irfs)
assert_allclose(irfs_anchor, desired_irfs)
assert_allclose(cirfs, np.cumsum(desired_irfs, axis=0))
assert_allclose(oirfs, desired_oirfs)
assert_allclose(coirfs, np.cumsum(desired_oirfs, axis=0))
def test_time_varying_out_of_sample(reset_randomstate):
mod = TVSS(np.zeros((10, 2)))
# Compute all in-sample IRFs and also one out-of-sample IRF
new_Z = np.random.normal(size=mod['design', :, :, -1].shape)
new_T = np.random.normal(size=mod['transition', :, :, -1].shape)
irfs = mod.impulse_responses(
[], steps=mod.nobs, design=new_Z[:, :, None],
transition=new_T[:, :, None])
# Compute the same thing, but now with explicit anchor argument
irfs_anchor = mod.impulse_responses(
[], steps=mod.nobs, anchor=0, design=new_Z[:, :, None],
transition=new_T[:, :, None])
# Cumulative IRFs
cirfs = mod.impulse_responses([], steps=mod.nobs, design=new_Z[:, :, None],
transition=new_T[:, :, None],
cumulative=True)
# Orthogonalized IRFs
oirfs = mod.impulse_responses([], steps=mod.nobs, design=new_Z[:, :, None],
transition=new_T[:, :, None],
orthogonalized=True)
# Cumulative, orthogonalized IRFs
coirfs = mod.impulse_responses(
[], steps=mod.nobs, design=new_Z[:, :, None],
transition=new_T[:, :, None], cumulative=True, orthogonalized=True)
# Compute IRFs manually
Z = mod['design']
T = mod['transition']
R = mod['selection']
Q = mod['state_cov', ..., 0]
L = np.linalg.cholesky(Q)
desired_irfs = np.zeros((mod.nobs, 2)) * np.nan
desired_oirfs = np.zeros((mod.nobs, 2)) * np.nan
tmp = R[..., 0]
for i in range(1, mod.nobs):
desired_irfs[i - 1] = Z[:, :, i].dot(tmp)[:, 0]
desired_oirfs[i - 1] = Z[:, :, i].dot(tmp).dot(L)[:, 0]
tmp = T[:, :, i].dot(tmp)
desired_irfs[mod.nobs - 1] = new_Z.dot(tmp)[:, 0]
desired_oirfs[mod.nobs - 1] = new_Z.dot(tmp).dot(L)[:, 0]
assert_allclose(irfs, desired_irfs)
assert_allclose(irfs_anchor, desired_irfs)
assert_allclose(cirfs, np.cumsum(desired_irfs, axis=0))
assert_allclose(oirfs, desired_oirfs)
assert_allclose(coirfs, np.cumsum(desired_oirfs, axis=0))
def test_time_varying_in_sample_anchored(reset_randomstate):
mod = TVSS(np.zeros((10, 2)))
# Compute the max number of in-sample IRFs
anchor = 2
irfs = mod.impulse_responses(
[], steps=mod.nobs - 1 - anchor, anchor=anchor)
# Cumulative IRFs
cirfs = mod.impulse_responses(
[], steps=mod.nobs - 1 - anchor, anchor=anchor,
cumulative=True)
# Orthogonalized IRFs
oirfs = mod.impulse_responses(
[], steps=mod.nobs - 1 - anchor, anchor=anchor,
orthogonalized=True)
# Cumulative, orthogonalized IRFs
coirfs = mod.impulse_responses(
[], steps=mod.nobs - 1 - anchor, anchor=anchor,
cumulative=True, orthogonalized=True)
# Compute IRFs manually
Z = mod['design']
T = mod['transition']
R = mod['selection']
Q = mod['state_cov', ..., anchor]
L = np.linalg.cholesky(Q)
desired_irfs = np.zeros((mod.nobs - anchor - 1, 2)) * np.nan
desired_oirfs = np.zeros((mod.nobs - anchor - 1, 2)) * np.nan
tmp = R[..., anchor]
for i in range(1, mod.nobs - anchor):
desired_irfs[i - 1] = Z[:, :, i + anchor].dot(tmp)[:, 0]
desired_oirfs[i - 1] = Z[:, :, i + anchor].dot(tmp).dot(L)[:, 0]
tmp = T[:, :, i + anchor].dot(tmp)
assert_allclose(irfs, desired_irfs)
assert_allclose(cirfs, np.cumsum(desired_irfs, axis=0))
assert_allclose(oirfs, desired_oirfs)
assert_allclose(coirfs, np.cumsum(desired_oirfs, axis=0))
def test_time_varying_out_of_sample_anchored(reset_randomstate):
mod = TVSS(np.zeros((10, 2)))
# Compute all in-sample IRFs and also one out-of-sample IRF
anchor = 2
new_Z = mod['design', :, :, -1]
new_T = mod['transition', :, :, -1]
irfs = mod.impulse_responses(
[], steps=mod.nobs - anchor, anchor=anchor, design=new_Z[:, :, None],
transition=new_T[:, :, None])
# Cumulative IRFs
cirfs = mod.impulse_responses(
[], steps=mod.nobs - anchor, anchor=anchor,
design=new_Z[:, :, None], transition=new_T[:, :, None],
cumulative=True)
# Orthogonalized IRFs
oirfs = mod.impulse_responses(
[], steps=mod.nobs - anchor, anchor=anchor,
design=new_Z[:, :, None], transition=new_T[:, :, None],
orthogonalized=True)
# Cumulative, orthogonalized IRFs
coirfs = mod.impulse_responses(
[], steps=mod.nobs - anchor, anchor=anchor,
design=new_Z[:, :, None], transition=new_T[:, :, None],
cumulative=True, orthogonalized=True)
# Compute IRFs manually
Z = mod['design']
T = mod['transition']
R = mod['selection']
Q = mod['state_cov', ..., anchor]
L = np.linalg.cholesky(Q)
desired_irfs = np.zeros((mod.nobs - anchor, 2)) * np.nan
desired_oirfs = np.zeros((mod.nobs - anchor, 2)) * np.nan
tmp = R[..., anchor]
for i in range(1, mod.nobs - anchor):
desired_irfs[i - 1] = Z[:, :, i + anchor].dot(tmp)[:, 0]
desired_oirfs[i - 1] = Z[:, :, i + anchor].dot(tmp).dot(L)[:, 0]
tmp = T[:, :, i + anchor].dot(tmp)
desired_irfs[mod.nobs - anchor - 1] = new_Z.dot(tmp)[:, 0]
desired_oirfs[mod.nobs - anchor - 1] = new_Z.dot(tmp).dot(L)[:, 0]
assert_allclose(irfs, desired_irfs)
assert_allclose(cirfs, np.cumsum(desired_irfs, axis=0))
assert_allclose(oirfs, desired_oirfs)
assert_allclose(coirfs, np.cumsum(desired_oirfs, axis=0))
def test_time_varying_out_of_sample_anchored_end(reset_randomstate):
mod = TVSS(np.zeros((10, 2)))
    # Cannot compute any in-sample IRFs when anchoring at the end
with pytest.raises(ValueError, match='Model has time-varying'):
mod.impulse_responses([], steps=2, anchor='end')
# Compute two out-of-sample IRFs
new_Z = np.random.normal(size=mod['design', :, :, -2:].shape)
new_T = np.random.normal(size=mod['transition', :, :, -2:].shape)
irfs = mod.impulse_responses([], steps=2, anchor='end',
design=new_Z, transition=new_T)
# Cumulative IRFs
cirfs = mod.impulse_responses(
[], steps=2, anchor='end', design=new_Z, transition=new_T,
cumulative=True)
# Orthogonalized IRFs
oirfs = mod.impulse_responses(
[], steps=2, anchor='end', design=new_Z, transition=new_T,
orthogonalized=True)
# Cumulative, orthogonalized IRFs
coirfs = mod.impulse_responses(
[], steps=2, anchor='end', design=new_Z, transition=new_T,
cumulative=True, orthogonalized=True)
# Compute IRFs manually
R = mod['selection']
Q = mod['state_cov', ..., -1]
L = np.linalg.cholesky(Q)
desired_irfs = np.zeros((2, 2)) * np.nan
desired_oirfs = np.zeros((2, 2)) * np.nan
# desired[0] = 0
# Z_{T+1} R_T
tmp = R[..., -1]
desired_irfs[0] = new_Z[:, :, 0].dot(tmp)[:, 0]
desired_oirfs[0] = new_Z[:, :, 0].dot(tmp).dot(L)[:, 0]
# T_{T+1} R_T
tmp = new_T[..., 0].dot(tmp)
# Z_{T+2} T_{T+1} R_T
desired_irfs[1] = new_Z[:, :, 1].dot(tmp)[:, 0]
desired_oirfs[1] = new_Z[:, :, 1].dot(tmp).dot(L)[:, 0]
assert_allclose(irfs, desired_irfs)
assert_allclose(cirfs, np.cumsum(desired_irfs, axis=0))
assert_allclose(oirfs, desired_oirfs)
assert_allclose(coirfs, np.cumsum(desired_oirfs, axis=0))
def test_pandas_univariate_rangeindex():
# Impulse responses have RangeIndex
endog = pd.Series(np.zeros(1))
mod = sarimax.SARIMAX(endog)
res = mod.filter([0.5, 1.])
actual = res.impulse_responses(2)
desired = pd.Series([1., 0.5, 0.25])
assert_allclose(res.impulse_responses(2), desired)
assert_(actual.index.equals(desired.index))
def test_pandas_univariate_dateindex():
# Impulse responses still have RangeIndex (i.e. aren't wrapped with dates)
ix = pd.date_range(start='2000', periods=1, freq='M')
endog = pd.Series(np.zeros(1), index=ix)
mod = sarimax.SARIMAX(endog)
res = mod.filter([0.5, 1.])
actual = res.impulse_responses(2)
desired = pd.Series([1., 0.5, 0.25])
assert_allclose(res.impulse_responses(2), desired)
assert_(actual.index.equals(desired.index))
def test_pandas_multivariate_rangeindex():
# Impulse responses have RangeIndex
endog = pd.DataFrame(np.zeros((1, 2)))
mod = varmax.VARMAX(endog, trend='n')
res = mod.filter([0.5, 0., 0., 0.2, 1., 0., 1.])
actual = res.impulse_responses(2)
desired = pd.DataFrame([[1., 0.5, 0.25], [0., 0., 0.]]).T
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
def test_pandas_multivariate_dateindex():
# Impulse responses still have RangeIndex (i.e. aren't wrapped with dates)
ix = pd.date_range(start='2000', periods=1, freq='M')
endog = pd.DataFrame(np.zeros((1, 2)), index=ix)
mod = varmax.VARMAX(endog, trend='n')
res = mod.filter([0.5, 0., 0., 0.2, 1., 0., 1.])
actual = res.impulse_responses(2)
desired = pd.DataFrame([[1., 0.5, 0.25], [0., 0., 0.]]).T
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
def test_pandas_anchor():
# Test that anchor with dates works
ix = pd.date_range(start='2000', periods=10, freq='M')
endog = pd.DataFrame(np.zeros((10, 2)), index=ix)
mod = TVSS(endog)
res = mod.filter([])
desired = res.impulse_responses(2, anchor=1)
# Anchor to date
actual = res.impulse_responses(2, anchor=ix[1])
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
# Anchor to negative index
actual = res.impulse_responses(2, anchor=-9)
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
| bsd-3-clause | 1,764,803,812,234,432,300 | 36.801439 | 79 | 0.59748 | false |
ricarkol/agentless-system-crawler | tests/test_namespace.py | 1 | 3368 | import unittest
import docker
import requests.exceptions
import tempfile
import os
import shutil
import subprocess
import sys
from crawler.namespace import run_as_another_namespace
from crawler.crawler_exceptions import CrawlTimeoutError, CrawlError
all_namespaces = ["user", "pid", "uts", "ipc", "net", "mnt"]
# Functions used to test the library
def func(arg1=None, arg2=None):
return "test %s %s" % (arg1, arg2)
def func_no_args(arg="default"):
return "test %s" % (arg)
class FooError(Exception):
pass
def func_crash(arg):
raise FooError("oops")
def func_infinite_loop(arg):
while True:
a = 1
# Tests conducted with a single container running.
class NamespaceLibTests(unittest.TestCase):
image_name = 'alpine:latest'
def setUp(self):
self.docker = docker.Client(
base_url='unix://var/run/docker.sock', version='auto')
try:
if len(self.docker.containers()) != 0:
raise Exception(
"Sorry, this test requires a machine with no docker containers running.")
except requests.exceptions.ConnectionError as e:
print "Error connecting to docker daemon, are you in the docker group? You need to be in the docker group."
self.docker.pull(repository='alpine', tag='latest')
self.container = self.docker.create_container(
image=self.image_name, command='/bin/sleep 60')
self.tempd = tempfile.mkdtemp(prefix='crawlertest.')
self.docker.start(container=self.container['Id'])
inspect = self.docker.inspect_container(self.container['Id'])
print inspect
self.pid = str(inspect['State']['Pid'])
def tearDown(self):
self.docker.stop(container=self.container['Id'])
self.docker.remove_container(container=self.container['Id'])
shutil.rmtree(self.tempd)
def test_run_as_another_namespace_simple_function(self):
res = run_as_another_namespace(
self.pid, all_namespaces, func, "arg1", "arg2")
assert res == "test arg1 arg2"
print sys._getframe().f_code.co_name, 1
def test_run_as_another_namespace_simple_function_no_args(self):
res = run_as_another_namespace(self.pid, all_namespaces, func_no_args)
assert res == "test default"
print sys._getframe().f_code.co_name, 1
def test_run_as_another_namespace_crashing_function(self):
try:
res = run_as_another_namespace(
self.pid, all_namespaces, func_crash, "arg")
except FooError, e:
            # we should get a FooError exception
pass # all good
except Exception, e:
            assert False
    # TODO: why does this fail here but not in old/test_namespace.py?
def _test_run_as_another_namespace_infinite_loop_function(self):
try:
res = run_as_another_namespace(
self.pid, all_namespaces, func_infinite_loop, "arg")
except CrawlTimeoutError, e:
# we should get a TimeoutError exception
pass # all good
except Exception, e:
            assert False
if __name__ == '__main__':
logging.basicConfig(filename='test_namespace.log', filemode='a',
format='%(asctime)s %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| apache-2.0 | 5,370,705,531,158,389,000 | 31.384615 | 119 | 0.629157 | false |
eResearchSA/reporting-producers | reporting/utilities.py | 1 | 3059 | #!/usr/bin/env python
# pylint: disable=broad-except
import logging
import sys
import random
import string
import socket
import datetime
import platform
import time
import os
from reporting.exceptions import PluginInitialisationError
global_vars=None
def set_global(vars):
global global_vars
global_vars = vars
def getLogger(name):
"""Get logging.Logger instance with logger name convention
"""
if "." in name:
name = "producer.%s" % name.rpartition(".")[-1]
return logging.getLogger(name)
log = getLogger(__name__)
def excepthook(exc_type, exc_value, exc_traceback):
"""Except hook used to log unhandled exceptions to log
"""
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
getLogger("producer").critical(
"Unhandled exception in reporting producer:", exc_info=(exc_type, exc_value, exc_traceback))
#return sys.__excepthook__(exctype, value, traceback)
def get_log_level(verbose):
if verbose <= 0:
return logging.ERROR
elif verbose == 1:
return logging.WARNING
elif verbose == 2:
return logging.INFO
return logging.DEBUG
def get_hostname():
"""Get the host name of a producer"""
global global_vars
if global_vars is not None and 'hostname' in global_vars:
return global_vars['hostname']
try:
return socket.getfqdn()
except:
return platform.node()
def list_to_dict(d, l, value):
if len(l) == 1:
d[l[0]] = value
else:
if l[0] not in d:
d[l[0]] = {}
list_to_dict(d[l[0]], l[1:], value)
def formatExceptionInfo():
""" Consistently format exception information """
cla, exc = sys.exc_info()[:2]
return (cla.__name__, str(exc))
def init_message():
return {'timestamp': int(time.time()), 'hostname': get_hostname()}
def init_object(class_name, *args, **kwargs):
mod_name = '.'.join(class_name.split('.')[:-1])
class_name = class_name.split('.')[-1]
log.debug("Loading plugin %s %s"%(mod_name, class_name))
try:
mod = __import__(mod_name, globals(), locals(), [class_name])
except SyntaxError as e:
raise PluginInitialisationError(
"Plugin %s (%s) contains a syntax error at line %s" %
(class_name, e.filename, e.lineno))
except ImportError as e:
log.exception(e)
raise PluginInitialisationError(
"Failed to import plugin %s: %s" %
(class_name, e[0]))
klass = getattr(mod, class_name, None)
if not klass:
raise PluginInitialisationError(
'Plugin class %s does not exist' % class_name)
try:
return klass(*args, **kwargs)
except Exception as exc:
raise PluginInitialisationError(
"Failed to load plugin %s with "
"the following error: %s - %s" %
(class_name, exc.__class__.__name__, exc.message))
def touch(fname, times=None):
with open(fname, 'a'):
os.utime(fname, times) | apache-2.0 | -4,377,217,827,614,726,000 | 28.142857 | 100 | 0.61883 | false |
nateGeorge/IDmyDog | process_ims/other/2d_haralick_map.py | 1 | 3493 | from __future__ import print_function
import pandas as pd
import pickle as pk
import cv2
import os
import re
import progressbar
import imutils
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from mahotas.features import haralick
import json
from sklearn.decomposition import PCA
plt.style.use('seaborn-dark')
def get_fg_bg_rects(fg):
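    # Split the RGBA cutout into a grid of 10x10-pixel cells and classify each
    # cell as foreground or background from the alpha channel at its corners.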
b, g, r, a = cv2.split(fg)
h, w = fg.shape[:2]
h -= 1
w -= 1 # to avoid indexing problems
rectDims = [10, 10] # h, w of rectangles
hRects = h / rectDims[0]
wRects = w / rectDims[1]
fgRects = []
bgRects = []
for i in range(wRects):
for j in range(hRects):
pt1 = (i * rectDims[0], j * rectDims[1])
pt2 = ((i + 1) * rectDims[0], (j + 1) * rectDims[1])
# alpha is 255 over the part of the dog
if a[pt1[1], pt1[0]] == 255 and a[pt2[1], pt2[0]] == 255:
fgRects.append([pt1, pt2])
#cv2.rectangle(fgcp, pt1, pt2, [0, 0, 255], 2) # for debugging
elif a[pt1[1], pt1[0]] == 0 and a[pt2[1], pt2[0]] == 0:
bgRects.append([pt1, pt2])
#cv2.rectangle(bgcp, pt1, pt2, [0, 0, 255], 2)
return fgRects, bgRects
def get_avg_hara(im, rects):
# returns the haralick texture averaged over all rectangles in an image
if len(rects)==0:
return None
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
hara = 0
for r in rects:
# slice images as: img[y0:y1, x0:x1]
hara += haralick(im[r[0][1]:r[1][1], r[0][0]:r[1][0]]).mean(0)
hara /= (len(rects))
return hara
def make_hara_map(im, rects):
# draws heatmap of haralick texture PCA dim1 variance
if len(rects)==0:
return None
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
hara = []
for r in rects:
# slice images as: img[y0:y1, x0:x1]
hara.append(pcaFG.transform(haralick(im[r[0][1]:r[1][1], r[0][0]:r[1][0]]).mean(0).reshape(1, -1)))
hara = np.array(hara)
haraMean = np.mean(hara, axis=0)
haraStd = np.std(hara, axis=0)
haraMins = np.min(hara, axis=0)
haraMaxs = np.max(hara, axis=0)
norm = (haraMaxs-haraMins)
copy = im.copy()
copy = cv2.cvtColor(copy, cv2.COLOR_BGRA2RGBA)
im = cv2.cvtColor(im, cv2.COLOR_BGRA2RGBA)
for i in range(hara.shape[0]):
brightScale = 255*(hara[i] - haraMins)/norm
bright = brightScale[0][0]
r = rects[i]
cv2.rectangle(copy, r[0], r[1], [0, bright, 0, 255], -1)
f, axarr = plt.subplots(2, 1)
axarr[0].imshow(copy)
axarr[1].imshow(im)
plt.show()
# load configuration
with open('../../config.json', 'rb') as f:
config = json.load(f)
mainImPath = config['image_dir']
pDir = config['pickle_dir']
pcaFG = pk.load(open(pDir + 'pcaFG.pk', 'rb'))
bb = pk.load(open(pDir + 'pDogs-bounding-boxes-clean.pd.pk', 'rb'))
bb.dropna(inplace=True)
# do something like sorted(bb.breed.unique().tolist())[50:] to check another breed
for breed in sorted(bb.breed.unique().tolist())[50:]:
print('breed:', breed)
cropDir = mainImPath + breed + '/grabcut/'
fgDir = cropDir + 'fg/'
fgFiles = os.listdir(fgDir)
for fi in fgFiles:
try:
fg = cv2.imread(fgDir + fi, -1) # -1 tells it to load alpha channel
except Exception as err:
print('exception:', err)
continue
fgRects, bgRects = get_fg_bg_rects(fg)
make_hara_map(fg, fgRects)
| mit | -3,552,087,796,513,353,000 | 30.196429 | 107 | 0.583453 | false |
coddingtonbear/taskwarrior-subtask-capsule | setup.py | 1 | 1786 | import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
import uuid
from taskwarrior_subtask_capsule import __version__ as version_string
requirements_path = os.path.join(
os.path.dirname(__file__),
'requirements.txt',
)
try:
from pip.req import parse_requirements
requirements = [
str(req.req) for req in parse_requirements(
requirements_path,
session=uuid.uuid1()
)
]
except ImportError:
requirements = []
with open(requirements_path, 'r') as in_:
requirements = [
req for req in in_.readlines()
if not req.startswith('-')
and not req.startswith('#')
]
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(self.test_args)
sys.exit(errno)
setup(
name='taskwarrior-subtask-capsule',
version=version_string,
url='https://github.com/coddingtonbear/taskwarrior-subtask-capsule',
description=(
'Easily create subtasks'
),
author='Adam Coddington',
author_email='[email protected]',
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
],
install_requires=requirements,
tests_require=['tox'],
cmdclass = {'test': Tox},
packages=find_packages(),
entry_points={
'taskwarrior_capsules': [
'subtask = taskwarrior_subtask_capsule.capsule:Subtask',
]
},
)
| mit | 365,277,337,414,721,800 | 25.264706 | 72 | 0.621501 | false |
GrognardsFromHell/TemplePlus | tpdatasrc/tpgamefiles/rules/d20_combat/to_hit_processing.py | 1 | 12638 | from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import logbook
import roll_history
debug_enabled = False
def debug_print(*args):
if debug_enabled:
for arg in args:
print arg,
return
def handle_sanctuary(to_hit_eo, d20a):
tgt = d20a.target
if tgt == OBJ_HANDLE_NULL or not tgt.is_critter():
return
if d20a.query_can_be_affected_action_perform(tgt):
return
flags = to_hit_eo.attack_packet.get_flags()
if flags & D20CAF_CRITICAL:
flags &= ~D20CAF_CRITICAL
if flags & D20CAF_HIT:
flags &= ~D20CAF_HIT
to_hit_eo.bonus_list.add_zeroed(262) # Action lost due to Sanctuary
to_hit_eo.attack_packet.set_flags(flags)
return
def add_percent_chance_history_stub():
return
def mirror_image_attack_roll(d20a, spell_id):
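    #Roll d20 + the attacker's to-hit bonus against a fixed DC of 20; the
    #image's touch AC is computed only for the roll history entry.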
performer = d20a.performer
target = d20a.target
#Target AC
mi_ac_evt_obj = tpdp.EventObjAttack()
mi_ac_evt_obj.attack_packet.attacker = performer
mi_ac_evt_obj.attack_packet.target = target
flags = d20a.flags
flags |= D20CAF_TOUCH_ATTACK
mi_ac_evt_obj.attack_packet.set_flags(flags)
mi_ac_evt_obj.attack_packet.action_type = d20a.action_type
mi_ac_evt_obj.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetAC, EK_NONE)
tgt_ac = mi_ac_evt_obj.bonus_list.get_sum()
#Performer to Hit Bonus
to_hit = tpdp.EventObjAttack()
to_hit.dispatch(performer, OBJ_HANDLE_NULL, ET_OnToHitBonus2, EK_NONE)
dc = 20
to_hit_dice = dice_new("1d{}".format(dc))
to_hit_roll = to_hit_dice.roll()
to_hit_bonus = to_hit.bonus_list.get_sum()
spell_enum = tpdp.SpellPacket(spell_id).spell_enum
spell_name = game.get_spell_mesline(spell_enum)
roll_id = tpdp.create_history_dc_roll(performer, tgt_ac, to_hit_dice, to_hit_roll, spell_name, to_hit.bonus_list)
result = to_hit_roll - dc + to_hit_bonus
d20a.roll_id_0 = roll_id
return result
def hitMirrorImage(d20a, numberOfMirrorImages):
#Check if real target was hit
#A roll of 1 indicates hit on real target
mirrorDice = dice_new("1d{}".format(numberOfMirrorImages+1) )
mirrorRoll = mirrorDice.roll()
if mirrorRoll == 1:
return False
performer = d20a.performer
target = d20a.target
#Get spell_id and spellName
spell_id = target.d20_query_get_data(Q_Critter_Has_Mirror_Image,0)
roll_result = mirror_image_attack_roll(d20a, spell_id)
if roll_result >= 0:
target.d20_send_signal(S_Spell_Mirror_Image_Struck, spell_id, 0)
target.float_mesfile_line('mes\\combat.mes', 109)
game.create_history_from_pattern(10, performer, target)
return True
else:
#I am unsure how misses are actually handled in this version
return False
def getDefenderConcealment(d20a):
target = d20a.target
defenderConcealment = tpdp.EventObjAttack()
defenderConcealment.attack_packet.set_flags(d20a.flags)
defenderConcealment.attack_packet.target = target
defenderConcealment.attack_packet.attacker = d20a.performer
return defenderConcealment.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetDefenderConcealmentMissChance, EK_NONE)
def getAttackerConcealment(performer):
performerConcealment = tpdp.EventObjAttack()
performerConcealment.dispatch(performer, OBJ_HANDLE_NULL, ET_OnGetAttackerConcealmentMissChance, EK_NONE)
return performerConcealment.bonus_list.get_highest()
def getSuppressConcealment(performer, target):
#suppressingConditions can be easily expanded with new conditions if necessary
suppressingConditions = [tpdp.get_condition_ref("sp-True Strike"), tpdp.get_condition_ref("Weapon Seeking")]
if any(performer.d20_query_with_data(Q_Critter_Has_Condition, conRef, 0) for conRef in suppressingConditions):
return True
elif performer.can_blindsee(target):
return True
elif performer.d20_query("Ignore Concealment"): #Example for Arcane Archer; not implemented in AA
return True
return False
def rollConcealment(concealmentMissChance):
concealmentDice = dice_new("1d100")
concealmentDiceRoll = concealmentDice.roll()
if concealmentDiceRoll > concealmentMissChance:
return True, concealmentDiceRoll
return False, concealmentDiceRoll
def toHitResult(performerToHit, targetAc):
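    #Natural 1 always misses and natural 20 always hits; otherwise compare
    #roll + attack bonus against the target's AC.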
toHitDice = dice_new("1d20")
toHitRoll = toHitDice.roll()
if toHitRoll == 1:
return False, toHitRoll
elif toHitRoll == 20:
return True, toHitRoll
elif toHitRoll + performerToHit >= targetAc:
return True, toHitRoll
return False, toHitRoll
def to_hit_processing(d20a):
performer = d20a.performer #auto performer = d20a.d20APerformer;
d20Data = d20a.data1 #auto d20Data = d20a.data1;
target = d20a.target #auto tgt = d20a.d20ATarget;
if not target:
return
#Mirror Image
numberOfMirrorImages = target.d20_query(Q_Critter_Has_Mirror_Image)
if numberOfMirrorImages:
if hitMirrorImage(d20a, numberOfMirrorImages):
return
#Concealment
debug_print("Concealment")
targetConcealment = getDefenderConcealment(d20a)
performerCanSuppressConcealment = getSuppressConcealment(performer, target)
if performerCanSuppressConcealment:
targetConcealment = 0
concealmentMissChance = max(targetConcealment, getAttackerConcealment(performer))
if concealmentMissChance > 0:
is_success, miss_chance_roll = rollConcealment(concealmentMissChance)
if is_success:
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 60, miss_chance_roll, 194, 193)
d20a.roll_id_1 = roll_id
else: # concealment miss
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 60, miss_chance_roll, 195, 193)
d20a.roll_id_1 = roll_id
# Blind fight - give second chance
if not performer.has_feat(feat_blind_fight):
return
is_success, miss_chance_roll = rollConcealment(concealmentMissChance)
if not is_success:
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 61, miss_chance_roll, 195, 193)
return
roll_id = roll_history.add_percent_chance_roll(performer, target, concealmentMissChance, 61, miss_chance_roll, 194, 193)
d20a.roll_id_2 = roll_id
#ToHitBonus Actions
debug_print("To Hit")
to_hit_eo = tpdp.EventObjAttack()
to_hit_eo.attack_packet.set_flags(d20a.flags)
to_hit_eo.attack_packet.target = target
to_hit_eo.attack_packet.action_type = d20a.action_type #dispIoToHitBon.attackPacket.d20ActnType = d20a.action_type
to_hit_eo.attack_packet.attacker = performer
to_hit_eo.attack_packet.event_key = d20Data #dispIoToHitBon.attackPacket.dispKey = d20Data
unarmed = OBJ_HANDLE_NULL
if to_hit_eo.attack_packet.get_flags() & D20CAF_TOUCH_ATTACK:
to_hit_eo.attack_packet.set_weapon_used(unarmed)
elif to_hit_eo.attack_packet.get_flags() & D20CAF_SECONDARY_WEAPON:
offhandItem = performer.item_worn_at(item_wear_weapon_secondary)
if offhandItem.type != obj_t_weapon:
to_hit_eo.attack_packet.set_weapon_used(unarmed)
else:
to_hit_eo.attack_packet.set_weapon_used(offhandItem)
else:
mainhandItem = performer.item_worn_at(item_wear_weapon_primary)
if mainhandItem.type != obj_t_weapon:
to_hit_eo.attack_packet.set_weapon_used(unarmed)
else:
to_hit_eo.attack_packet.set_weapon_used(mainhandItem)
to_hit_eo.attack_packet.ammo_item = performer.get_ammo_used()
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_FINAL_ATTACK_ROLL
to_hit_eo.attack_packet.set_flags(flags)
to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnGetBucklerAcPenalty , EK_NONE)
to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnToHitBonus2, EK_NONE) # // note: the "Global" condition has ToHitBonus2 hook that dispatches the ToHitBonusBase
to_hit_bon_final = to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnToHitBonusFromDefenderCondition, EK_NONE)
#targetAc Actions
debug_print("Target AC")
target_ac_eo = to_hit_eo.__copy__()
target_ac_eo.bonus_list.reset()
target_ac_eo.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetAC, EK_NONE)
tgt_ac_final = target_ac_eo.dispatch(target, OBJ_HANDLE_NULL, ET_OnGetAcModifierFromAttacker, EK_NONE)
#Check if attacks hits
attackDidHit, toHitRoll = toHitResult(to_hit_bon_final, tgt_ac_final)
critAlwaysCheat = cheats.critical #Note: changed behavior from vanilla (this used to toggle the property)
#Check for special hit conditions
if not attackDidHit:
if to_hit_eo.attack_packet.get_flags() & D20CAF_ALWAYS_HIT:
attackDidHit = True
elif critAlwaysCheat:
attackDidHit = True
else:
#Reroll Check
if performer.d20_query(Q_RerollAttack):
tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags() )
rerollDidHit, toHitRoll = toHitResult(to_hit_bon_final, tgt_ac_final)
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_REROLL
to_hit_eo.attack_packet.set_flags(flags)
if not rerollDidHit:
logbook.inc_misses(performer)
else:
attackDidHit = True
if not attackDidHit:
debug_print("Missed")
roll_id = tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags() )
d20a.roll_id_0 = roll_id
return
#We have a hit sir!
debug_print("Scored a hit")
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_HIT
to_hit_eo.attack_packet.set_flags(flags)
logbook.inc_hits(performer)
#Check if attack was a critical hit
performerCritRange = to_hit_eo.__copy__()
performerCritRange.bonus_list.reset()
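    #The dispatched value is the size of the threat range, so a result of 1
    #gives a threat range of 20 and a result of 2 gives 19-20.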
critRange = 21 - performerCritRange.dispatch(performer, OBJ_HANDLE_NULL, ET_OnGetCriticalHitRange, EK_NONE)
if target.d20_query(Q_Critter_Is_Immune_Critical_Hits):
isCritical = False
elif toHitRoll == 20:
isCritical = True
elif toHitRoll >= critRange:
isCritical = True
elif critAlwaysCheat:
isCritical = True
else:
isCritical = False
#Check to Confirm Critical Hit
crit_hit_roll = -1
if isCritical:
debug_print("Confirm critical:")
to_hit_bon_final += to_hit_eo.dispatch(performer, OBJ_HANDLE_NULL, ET_OnConfirmCriticalBonus, EK_NONE)
critConfirmed, crit_hit_roll = toHitResult(to_hit_bon_final, tgt_ac_final)
#Check for special confirm conditions
if not critConfirmed:
if performer.d20_query("Always Confirm Criticals"):
critConfirmed = True
elif critAlwaysCheat:
critConfirmed = True
else:
if performer.d20_query(Q_RerollCritical):
tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags(), crit_hit_roll )
critConfirmed, crit_hit_roll = toHitResult(to_hit_bon_final, tgt_ac_final)
#no reroll flag seems to be added in original code
if critConfirmed:
debug_print("Crit confirm")
flags = to_hit_eo.attack_packet.get_flags()
flags |= D20CAF_CRITICAL
to_hit_eo.attack_packet.set_flags(flags)
#Deflect Arrows
#Unsure why it is done after confirm crit,
#If done before, history window for normal attack
#could be done earlier
#dispIoToHitBon.Dispatch(dispIoToHitBon.attackPacket.victim, objHndl::null, dispTypeDeflectArrows, DK_NONE)
#unsure why it is not simply tgt, will copy it
to_hit_eo.dispatch(to_hit_eo.attack_packet.target, OBJ_HANDLE_NULL, ET_OnDeflectArrows, EK_NONE)
handle_sanctuary(to_hit_eo, d20a)
#Set flags
debug_print("Final")
d20a.flags = to_hit_eo.attack_packet.get_flags()
roll_id = tpdp.create_history_attack_roll(performer, target, toHitRoll, to_hit_eo.bonus_list, target_ac_eo.bonus_list, to_hit_eo.attack_packet.get_flags(), crit_hit_roll )
d20a.roll_id_0 = roll_id
return
| mit | -2,265,534,329,201,253,600 | 40.032468 | 182 | 0.677006 | false |
openbermuda/karmapi | karmapi/nzquake.py | 1 | 1633 | """ The data is available from Geonet, the official source of New
Zealand earthquake hazard data:
http://wfs.geonet.org.nz/geonet/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=geonet:quake_search_v1&outputFormat=csv
Geonet Data policy
==================
All data and images are made available free of charge through the
GeoNet project to facilitate research into hazards and assessment of
risk. GeoNet is sponsored by the New Zealand Government through its
agencies: Earthquake Commission (EQC), GNS Science and Land
Information New Zealand (LINZ). The use of data or images is subject
to the following conditions:
Users are requested to acknowledge the GeoNet project sponsors as the
source of the data or images. (Suggested text: We acknowledge the New
Zealand GeoNet project and its sponsors EQC, GNS Science and LINZ, for
providing data/images used in this study.)
The GeoNet project sponsors accept no liability for any loss or
damage, direct or indirect, resulting from the use of the data or
images provided. The GeoNet project sponsors do not make any
representation in respect of the information's accuracy, completeness
or fitness for any particular purpose.
"""
URL = "http://wfs.geonet.org.nz/geonet/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=geonet:quake_search_v1&outputFormat=csv"
from pathlib import Path
import requests
import karmapi
import pandas
def get(path):
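    # Download the GeoNet quake-search CSV and write it to `path`.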
path = Path(path)
r = requests.get(URL)
path.write_bytes(r.content)
def datefix(df):
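    # Truncate the ISO origintime strings to whole seconds and use them as the
    # frame's DatetimeIndex.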
tt = df.origintime.apply(lambda x: x[:19])
df.index = pandas.to_datetime(tt)
return df
| gpl-3.0 | -3,375,490,667,483,754,500 | 29.240741 | 137 | 0.764238 | false |
hexforge/pulp_db | utils/decoder/hkseomd_decoder.py | 1 | 7007 | import sys
import struct
import pprint
from rosetta.common import get_spec, decode, build_standard_msg_parser, pb_parser
def build_msg_parsers(spec):
decoders = {}
endian = spec['endian']
decoders['header'] = build_standard_msg_parser(spec['header'])
decoders[100] = build_standard_msg_parser(spec['100.sequence_reset'])
decoders[101] = build_standard_msg_parser(spec['101.logon'])
decoders[102] = build_standard_msg_parser(spec['102.logon_responce'])
decoders[201] = build_standard_msg_parser(spec['201.retransmission_request'])
decoders[202] = build_standard_msg_parser(spec['202.retransmission_responce'])
decoders[203] = build_standard_msg_parser(spec['203.refresh_complete'])
decoders[10] = build_standard_msg_parser(spec['10.market_definition'])
decoders[14] = build_standard_msg_parser(spec['14.currency_rate'])
decoders[20] = build_standard_msg_parser(spec['20.trading_session_status'])
decoders[21] = build_standard_msg_parser(spec['21.security_status'])
decoders[30] = build_standard_msg_parser(spec['30.add_order'])
decoders[31] = build_standard_msg_parser(spec['31.modify_order'])
decoders[32] = build_standard_msg_parser(spec['32.delete_order'])
decoders[33] = build_standard_msg_parser(spec['33.add_odd_lot_order'])
decoders[34] = build_standard_msg_parser(spec['34.delete_odd_lot_order'])
decoders[51] = build_standard_msg_parser(spec['51.trade_cancel'])
decoders[52] = build_standard_msg_parser(spec['52.trade_ticker'])
decoders[62] = build_standard_msg_parser(spec['62.closing_price'])
decoders[40] = build_standard_msg_parser(spec['40.nominal_price'])
decoders[41] = build_standard_msg_parser(spec['41.indicative_equilibrium_price'])
decoders[60] = build_standard_msg_parser(spec['60.statistics'])
decoders[61] = build_standard_msg_parser(spec['61.market_turnover'])
decoders[44] = build_standard_msg_parser(spec['44.yield'])
decoders[70] = build_standard_msg_parser(spec['70.index_definition'])
decoders[71] = build_standard_msg_parser(spec['71.index_data'])
decoders[55] = build_standard_msg_parser(spec['55.top_of_book'])
decoders[42] = build_standard_msg_parser(spec['42.estimated_average_settlement_price'])
decoders[50] = build_standard_msg_parser(spec['50.Trade'])
sec_1 = build_standard_msg_parser(spec['11.security_definition'])
sec_2 = build_standard_msg_parser(spec['11.sub.security_definition'])
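    # Messages with repeating groups need hand-written decoders: parse the
    # fixed part first, then read the count field and decode that many
    # sub-records, attaching them to the parent message.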
def decoder_11(data, index):
msg, index = sec_1(data, index)
submessages = []
msg['submessages'] = submessages
for _ in range(msg['NoUnderlyingSecurities']):
            sub_msg, index = sec_2(data, index)
            submessages.append(sub_msg)
return msg, index
decoders[11] = decoder_11
liq_1 = build_standard_msg_parser(spec['13.liquidity_provider'])
liq_2 = build_standard_msg_parser(spec['13.sub.liquidity_provider'])
def decoder_13(data, index):
msg, index = liq_1(data, index)
submessages = []
msg['submessages'] = submessages
for _ in range(msg['NoLiquidityProviders']):
            sub_msg, index = liq_2(data, index)
            submessages.append(sub_msg)
return msg, index
decoders[13] = decoder_13
agg_1 = build_standard_msg_parser(spec['53.aggregate_order_book_update'])
agg_2 = build_standard_msg_parser(spec['53.sub.aggregate_order_book_update_spec2'])
def decoder_53(data, index):
msg, index = agg_1(data, index)
submessages = []
msg['submessages'] = submessages
for _ in range(msg['NoEntries']):
            sub_msg, index = agg_2(data, index)
            submessages.append(sub_msg)
return msg, index
decoders[53] = decoder_53
bro_1 = build_standard_msg_parser(spec['54.broker_queue'])
bro_2 = build_standard_msg_parser(spec['54.sub.broker_queue'])
def decoder_54(data, index):
msg, index = bro_1(data, index)
submessages = []
msg['submessages'] = submessages
for _ in range(msg['ItemCount']):
            sub_msg, index = bro_2(data, index)
            submessages.append(sub_msg)
return msg, index
decoders[54] = decoder_54
news = build_standard_msg_parser(spec['22.news'])
news1 = build_standard_msg_parser(spec['22.sub1.news'])
news2 = build_standard_msg_parser(spec['22.sub2.news'])
news3 = build_standard_msg_parser(spec['22.sub3.news'])
news4 = build_standard_msg_parser(spec['22.sub4.news'])
news5 = build_standard_msg_parser(spec['22.sub5.news'])
news6 = build_standard_msg_parser(spec['22.sub6.news'])
news7 = build_standard_msg_parser(spec['22.sub7.news'])
news8 = build_standard_msg_parser(spec['22.sub8.news'])
def decoder_22(data, index):
msg, index = news(data, index)
n_msg, index = news1(data, index)
msg.update(n_msg)
market_codes = []
msg['market_codes'] = market_codes
for _ in range(n_msg['NoMarketCodes']):
            sub_msg, index = news2(data, index)
            market_codes.append(sub_msg)
n_msg, index = news3(data, index)
n_msg, index = news4(data, index)
sec_codes = []
msg['sec_codes'] = sec_codes
for _ in range(n_msg['NoSecurityCodes']):
            sub_msg, index = news5(data, index)
            sec_codes.append(sub_msg)
n_msg, index = news6(data, index)
n_msg, index = news7(data, index)
news_lines = []
msg['news_lines'] = news_lines
for _ in range(n_msg['NoNewsLines']):
            sub_msg, index = news8(data, index)
            news_lines.append(sub_msg)
return msg, index
decoders[22] = decoder_22
return decoders
def decode_playback(decoders, spec, playback):
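    # Generator: for each packet in the playback file decode the header, then
    # MsgCount submessages, and yield them together as a single list.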
header_decoder = decoders['header']
endian = spec['endian']
header_format = spec['header']['format']
header_size = struct.calcsize(header_format)
msg_num = 0
for data in pb_parser(playback):
if not data:
break
msg_num += 1
msg = []
# HEADER
index = 0
try:
decoded_header, index = header_decoder(data, index)
except struct.error:
raise
msg.append(decoded_header)
# SUBMSGS
number_of_submsgs = decoded_header['MsgCount']
for _ in range(number_of_submsgs):
try:
size = struct.unpack(endian+'H', data[index:index+2])[0]
typ = struct.unpack(endian+'H', data[index+2:index+4])[0]
sub_msg, index = decoders[typ](data, index)
except (struct.error, KeyError):
import pdb
pdb.set_trace()
raise
msg.append(sub_msg)
yield msg
print(msg_num)
def main():
spec = get_spec('hkseomd.ini')
decoders = build_msg_parsers(spec)
for msg in decode_playback(decoders, spec, sys.argv[1]):
#pass
pprint.pprint(msg)
if __name__ == '__main__':
main()
| apache-2.0 | 6,615,086,416,629,237,000 | 39.04 | 91 | 0.62095 | false |
caffeinehit/fabric-provision | docs/conf.py | 1 | 7037 | # -*- coding: utf-8 -*-
#
# fabric-chef documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 26 17:29:47 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fabric-chef'
copyright = u'2011, Alen Mujezinovic'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'fabric-chefdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'fabric-chef.tex', u'fabric-chef Documentation',
u'Alen Mujezinovic', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fabric-chef', u'fabric-chef Documentation',
[u'Alen Mujezinovic'], 1)
]
| mit | 182,649,190,020,775,580 | 31.578704 | 80 | 0.708683 | false |
PabloCastellano/telepathy-python | examples/tube-dbus-muc.py | 1 | 8390 | #!/usr/bin/env python
import dbus.glib
import gobject
import sys
import time
from dbus.service import method, signal, Object
from dbus import PROPERTIES_IFACE
from telepathy.client import Channel
from telepathy.interfaces import (
CONN_INTERFACE, CHANNEL_INTERFACE_GROUP,
CHANNEL_TYPE_TEXT, CHANNEL_INTERFACE, CONNECTION_INTERFACE_REQUESTS,
CHANNEL_INTERFACE_TUBE, CHANNEL_TYPE_DBUS_TUBE)
from telepathy.constants import (
CONNECTION_HANDLE_TYPE_CONTACT,
CONNECTION_HANDLE_TYPE_ROOM, CONNECTION_STATUS_CONNECTED,
CONNECTION_STATUS_DISCONNECTED, CONNECTION_STATUS_CONNECTING,
SOCKET_ACCESS_CONTROL_CREDENTIALS,
TUBE_CHANNEL_STATE_LOCAL_PENDING, TUBE_CHANNEL_STATE_REMOTE_PENDING,
TUBE_CHANNEL_STATE_OPEN, TUBE_CHANNEL_STATE_NOT_OFFERED)
from account import connection_from_file
from tubeconn import TubeConnection
SERVICE = "org.freedesktop.Telepathy.Tube.Test"
IFACE = SERVICE
PATH = "/org/freedesktop/Telepathy/Tube/Test"
tube_state = {TUBE_CHANNEL_STATE_LOCAL_PENDING: 'local pending',
              TUBE_CHANNEL_STATE_REMOTE_PENDING: 'remote pending',
              TUBE_CHANNEL_STATE_OPEN: 'open',
              TUBE_CHANNEL_STATE_NOT_OFFERED: 'not offered'}
loop = None
class Client:
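    # Shared plumbing: connects the account, provides the MUC join logic and
    # watches NewChannels for a D-Bus tube channel on that MUC.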
def __init__(self, account_file, muc_id):
self.conn = connection_from_file(account_file, ready_handler=self.ready_cb)
self.muc_id = muc_id
self.conn[CONN_INTERFACE].connect_to_signal('StatusChanged',
self.status_changed_cb)
self.test = None
self.joined = False
self.tube = None
def run(self):
global loop
self.conn[CONN_INTERFACE].Connect()
loop = gobject.MainLoop()
try:
loop.run()
finally:
try:
self.conn[CONN_INTERFACE].Disconnect()
except:
pass
def status_changed_cb(self, state, reason):
if state == CONNECTION_STATUS_CONNECTING:
print 'connecting'
elif state == CONNECTION_STATUS_CONNECTED:
print 'connected'
elif state == CONNECTION_STATUS_DISCONNECTED:
print 'disconnected'
loop.quit()
def ready_cb(self, conn):
self.conn[CONNECTION_INTERFACE_REQUESTS].connect_to_signal ("NewChannels",
self.new_channels_cb)
self.self_handle = self.conn[CONN_INTERFACE].GetSelfHandle()
def join_muc(self):
# workaround to be sure that the muc service is fully resolved in
# Salut.
time.sleep(2)
print "join muc", self.muc_id
chan_path, props = self.conn[CONNECTION_INTERFACE_REQUESTS].CreateChannel({
CHANNEL_INTERFACE + ".ChannelType": CHANNEL_TYPE_TEXT,
CHANNEL_INTERFACE + ".TargetHandleType": CONNECTION_HANDLE_TYPE_ROOM,
CHANNEL_INTERFACE + ".TargetID": self.muc_id})
self.channel_text = Channel(self.conn.dbus_proxy.bus_name, chan_path)
self.self_handle = self.channel_text[CHANNEL_INTERFACE_GROUP].GetSelfHandle()
self.channel_text[CHANNEL_INTERFACE_GROUP].connect_to_signal(
"MembersChanged", self.text_channel_members_changed_cb)
if self.self_handle in self.channel_text[CHANNEL_INTERFACE_GROUP].GetMembers():
self.joined = True
self.muc_joined()
def new_channels_cb(self, channels):
if self.tube is not None:
return
for path, props in channels:
if props[CHANNEL_INTERFACE + ".ChannelType"] == CHANNEL_TYPE_DBUS_TUBE:
self.tube = Channel(self.conn.dbus_proxy.bus_name, path)
self.tube[CHANNEL_INTERFACE_TUBE].connect_to_signal(
"TubeChannelStateChanged", self.tube_channel_state_changed_cb)
self.tube[CHANNEL_INTERFACE].connect_to_signal(
"Closed", self.tube_closed_cb)
self.got_tube(props)
def got_tube(self, props):
initiator_id = props[CHANNEL_INTERFACE + ".InitiatorID"]
service = props[CHANNEL_TYPE_DBUS_TUBE + ".ServiceName"]
state = self.tube[PROPERTIES_IFACE].Get(CHANNEL_INTERFACE_TUBE, 'State')
print "new D-Bus tube offered by %s. Service: %s. State: %s" % (
initiator_id, service, tube_state[state])
def tube_opened (self):
group_iface = self.channel_text[CHANNEL_INTERFACE_GROUP]
tube_conn = TubeConnection(self.conn, self.tube, self.tube_addr,
group_iface=group_iface)
self.test = Test(tube_conn, self.conn)
def tube_channel_state_changed_cb(self, state):
print "tube state changed:", tube_state[state]
if state == TUBE_CHANNEL_STATE_OPEN:
self.tube_opened()
def tube_closed_cb(self):
print "tube closed", id
def text_channel_members_changed_cb(self, message, added, removed,
local_pending, remote_pending, actor, reason):
if self.self_handle in added and not self.joined:
self.joined = True
self.muc_joined()
def muc_joined(self):
pass
class InitiatorClient(Client):
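    # Creates and offers the D-Bus tube once the MUC is joined, then emits the
    # Hello signal over the tube every 20 seconds.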
def __init__(self, account_file, muc_id):
Client.__init__(self, account_file, muc_id)
def ready_cb(self, conn):
Client.ready_cb(self, conn)
self.join_muc()
def muc_joined(self):
Client.muc_joined(self)
print "muc joined. Create the tube"
self.conn[CONNECTION_INTERFACE_REQUESTS].CreateChannel({
CHANNEL_INTERFACE + ".ChannelType": CHANNEL_TYPE_DBUS_TUBE,
CHANNEL_INTERFACE + ".TargetHandleType": CONNECTION_HANDLE_TYPE_ROOM,
CHANNEL_INTERFACE + ".TargetID": self.muc_id,
CHANNEL_TYPE_DBUS_TUBE + ".ServiceName": SERVICE})
def got_tube(self, props):
Client.got_tube(self, props)
params = dbus.Dictionary({"login": "badger", "a_int" : 69},
signature='sv')
print "Offer tube"
self.tube_addr = self.tube[CHANNEL_TYPE_DBUS_TUBE].Offer(params,
SOCKET_ACCESS_CONTROL_CREDENTIALS)
def tube_opened (self):
Client.tube_opened(self)
self._emit_test_signal();
gobject.timeout_add (20000, self._emit_test_signal)
def _emit_test_signal (self):
print "emit Hello"
self.test.Hello()
return True
class JoinerClient(Client):
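    # Accepts the offered tube and answers each Hello signal by calling the
    # sender's Say method over the tube.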
def __init__(self, account_file, muc_id):
Client.__init__(self, account_file, muc_id)
def ready_cb(self, conn):
Client.ready_cb(self, conn)
self.join_muc()
def got_tube(self, props):
Client.got_tube(self, props)
print "Accept tube"
self.tube_addr = self.tube[CHANNEL_TYPE_DBUS_TUBE].Accept(SOCKET_ACCESS_CONTROL_CREDENTIALS)
def tube_opened (self):
Client.tube_opened(self)
self.test.tube.add_signal_receiver(self.hello_cb, 'Hello', IFACE,
path=PATH, sender_keyword='sender')
def hello_cb (self, sender=None):
sender_handle = self.test.tube.bus_name_to_handle[sender]
sender_id = self.conn[CONN_INTERFACE].InspectHandles(
CONNECTION_HANDLE_TYPE_CONTACT, [sender_handle])[0]
self_id = self.conn[CONN_INTERFACE].InspectHandles(
CONNECTION_HANDLE_TYPE_CONTACT, [self.self_handle])[0]
print "Hello from %s" % sender
text = "I'm %s and thank you for your hello" % self_id
self.test.tube.get_object(sender, PATH).Say(text, dbus_interface=IFACE)
class Test(Object):
def __init__(self, tube, conn):
super(Test, self).__init__(tube, PATH)
self.tube = tube
self.conn = conn
@signal(dbus_interface=IFACE, signature='')
def Hello(self):
pass
@method(dbus_interface=IFACE, in_signature='s', out_signature='b')
def Say(self, text):
print "I say: %s" % text
return True
def usage():
print "python %s [account-file] [muc]\n" \
"python %s [account-file] [muc] --initiator"\
% (sys.argv[0], sys.argv[0])
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) == 2:
client = JoinerClient(args[0], args[1])
elif len(args) == 3 and args[2] == '--initiator':
client = InitiatorClient(args[0], args[1])
else:
usage()
sys.exit(0)
client.run()
| lgpl-2.1 | 2,674,781,851,305,746,000 | 31.901961 | 100 | 0.617759 | false |
garinh/cs | docs/support/docutils/languages/ca.py | 1 | 1993 | # Author: Ivan Vilata i Balaguer
# Contact: [email protected]
# Revision: $Revision: 21817 $
# Date: $Date: 2005-07-21 13:39:57 -0700 (Thu, 21 Jul 2005) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Catalan-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': u'Autor',
'authors': u'Autors',
'organization': u'Organitzaci\u00F3',
'address': u'Adre\u00E7a',
'contact': u'Contacte',
'version': u'Versi\u00F3',
'revision': u'Revisi\u00F3',
'status': u'Estat',
'date': u'Data',
'copyright': u'Copyright',
'dedication': u'Dedicat\u00F2ria',
'abstract': u'Resum',
'attention': u'Atenci\u00F3!',
'caution': u'Compte!',
'danger': u'PERILL!',
'error': u'Error',
'hint': u'Suggeriment',
'important': u'Important',
'note': u'Nota',
'tip': u'Consell',
'warning': u'Av\u00EDs',
'contents': u'Contingut'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
u'autor': 'author',
u'autors': 'authors',
u'organitzaci\u00F3': 'organization',
u'adre\u00E7a': 'address',
u'contacte': 'contact',
u'versi\u00F3': 'version',
u'revisi\u00F3': 'revision',
u'estat': 'status',
u'data': 'date',
u'copyright': 'copyright',
u'dedicat\u00F2ria': 'dedication',
u'resum': 'abstract'}
"""Catalan (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| lgpl-2.1 | -5,671,412,734,274,300,000 | 31.145161 | 76 | 0.622679 | false |
nathanbjenx/cairis | cairis/bin/cairisd.py | 1 | 1541 | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'Shamal Faily'
import os
import sys
from cairis.daemon import create_app, db
from cairis.daemon.CairisHTTPError import CairisHTTPError
from flask_script import Manager, Server, Command
app = create_app()
manager = Manager(app)
manager.add_command('runserver', Server(host='0.0.0.0', port=7071))
@app.after_request
def apply_caching(response):
response.headers["X-Frame-Options"] = "SAMEORIGIN"
return response
class TestClient(Command):
def run(self):
app.test_client()
manager.add_command('testclient', TestClient())
def main(args):
manager.run()
if __name__ == '__main__':
try:
main(sys.argv)
except CairisHTTPError as e:
print('Fatal CAIRIS error: ' + str(e))
sys.exit(-1)
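# Hedged usage note (these commands mirror the ones registered on the Manager
# above; the script name is an assumption about how the module is invoked):
#
#     python cairisd.py runserver   # serve the CAIRIS API on 0.0.0.0:7071
#     python cairisd.py testclient  # exercise the Flask test client once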
| apache-2.0 | -5,118,065,886,492,460,000 | 29.215686 | 67 | 0.73329 | false |
ednapiranha/snapshots-from-here | snapshots/snappy.py | 1 | 11535 | # -*- coding: utf-8 -*-
import base64
import os
import random
import time
from auto_tagify import AutoTagify
from boto.s3.key import Key
from PIL import Image
from pymongo import DESCENDING
from pymongo.objectid import ObjectId
import settings
CONTENT_TYPE = 'image/jpeg'
ATAG = AutoTagify()
ATAG.link = "/tag"
RECENT_LIMIT = 12
class Snappy(object):
"""All the snapshot functionality"""
def __init__(self):
self.token = ''
self.env = 'dev'
self.db = settings.DATABASE
def set_environment(self, env='dev'):
if env == 'test':
self.env = env
self.db = settings.TEST_DATABASE
def get_or_create_email(self, email):
"""Find the email address in the system
or create it if it doesn't exist.
"""
email = email.lower().strip()
if not self.db.users.find_one({"email":email}):
self.db.users.update({"email":email},
{"$set":{"token":self._generate_token(email)}},
upsert=True)
emailer = self.db.users.find_one({"email":email})
self.token = emailer['token']
return emailer
def get_user_by_id(self, id):
"""Find a user by id."""
return self.db.users.find_one({"_id":ObjectId(id)})
def get_user_by_token(self, sender_token):
"""Find a user by token."""
return self.db.users.find_one({"token":sender_token})
def update_profile(self, email, **kwargs):
"""Update profile information."""
profile = {}
for key in kwargs:
profile[key] = str(kwargs[key]).strip()
self.db.users.update({"email":email},
{"$set":profile})
def _generate_token(self, email):
"""Generate a token based on the timestamp and the user's
email address.
"""
random_int = str(random.randrange(100, 10000))
token_string = '%s%s%s' % (random_int,
email,
str(int(time.time())))
return base64.b64encode(token_string)
def upload(self, description, filename, sender_token):
"""Upload the image to the user's account. Also, autotag the
description.
"""
image_full_path = os.path.join('tmp/', filename + '_original')
image_full_path_medium = os.path.join('tmp/', filename + '_medium')
image_full_path_thumb = os.path.join('tmp/', filename + '_thumb')
aws_key = Key(settings.BUCKET)
aws_key.key = filename + '_original.jpg'
aws_key.set_contents_from_filename(image_full_path,
headers={'Content-Type': CONTENT_TYPE})
image_full_path_original = '%s%s_original.jpg' % (settings.IMAGE_URL,
filename)
aws_key.key = filename + '_thumb.jpg'
aws_key.set_contents_from_filename(image_full_path_thumb,
headers={'Content-Type': CONTENT_TYPE})
image_full_path_thumb = '%s%s_thumb.jpg' % (settings.IMAGE_URL, filename)
aws_key.key = filename + '_medium.jpg'
aws_key.set_contents_from_filename(image_full_path_medium,
headers={'Content-Type': CONTENT_TYPE})
image_full_path_medium = '%s%s_medium.jpg' % (settings.IMAGE_URL,
filename)
ATAG.text = description
tagged_description = ATAG.generate()
self.db.photos.update({"image_filename":filename},
{"$set":{"description":description,
"tagged_description":tagged_description,
"tags":ATAG.tag_list(),
"image_original":image_full_path_original,
"image_thumb":image_full_path_thumb,
"image_medium":image_full_path_medium,
"token":sender_token,
"created_at":int(time.time())}},
upsert=True)
ATAG.text = ''
return self.db.photos.find_one({"image_filename":filename})
def get_email(self, sender_token):
"""Get the user's email by their token."""
return self.db.users.find_one({"token":sender_token})['email']
def update_description(self, image_id, description):
"""Update the description for the image."""
ATAG.text = description
tagged_description = ATAG.generate()
self.db.photos.update({"_id":ObjectId(image_id)},
{"$set":{"description":description,
"tagged_description":tagged_description,
"tags":ATAG.tag_list()}})
ATAG.text = ''
def get_recent(self, page=0, nav='next'):
"""Get all recently uploaded images. Navigation defaults at the next
image created (descending). If navigation is set to 'prev', we go in the
reverse direction.
"""
photos = self.db.photos.find().sort("created_at", DESCENDING)
page = self._set_page(photos, page, nav)
try:
return photos.skip(page*1).limit(1)[0]
except IndexError:
return self.db.photos.find().sort("created_at").limit(1)[0]
def get_recent_by_user(self, sender_token, page=0, nav='next'):
"""Get all recently uploaded images by a user. Navigation defaults at the
next image created (descending). If navigation is set to 'prev', we go in
the reverse direction.
"""
photos = self.db.photos.find({"token":sender_token}).sort("created_at", DESCENDING)
page = self._set_page(photos, page, nav)
try:
return photos.skip(page*1).limit(1)[0]
except IndexError:
return self.db.photos.find().sort("created_at").limit(1)[0]
def get_recent_tag(self, tag=None, page=0, nav='next'):
"""Get all recently uploaded images matching this tag. Navigation
defaults at the next image created (descending). If navigation is set to
'prev', we go in the reverse direction.
"""
photos = self.db.photos.find({"tags":tag}).sort("created_at", DESCENDING)
page = self._set_page(photos, page, nav)
try:
return photos.skip(page*1).limit(1)[0]
except IndexError:
return self.db.photos.find().sort("created_at").limit(1)[0]
def get_photo_count(self, tag=None):
"""Get the total number of photos. If a tag is specified,
get the total number with that tag.
"""
if tag:
return self.db.photos.find({"tags":tag}).count() - 1
else:
return self.db.photos.count() - 1
def get_photo_count_by_user(self, sender_token):
"""Get the total number of photos for a user.
"""
return self.db.photos.find({"token":sender_token}).count() - 1
def get_image(self, image_id):
"""Return the image matching the given id."""
return self.db.photos.find_one({"_id":ObjectId(image_id)})
def get_latest_snapshots(self, sender_token):
"""Get the last 12 images from this user."""
return self.db.photos.find({"token":
sender_token}).sort("created_at", DESCENDING).limit(RECENT_LIMIT)
def get_latest_favorites(self, sender_token):
"""Get the last 12 favorites from this user."""
favorites = self.db.favorites.find({"token":
sender_token}).sort("created_at", DESCENDING).limit(RECENT_LIMIT)
photos = []
for favorite in favorites:
photos.append(self.db.photos.find_one({"_id": favorite['image_id']}))
return photos
def get_image_by_user(self, image_id, sender_token):
"""Return an image matching the given id and user."""
return self.db.photos.find_one({"_id":ObjectId(image_id),
"token":sender_token})
def delete_image(self, image_id, sender_token):
"""Delete the image matching the given id and user."""
photo = self.db.photos.find_one({"_id":ObjectId(image_id),
"token":sender_token})
settings.BUCKET.delete_keys((photo['image_filename'] + '_thumb.jpg',
photo['image_filename'] + '_medium.jpg',
photo['image_filename'] + '_original.jpg'))
self.db.photos.remove({"_id":ObjectId(image_id)})
self.db.comments.remove({"image_id":ObjectId(image_id)})
self.db.favorites.remove({"image_id":ObjectId(image_id)})
def favorited(self, image_id, sender_token):
"""Toggled favorite/unfavorite of an image."""
photo = self.db.favorites.find_one({"image_id":ObjectId(image_id),
"token":sender_token})
if photo is None:
# favorite
self.db.favorites.update({"image_id":ObjectId(image_id)},
{"$set":{"token":sender_token,
"created_at":int(time.time())}},
upsert=True)
return True
else:
# unfavorite
self.db.favorites.remove({"_id":ObjectId(photo['_id'])})
return False
def is_favorited(self, image_id, sender_token):
"""Check to see if an image was favorited."""
photo = self.db.favorites.find_one({"image_id":ObjectId(image_id),
"token":sender_token})
if photo is None:
return False
return True
def add_comment(self, image_id, sender_token, description):
"""Add a comment."""
if len(description.strip()) < 1:
return False
else:
user = self.db.users.find_one({"token":sender_token})
comment = self.db.comments.save({"image_id":ObjectId(image_id),
"token":sender_token,
"email":user['email'],
"full_name":user['full_name'],
"description":description,
"created_at":int(time.time())})
return self.db.comments.find_one({"_id":ObjectId(comment)})
def get_comments(self, image_id):
"""Get all comments for this image."""
return self.db.comments.find({"image_id":ObjectId(
image_id)}).sort("created_at", DESCENDING)
def delete_comment(self, comment_id, sender_token):
"""Delete a comment that you wrote."""
self.db.comments.remove({"_id":ObjectId(comment_id),
"token":sender_token})
def _set_page(self, photos, page, nav):
"""Set the page and nav values."""
page = int(page)
if nav == 'next' and photos.count() > 1:
if page > photos.count() - 1:
page = photos.count() - 1
elif nav == 'prev':
if page < 0:
page = 0
else:
page = 0
return int(page)
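# Hedged usage sketch, added for illustration only: it is never called, and the
# email address, description and filename stem below are assumptions rather
# than values used by the original module. It shows the intended call order:
# resolve a user, upload a previously resized image, then read items back.
def _example_usage():
    snappy = Snappy()
    snappy.set_environment('test')
    user = snappy.get_or_create_email('someone@example.com')
    # upload() expects tmp/<stem>_original, _medium and _thumb to exist
    # already; 'abc123' is just a placeholder stem.
    photo = snappy.upload('a walk in the park', 'abc123', user['token'])
    return photo, snappy.get_recent()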
| bsd-3-clause | 7,200,109,346,975,077,000 | 40.642599 | 91 | 0.525704 | false |
Mikescher/Project-Euler_Befunge | compiled/Python2/Euler_Problem-046.py | 1 | 4454 | #!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("AR+LCAAAAAAABADt2k1Lw0AQBuC/st20l4S4k020zVAWL5716CGkRS0LoriI7qk/3lk/oNraFooo8j6wgelMs5uZ5taYJSotY9TF48Pt4vpJnT3fLR5VmeKru8W9ak7U"
+ "H5cBAAAAAAAAAAAAAAAAAAAAAAAA/AO/+V88Z3cUxNViii5GFagtPLW55UDjsvM0TiFPf/Kcn84Risp4qrqRXLjT53qqhvHbb75ZPV17GsIkU9E0HxnHLfliTH4+UEun"
+ "b7TNuaKgrWZLIW8o2JrCrj3eOJJSVR/LZVP2mLwtWLIsO/hRJxdTFV4rXWb9rHdVSvpykMXZesVASlK6K61h6o/G5Mg7TgeXjOMlm0bGY6wMZBoDTfJJnjOnNQlVFSpi"
+ "uc1ek5r12dxT7anZWN7v1YvXJ95d0rteOZmANS2FxkgwlIjzUpoQldOXupZwpRlBYier4EaaMX+/jVQP06fSQhnBWvOWm7Ye7jra3s+51ZaW919+zkqnV2nLvgfO40BT"
+ "9QKcPtfBiCwAAA==")
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
def gr(x,y):
if(x>=0 and y>=0 and x<200 and y<57):
return g[y*200 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<200 and y<57):
g[y*200 + x]=v;
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
gw(1,0,200)
gw(2,0,50)
gw(4,0,10000)
gw(3,0,2)
return 1
def _1():
gw(0,1,32)
gw(1,1,32)
gw(8,0,1073741824)
gw(tm(gr(3,0),gr(1,0)),(td(gr(3,0),gr(1,0)))+1,88)
sa(gr(3,0)+gr(3,0))
sa((1)if((gr(3,0)+gr(3,0))<gr(4,0))else(0))
return 2
def _2():
return (25)if(sp()!=0)else(3)
def _3():
sp();
return 4
def _4():
global t0
sa(gr(3,0)+1)
sa(gr(3,0)+1)
gw(3,0,gr(3,0)+1)
sa(tm(sp(),gr(1,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(1,0)))
sa(sp()+1)
v0=sp()
t0=gr(sp(),v0)
t0=t0-32
return 5
def _5():
global t0
return (6)if((t0)!=0)else(4)
def _6():
return (1)if(gr(4,0)>gr(3,0))else(7)
def _7():
gw(3,0,0)
gw(5,0,3)
return 8
def _8():
global t0
sa(gr(5,0)+2)
sa(gr(5,0)+2)
sa(gr(5,0)+2)
gw(5,0,gr(5,0)+2)
sa(tm(sp(),gr(1,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(1,0)))
sa(sp()+1)
v0=sp()
t0=gr(sp(),v0)
t0=t0-32
return (9)if((t0)!=0)else(10)
def _9():
sa(79)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sr(),gr(1,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(1,0)))
sa(sp()+1)
v0=sp()
v1=sp()
gw(v1,v0,sp())
return 8
def _10():
sp();
sa(3)
sa(3-gr(5,0))
return 11
def _11():
return (12)if(sp()!=0)else(24)
def _12():
global t0
sa(sr());
sa(tm(sr(),gr(1,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(1,0)))
sa(sp()+1)
v0=sp()
t0=gr(sp(),v0)
t0=t0-32
return (14)if((t0)!=0)else(13)
def _13():
sa(sp()+1)
sa(sr()-gr(5,0))
return 11
def _14():
global t0
t0=gr(5,0)
gw(9,0,0)
sa(sr());
sa(t0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
v0=sp()
sa(sp()-v0)
sa(sp()/2);
sa(sr());
gw(7,0,sp())
sa(gr(8,0))
sa((1)if(gr(8,0)>gr(7,0))else(0))
return 15
def _15():
return (23)if(sp()!=0)else(16)
def _16():
sa(sr());
return 17
def _17():
return (20)if(sp()!=0)else(18)
def _18():
sp();
sa(sp()-(gr(9,0)*gr(9,0)))
return (13)if(sp()!=0)else(19)
def _19():
sp();
return 8
def _20():
return (21)if((sr()+gr(9,0))>gr(7,0))else(22)
def _21():
gw(9,0,gr(9,0)/2)
sa(sp()/4);
sa(sr());
return 17
def _22():
global t0
global t1
global t2
t0=sr()+gr(9,0)
t1=gr(7,0)
t2=t1-t0
gw(7,0,t2)
gw(9,0,(sr()*2)+gr(9,0))
gw(9,0,gr(9,0)/2)
sa(sp()/4);
return 16
def _23():
sa(sp()/4);
sa((1)if(sr()>gr(7,0))else(0))
return 15
def _24():
sys.stdout.write(str(sp())+" ")
sys.stdout.flush()
return 26
def _25():
sa(sr());
sa(32)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sr(),gr(1,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(1,0)))
sa(sp()+1)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sp()+gr(3,0))
sa((1)if(sr()<gr(4,0))else(0))
return 2
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25]
c=0
while c<26:
c=m[c]()
| mit | -5,779,677,972,069,626,000 | 17.032389 | 136 | 0.504939 | false |
FedoraScientific/salome-paravis | test/VisuPrs/MeshPresentation/E4.py | 1 | 1519 | # Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# This case corresponds to: /visu/MeshPresentation/E4 case
# Create Mesh Presentation for all data of the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Create presentations
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("MeshPresentation/E4")
file = datadir + "champc.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
CreatePrsForFile(myParavis, file, [PrsTypeEnum.MESH], picturedir, pictureext)
| lgpl-2.1 | -5,568,984,066,035,110,000 | 37.948718 | 81 | 0.736011 | false |
anantag/twitterAPIHack | twitterstream.py | 1 | 1840 | import oauth2 as oauth
import urllib2 as urllib
# See Assignment 1 instructions or README for how to get these credentials
access_token_key = "55773725-mKHWq6Fyj2TmqR6xPiBamIw2EYb4B4O95CWXYJJZW"
access_token_secret = "OAw65RNmhHsXTyFIHSZod39nFRwkTdStfTmn5YB0oM"
consumer_key = "N1o286mxPcWFofmwhZqOow"
consumer_secret = "urHTpu960jcRwOhhH9GYIg3iL7l2M58vZn0V57qgfJE"
_debug = 0
oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
http_method = "GET"
http_handler = urllib.HTTPHandler(debuglevel=_debug)
https_handler = urllib.HTTPSHandler(debuglevel=_debug)
'''
Construct, sign, and open a twitter request
using the hard-coded credentials above.
'''
def twitterreq(url, method, parameters):
req = oauth.Request.from_consumer_and_token(oauth_consumer,
token=oauth_token,
                                             http_method=method,
http_url=url,
parameters=parameters)
req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token)
headers = req.to_header()
  if method == "POST":
encoded_post_data = req.to_postdata()
else:
encoded_post_data = None
url = req.to_url()
opener = urllib.OpenerDirector()
opener.add_handler(http_handler)
opener.add_handler(https_handler)
response = opener.open(url, encoded_post_data)
return response
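# Hedged example (not part of the assignment): the same signed-request helper
# can be pointed at another endpoint. The filter.json URL and the "track"
# parameter are assumptions used purely for illustration; the function is
# defined here but never called.
def fetchfiltered(track="python"):
  url = "https://stream.twitter.com/1/statuses/filter.json"
  parameters = {"track": track}
  response = twitterreq(url, "GET", parameters)
  for line in response:
    print line.strip()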
def fetchsamples():
url = "https://stream.twitter.com/1/statuses/sample.json"
parameters = []
response = twitterreq(url, "GET", parameters)
for line in response:
print line.strip()
if __name__ == '__main__':
fetchsamples()
| gpl-2.0 | -1,027,548,437,362,856,700 | 29.163934 | 78 | 0.677174 | false |
m00dawg/holland | tests/test_config.py | 1 | 1300 | import os
import shutil
import tempfile
import unittest
from holland.core.config import hollandcfg, setup_config
class TestHollandConfig(unittest.TestCase):
def setUp(self):
test_cfg = """
[holland]
plugin_dirs = /usr/share/holland/plugins
backupsets = default
umask = 0007
path = /bin:/usr/bin:/usr/local/bin:/usr/local/sbin:/usr/local/mysql/bin
[logging]
level = info
filename = /dev/null
"""
self.tmpdir = tempfile.mkdtemp()
path = os.path.join(self.tmpdir, 'holland.conf')
open(path, 'w').write(test_cfg)
setup_config(path)
def test_globalconfig(self):
import logging
cfgentry_tests = {
'holland.plugin-dirs' : ['/usr/share/holland/plugins'],
'holland.umask' : int('0007', 8),
'holland.path' : '/bin:/usr/bin:/usr/local/bin:/usr/local/sbin:/usr/local/mysql/bin',
'logging.level' : logging.INFO,
'logging.filename' : '/dev/null'
}
for key, value in cfgentry_tests.items():
self.assertEqual(hollandcfg.lookup(key), value)
def test_backupset(self):
pass
def test_provider(self):
pass
def tearDown(self):
shutil.rmtree(self.tmpdir)
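# Hedged note: this module defines no __main__ hook, so it is meant to be
# collected by a test runner; with plain unittest something like the following
# would work (the dotted module path is an assumption based on the file living
# under tests/):
#
#     python -m unittest tests.test_config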
| bsd-3-clause | 9,090,616,190,800,493,000 | 27.26087 | 97 | 0.586923 | false |
yasutaka/nlp_100 | kiyota/13.py | 1 | 1244 | """
hightemp.txt is a file that stores the records of the highest temperatures in Japan in
tab-separated format with the columns "prefecture", "location", "℃" and "day".
Write a program that performs the following processing, using hightemp.txt as the input file.
Then run the same processing with UNIX commands and check the program's output against it.
13. Merge col1.txt and col2.txt
Concatenate col1.txt and col2.txt created in problem 12 and build a text file in which the
first and second columns of the original file sit side by side, separated by a tab.
Use the paste command to check the result.
"""
# -*- coding: utf-8 -*-
import codecs
# Read the input files
with codecs.open('col1.txt','rb','utf-8') as f1:
data1 = f1.readlines()
with codecs.open('col2.txt','rb','utf-8') as f2:
data2 = f2.readlines()
with codecs.open('merge.txt','wb','utf-8') as f3:
for col1,col2 in zip(data1,data2):
f3.write("\t".join([col1.rstrip(),col2]))
# note
# UNIX
# paste command: concatenates multiple files line by line
# $ paste "C:\Users\riekok.ZIPANGU\Documents\nlp_100\kiyota\Python\col1.txt" "C:\Users\riekok.ZIPANGU\Documents\nlp_100\kiyota\Python\col2.txt"
| mit | 8,806,087,318,852,099,000 | 29.769231 | 145 | 0.700969 | false |
diorcety/translate | translate/convert/ical2po.py | 1 | 4592 | # -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert iCalendar files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ical2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import ical, po
class ical2po(object):
"""Convert one or two iCalendar files to a single PO file."""
SourceStoreClass = ical.icalfile
TargetStoreClass = po.pofile
TargetUnitClass = po.pounit
def __init__(self, input_file, output_file, template_file=None,
blank_msgstr=False, duplicate_style="msgctxt"):
"""Initialize the converter."""
self.blank_msgstr = blank_msgstr
self.duplicate_style = duplicate_style
self.extraction_msg = None
self.output_file = output_file
self.source_store = self.SourceStoreClass(input_file)
self.target_store = self.TargetStoreClass()
self.template_store = None
if template_file is not None:
self.template_store = self.SourceStoreClass(template_file)
def convert_unit(self, unit):
"""Convert a source format unit to a target format unit."""
target_unit = self.TargetUnitClass(encoding="UTF-8")
target_unit.addlocation("".join(unit.getlocations()))
target_unit.addnote(unit.getnotes("developer"), "developer")
target_unit.source = unit.source
target_unit.target = ""
return target_unit
def convert_store(self):
"""Convert a single source format file to a target format file."""
self.extraction_msg = "extracted from %s" % self.source_store.filename
for source_unit in self.source_store.units:
self.target_store.addunit(self.convert_unit(source_unit))
def merge_stores(self):
"""Convert two source format files to a target format file."""
self.extraction_msg = ("extracted from %s, %s" %
(self.template_store.filename,
self.source_store.filename))
self.source_store.makeindex()
for template_unit in self.template_store.units:
target_unit = self.convert_unit(template_unit)
template_unit_name = "".join(template_unit.getlocations())
add_translation = (
not self.blank_msgstr and
template_unit_name in self.source_store.locationindex)
if add_translation:
source_unit = self.source_store.locationindex[template_unit_name]
target_unit.target = source_unit.source
self.target_store.addunit(target_unit)
def run(self):
"""Run the converter."""
if self.template_store is None:
self.convert_store()
else:
self.merge_stores()
if self.extraction_msg:
self.target_store.header().addnote(self.extraction_msg,
"developer")
self.target_store.removeduplicates(self.duplicate_style)
if self.target_store.isempty():
return 0
self.target_store.serialize(self.output_file)
return 1
def run_converter(input_file, output_file, template_file=None, pot=False,
duplicatestyle="msgctxt"):
"""Wrapper around converter."""
return ical2po(input_file, output_file, template_file, blank_msgstr=pot,
duplicate_style=duplicatestyle).run()
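# A minimal, hedged sketch of programmatic use (the file names are assumptions
# and the files are opened in binary mode because the stores read and write
# bytes):
#
#     with open("calendar.ics", "rb") as ics, open("calendar.po", "wb") as po_out:
#         run_converter(ics, po_out, pot=True)
#
# The entries below wire the same wrapper into the standard translate-toolkit
# command-line option parser.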
formats = {
"ics": ("po", run_converter),
("ics", "ics"): ("po", run_converter),
}
def main(argv=None):
parser = convert.ConvertOptionParser(formats, usetemplates=True,
usepots=True, description=__doc__)
parser.add_duplicates_option()
parser.passthrough.append("pot")
parser.run(argv)
if __name__ == '__main__':
main()
| gpl-2.0 | 6,030,933,143,855,780,000 | 34.596899 | 94 | 0.640679 | false |
wozz/electrum-myr | gui/qt/main_window.py | 1 | 109709 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, time, datetime, re, threading
from electrum_myr.i18n import _, set_language
from electrum_myr.util import print_error, print_msg
import os.path, json, ast, traceback
import webbrowser
import shutil
import StringIO
import PyQt4
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum_myr.bitcoin import MIN_RELAY_TX_FEE, is_valid
from electrum_myr.plugins import run_hook
import icons_rc
from electrum_myr.util import format_satoshis
from electrum_myr import Transaction
from electrum_myr import mnemonic
from electrum_myr import util, bitcoin, commands, Interface, Wallet
from electrum_myr import SimpleConfig, Wallet, WalletStorage
from electrum_myr import Imported_Wallet
from amountedit import AmountEdit, BTCAmountEdit, MyLineEdit
from network_dialog import NetworkDialog
from qrcodewidget import QRCodeWidget, QRDialog
from qrtextedit import QRTextEdit
from decimal import Decimal
import httplib
import socket
import webbrowser
import csv
# status of payment requests
PR_UNPAID = 0
PR_EXPIRED = 1
PR_SENT = 2 # sent but not propagated
PR_PAID = 3 # sent and propagated
PR_ERROR = 4 # could not parse
from electrum_myr import ELECTRUM_VERSION
import re
from util import MyTreeWidget, HelpButton, EnterButton, line_dialog, text_dialog, ok_cancel_buttons, close_button, WaitingDialog
from util import filename_field, ok_cancel_buttons2, address_field
from util import MONOSPACE_FONT
def format_status(x):
if x == PR_UNPAID:
return _('Unpaid')
elif x == PR_PAID:
return _('Paid')
elif x == PR_EXPIRED:
return _('Expired')
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(func)
self.func = func
self.setIconSize(QSize(25,25))
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Return:
apply(self.func,())
default_column_widths = { "history":[40,140,350,140], "contacts":[350,330], "receive": [370,200,130] }
class ElectrumWindow(QMainWindow):
labelsChanged = pyqtSignal()
def __init__(self, config, network, gui_object):
QMainWindow.__init__(self)
self.config = config
self.network = network
self.gui_object = gui_object
self.tray = gui_object.tray
self.go_lite = gui_object.go_lite
self.lite = None
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.num_zeros = int(config.get('num_zeros',0))
self.invoices = {}
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.column_widths = self.config.get("column_widths_2", default_column_widths )
tabs.addTab(self.create_history_tab(), _('History') )
tabs.addTab(self.create_send_tab(), _('Send') )
tabs.addTab(self.create_receive_tab(), _('Receive') )
tabs.addTab(self.create_addresses_tab(), _('Addresses') )
tabs.addTab(self.create_contacts_tab(), _('Contacts') )
tabs.addTab(self.create_invoices_tab(), _('Invoices') )
tabs.addTab(self.create_console_tab(), _('Console') )
tabs.setMinimumSize(600, 400)
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
g = self.config.get("winpos-qt",[100, 100, 840, 400])
self.setGeometry(g[0], g[1], g[2], g[3])
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum-myr.png"))
self.init_menubar()
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: tabs.setCurrentIndex( (tabs.currentIndex() - 1 )%tabs.count() ))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: tabs.setCurrentIndex( (tabs.currentIndex() + 1 )%tabs.count() ))
for i in range(tabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: tabs.setCurrentIndex(i))
self.connect(self, QtCore.SIGNAL('update_status'), self.update_status)
self.connect(self, QtCore.SIGNAL('banner_signal'), lambda: self.console.showMessage(self.network.banner) )
self.connect(self, QtCore.SIGNAL('transaction_signal'), lambda: self.notify_transactions() )
self.connect(self, QtCore.SIGNAL('payment_request_ok'), self.payment_request_ok)
self.connect(self, QtCore.SIGNAL('payment_request_error'), self.payment_request_error)
self.labelsChanged.connect(self.update_tabs)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network.register_callback('updated', lambda: self.need_update.set())
self.network.register_callback('banner', lambda: self.emit(QtCore.SIGNAL('banner_signal')))
self.network.register_callback('status', lambda: self.emit(QtCore.SIGNAL('update_status')))
self.network.register_callback('new_transaction', lambda: self.emit(QtCore.SIGNAL('transaction_signal')))
self.network.register_callback('stop', self.close)
# set initial message
self.console.showMessage(self.network.banner)
self.wallet = None
self.payment_request = None
self.qr_window = None
self.not_enough_funds = False
def update_account_selector(self):
# account selector
accounts = self.wallet.get_account_names()
self.account_selector.clear()
if len(accounts) > 1:
self.account_selector.addItems([_("All accounts")] + accounts.values())
self.account_selector.setCurrentIndex(0)
self.account_selector.show()
else:
self.account_selector.hide()
def close_wallet(self):
self.wallet.stop_threads()
run_hook('close_wallet')
def load_wallet(self, wallet):
import electrum_myr as electrum
self.wallet = wallet
self.update_wallet_format()
# address used to create a dummy transaction and estimate transaction fee
self.dummy_address = self.wallet.addresses(False)[0]
self.invoices = self.wallet.storage.get('invoices', {})
self.accounts_expanded = self.wallet.storage.get('accounts_expanded',{})
self.current_account = self.wallet.storage.get("current_account", None)
title = 'Electrum-MYR ' + self.wallet.electrum_version + ' - ' + self.wallet.storage.path
if self.wallet.is_watching_only(): title += ' [%s]' % (_('watching only'))
self.setWindowTitle( title )
self.update_wallet()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
self.notify_transactions()
self.update_account_selector()
# update menus
self.new_account_menu.setEnabled(self.wallet.can_create_accounts())
self.private_keys_menu.setEnabled(not self.wallet.is_watching_only())
self.password_menu.setEnabled(self.wallet.can_change_password())
self.seed_menu.setEnabled(self.wallet.has_seed())
self.mpk_menu.setEnabled(self.wallet.is_deterministic())
self.import_menu.setEnabled(self.wallet.can_import())
self.export_menu.setEnabled(self.wallet.can_export())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.update_receive_tab()
run_hook('load_wallet', wallet)
def update_wallet_format(self):
# convert old-format imported keys
if self.wallet.imported_keys:
password = self.password_dialog(_("Please enter your password in order to update imported keys"))
try:
self.wallet.convert_imported_keys(password)
except:
self.show_message("error")
def open_wallet(self):
wallet_folder = self.wallet.storage.path
filename = unicode( QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) )
if not filename:
return
storage = WalletStorage({'wallet_path': filename})
if not storage.file_exists:
self.show_message("file not found "+ filename)
return
# close current wallet
self.close_wallet()
# load new wallet
wallet = Wallet(storage)
wallet.start_threads(self.network)
self.load_wallet(wallet)
def backup_wallet(self):
import shutil
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename = unicode( QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) )
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
QMessageBox.information(None,"Wallet backup created", _("A copy of your wallet file was created in")+" '%s'" % str(new_path))
except (IOError, os.error), reason:
QMessageBox.critical(None,"Unable to create backup", _("Electrum was unable to copy your wallet file to the specified location.")+"\n" + str(reason))
def new_wallet(self):
import installwizard
wallet_folder = os.path.dirname(self.wallet.storage.path)
i = 1
while True:
filename = "wallet_%d"%i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
filename = line_dialog(self, _('New Wallet'), _('Enter file name') + ':', _('OK'), filename)
if not filename:
return
full_path = os.path.join(wallet_folder, filename)
storage = WalletStorage({'wallet_path': full_path})
if storage.file_exists:
QMessageBox.critical(None, "Error", _("File exists"))
return
wizard = installwizard.InstallWizard(self.config, self.network, storage)
wallet = wizard.run('new')
if wallet:
self.load_wallet(wallet)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&New contact"), self.new_contact_dialog)
self.new_account_menu = wallet_menu.addAction(_("&New account"), self.new_account_dialog)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.mpk_menu = wallet_menu.addAction(_("&Master Public Keys"), self.show_master_public_keys)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
wallet_menu.addAction(_("&Export History"), self.export_history_dialog)
tools_menu = menubar.addMenu(_("&Tools"))
        # 'Settings' and 'Preferences' are reserved menu keywords on OS X; use a different label as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.run_network_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
csv_transaction_menu = tools_menu.addMenu(_("&Create transaction"))
csv_transaction_menu.addAction(_("&From CSV file"), self.do_process_from_csv_file)
csv_transaction_menu.addAction(_("&From CSV text"), self.do_process_from_csv_text)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://myr.electr.us"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://myr.electr.us/documentation.html")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
self.setMenuBar(menubar)
def show_about(self):
QMessageBox.about(self, "Electrum-MYR",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying Myriadcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Myriadcoin system."))
def show_report_bug(self):
QMessageBox.information(self, "Electrum-MYR - " + _("Reporting Bugs"),
_("Please report any bugs as issues on github:")+" <a href=\"https://github.com/cryptorepaircrew/electrum-myr/issues\">https://github.com/cryptorepaircrew/electrum-myr/issues</a>")
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
print_error("Notifying GUI")
if len(self.network.pending_transactions_for_notifications) > 0:
# Combine the transactions if there are more then three
tx_amount = len(self.network.pending_transactions_for_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.network.pending_transactions_for_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received. Total amount received in the new transactions %(amount)s %(unit)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount(total_amount), 'unit' : self.base_unit()})
self.network.pending_transactions_for_notifications = []
else:
for tx in self.network.pending_transactions_for_notifications:
if tx:
self.network.pending_transactions_for_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx)
if(v > 0):
self.notify(_("New transaction received. %(amount)s %(unit)s") % { 'amount' : self.format_amount(v), 'unit' : self.base_unit()})
def notify(self, message):
if self.tray:
self.tray.showMessage("Electrum-MYR", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
fileName = unicode( QFileDialog.getOpenFileName(self, title, directory, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
path = os.path.join( directory, filename )
fileName = unicode( QFileDialog.getSaveFileName(self, title, path, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def close(self):
if self.qr_window:
self.qr_window.close()
QMainWindow.close(self)
run_hook('close_main_window')
def connect_slots(self, sender):
self.connect(sender, QtCore.SIGNAL('timersignal'), self.timer_actions)
self.previous_payto_e=''
def timer_actions(self):
if self.need_update.is_set():
self.update_wallet()
self.need_update.clear()
run_hook('timer_actions')
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'uMYR'
if self.decimal_point == 5:
return 'mMYR'
if self.decimal_point == 8:
return 'MYR'
raise Exception('Unknown base unit')
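    # Worked example (illustrative numbers, not taken from the wallet): amounts
    # are stored as integers in the smallest unit, so 150000000 renders as
    # 1.5 MYR when decimal_point is 8, as 1500 mMYR when it is 5, and as
    # 1500000 uMYR when it is 2; format_amount() only moves the decimal point
    # and never rescales the stored integer.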
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_lag = self.network.get_local_height() - self.network.get_server_height()
if not self.wallet.up_to_date:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u = self.wallet.get_account_balance(self.current_account)
text = _( "Balance" ) + ": %s "%( self.format_amount(c) ) + self.base_unit()
if u: text += " [%s unconfirmed]"%( self.format_amount(u,True).strip() )
# append fiat balance and price from exchange rate plugin
r = {}
run_hook('get_fiat_status_text', c+u, r)
quote = r.get(0)
if quote:
text += "%s"%quote
if self.tray:
self.tray.setToolTip(text)
icon = QIcon(":icons/status_connected.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.update_history_tab()
self.update_receive_tab()
self.update_address_tab()
self.update_contacts_tab()
self.update_completions()
self.update_invoices_tab()
def create_history_tab(self):
self.history_list = l = MyTreeWidget(self)
l.setColumnCount(5)
for i,width in enumerate(self.column_widths['history']):
l.setColumnWidth(i, width)
l.setHeaderLabels( [ '', _('Date'), _('Description') , _('Amount'), _('Balance')] )
l.itemDoubleClicked.connect(self.tx_label_clicked)
l.itemChanged.connect(self.tx_label_changed)
l.customContextMenuRequested.connect(self.create_history_menu)
return l
def create_history_menu(self, position):
self.history_list.selectedIndexes()
item = self.history_list.currentItem()
be = self.config.get('block_explorer', 'myriad.theblockexplorer.com')
        if be == 'myriad.theblockexplorer.com':
            block_explorer = 'http://myriad.theblockexplorer.com:2750/tx/'
        elif be == 'birdonwheels5.no-ip.org':
            block_explorer = 'http://birdonwheels5.no-ip.org/tx/'
        elif be == 'myr.coinpi.pe':
            block_explorer = 'http://myr.coinpi.pe/tx/'
        else:
            # fall back to the default explorer so an unexpected config value
            # cannot leave block_explorer unbound
            block_explorer = 'http://myriad.theblockexplorer.com:2750/tx/'
if not item: return
tx_hash = str(item.data(0, Qt.UserRole).toString())
if not tx_hash: return
menu = QMenu()
menu.addAction(_("Copy ID to Clipboard"), lambda: self.app.clipboard().setText(tx_hash))
menu.addAction(_("Details"), lambda: self.show_transaction(self.wallet.transactions.get(tx_hash)))
menu.addAction(_("Edit description"), lambda: self.tx_label_clicked(item,2))
menu.addAction(_("View on block explorer"), lambda: webbrowser.open(block_explorer + tx_hash))
menu.exec_(self.contacts_list.viewport().mapToGlobal(position))
def show_transaction(self, tx):
import transaction_dialog
d = transaction_dialog.TxDialog(tx, self)
d.exec_()
def tx_label_clicked(self, item, column):
if column==2 and item.isSelected():
self.is_edit=True
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.history_list.editItem( item, column )
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.is_edit=False
def tx_label_changed(self, item, column):
if self.is_edit:
return
self.is_edit=True
tx_hash = str(item.data(0, Qt.UserRole).toString())
tx = self.wallet.transactions.get(tx_hash)
text = unicode( item.text(2) )
self.wallet.set_label(tx_hash, text)
if text:
item.setForeground(2, QBrush(QColor('black')))
else:
text = self.wallet.get_default_label(tx_hash)
item.setText(2, text)
item.setForeground(2, QBrush(QColor('gray')))
self.is_edit=False
def edit_label(self, is_recv):
l = self.address_list if is_recv else self.contacts_list
item = l.currentItem()
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
l.editItem( item, 1 )
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
def address_label_clicked(self, item, column, l, column_addr, column_label):
if column == column_label and item.isSelected():
is_editable = item.data(0, 32).toBool()
if not is_editable:
return
addr = unicode( item.text(column_addr) )
label = unicode( item.text(column_label) )
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
l.editItem( item, column )
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
def address_label_changed(self, item, column, l, column_addr, column_label):
if column == column_label:
addr = unicode( item.text(column_addr) )
text = unicode( item.text(column_label) )
is_editable = item.data(0, 32).toBool()
if not is_editable:
return
changed = self.wallet.set_label(addr, text)
if changed:
self.update_history_tab()
self.update_completions()
self.current_item_changed(item)
run_hook('item_changed', item, column)
def current_item_changed(self, a):
run_hook('current_item_changed', a)
def update_history_tab(self):
self.history_list.clear()
for item in self.wallet.get_tx_history(self.current_account):
tx_hash, conf, is_mine, value, fee, balance, timestamp = item
time_str = _("unknown")
if conf > 0:
try:
time_str = datetime.datetime.fromtimestamp( timestamp).isoformat(' ')[:-3]
except Exception:
time_str = _("error")
if conf == -1:
time_str = 'unverified'
icon = QIcon(":icons/unconfirmed.png")
elif conf == 0:
time_str = 'pending'
icon = QIcon(":icons/unconfirmed.png")
elif conf < 6:
icon = QIcon(":icons/clock%d.png"%conf)
else:
icon = QIcon(":icons/confirmed.png")
if value is not None:
v_str = self.format_amount(value, True, whitespaces=True)
else:
v_str = '--'
balance_str = self.format_amount(balance, whitespaces=True)
if tx_hash:
label, is_default_label = self.wallet.get_label(tx_hash)
else:
label = _('Pruned transaction outputs')
is_default_label = False
item = QTreeWidgetItem( [ '', time_str, label, v_str, balance_str] )
item.setFont(2, QFont(MONOSPACE_FONT))
item.setFont(3, QFont(MONOSPACE_FONT))
item.setFont(4, QFont(MONOSPACE_FONT))
if value < 0:
item.setForeground(3, QBrush(QColor("#BC1E1E")))
if tx_hash:
item.setData(0, Qt.UserRole, tx_hash)
item.setToolTip(0, "%d %s\nTxId:%s" % (conf, _('Confirmations'), tx_hash) )
if is_default_label:
item.setForeground(2, QBrush(QColor('grey')))
item.setIcon(0, icon)
self.history_list.insertTopLevelItem(0,item)
self.history_list.setCurrentItem(self.history_list.topLevelItem(0))
run_hook('history_tab_update')
def create_receive_tab(self):
w = QWidget()
grid = QGridLayout(w)
grid.setColumnMinimumWidth(3, 300)
grid.setColumnStretch(5, 1)
self.receive_address_e = QLineEdit()
self.receive_address_e.setReadOnly(True)
grid.addWidget(QLabel(_('Receiving address')), 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, 3)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Message')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, 3)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1, 1, 2)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
grid.addWidget(self.save_request_button, 3, 1)
clear_button = QPushButton(_('New'))
clear_button.clicked.connect(self.new_receive_address)
grid.addWidget(clear_button, 3, 2)
grid.setRowStretch(4, 1)
self.receive_qr = QRCodeWidget(fixedSize=200)
grid.addWidget(self.receive_qr, 0, 4, 5, 2)
self.receive_qr.mousePressEvent = lambda x: self.toggle_qr_window()
grid.setRowStretch(5, 1)
self.receive_requests_label = QLabel(_('Saved Requests'))
self.receive_list = MyTreeWidget(self)
self.receive_list.customContextMenuRequested.connect(self.receive_list_menu)
self.receive_list.currentItemChanged.connect(self.receive_item_changed)
self.receive_list.itemClicked.connect(self.receive_item_changed)
self.receive_list.setHeaderLabels( [_('Address'), _('Message'), _('Amount')] )
self.receive_list.setColumnWidth(0, 340)
h = self.receive_list.header()
h.setStretchLastSection(False)
h.setResizeMode(1, QHeaderView.Stretch)
grid.addWidget(self.receive_requests_label, 6, 0)
grid.addWidget(self.receive_list, 7, 0, 1, 6)
return w
def receive_item_changed(self, item):
if item is None:
return
addr = str(item.text(0))
amount, message = self.receive_requests[addr]
self.receive_address_e.setText(addr)
self.receive_message_e.setText(message)
self.receive_amount_e.setAmount(amount)
def receive_list_delete(self, item):
addr = str(item.text(0))
self.receive_requests.pop(addr)
self.wallet.storage.put('receive_requests', self.receive_requests)
self.update_receive_tab()
self.clear_receive_tab()
def receive_list_menu(self, position):
item = self.receive_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Copy to clipboard"), lambda: self.app.clipboard().setText(str(item.text(0))))
menu.addAction(_("Delete"), lambda: self.receive_list_delete(item))
menu.exec_(self.receive_list.viewport().mapToGlobal(position))
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = str(self.receive_message_e.text())
if not message and not amount:
QMessageBox.warning(self, _('Error'), _('No message or amount'), _('OK'))
return
self.receive_requests = self.wallet.storage.get('receive_requests',{})
self.receive_requests[addr] = (amount, message)
self.wallet.storage.put('receive_requests', self.receive_requests)
self.update_receive_tab()
def new_receive_address(self):
domain = self.wallet.get_account_addresses(self.current_account, include_change=False)
for addr in domain:
if not self.wallet.history.get(addr) and addr not in self.receive_requests.keys():
break
else:
if isinstance(self.wallet, Imported_Wallet):
self.show_message(_('No more addresses in your wallet.'))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(self.current_account, False)
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
self.receive_requests = self.wallet.storage.get('receive_requests',{})
domain = self.wallet.get_account_addresses(self.current_account, include_change=False)
for addr in domain:
if not self.wallet.history.get(addr) and addr not in self.receive_requests.keys():
break
else:
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def toggle_qr_window(self):
import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.tabs.setCurrentIndex(2)
self.receive_address_e.setText(addr)
def update_receive_tab(self):
self.receive_requests = self.wallet.storage.get('receive_requests',{})
b = len(self.receive_requests) > 0
self.receive_list.setVisible(b)
self.receive_requests_label.setVisible(b)
self.receive_list.clear()
for address, v in self.receive_requests.items():
amount, message = v
item = QTreeWidgetItem( [ address, message, self.format_amount(amount) if amount else ""] )
item.setFont(0, QFont(MONOSPACE_FONT))
self.receive_list.addTopLevelItem(item)
def update_receive_qr(self):
import urlparse, urllib
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = unicode(self.receive_message_e.text()).encode('utf8')
self.save_request_button.setEnabled((amount is not None) or (message != ""))
if addr:
query = []
if amount:
query.append('amount=%s'%format_satoshis(amount))
if message:
query.append('message=%s'%urllib.quote(message))
p = urlparse.ParseResult(scheme='myriadcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
url = urlparse.urlunparse(p)
else:
url = ""
self.receive_qr.setData(url)
if self.qr_window:
self.qr_window.set_content(addr, amount, message, url)
def create_send_tab(self):
w = QWidget()
self.send_grid = grid = QGridLayout(w)
grid.setSpacing(8)
grid.setColumnMinimumWidth(3,300)
grid.setColumnStretch(5,1)
grid.setRowStretch(8, 1)
from paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_help = HelpButton(_('Recipient of the funds.') + '\n\n' + _('You may enter a Myriadcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Myriadcoin address)'))
grid.addWidget(QLabel(_('Pay to')), 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, 3)
grid.addWidget(self.payto_help, 1, 4)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
self.message_e = MyLineEdit()
self.message_help = HelpButton(_('Description of the transaction (not mandatory).') + '\n\n' + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.'))
grid.addWidget(QLabel(_('Description')), 2, 0)
grid.addWidget(self.message_e, 2, 1, 1, 3)
grid.addWidget(self.message_help, 2, 4)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self)
self.from_list.setColumnCount(2)
self.from_list.setColumnWidth(0, 350)
self.from_list.setColumnWidth(1, 50)
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
self.from_list.setContextMenuPolicy(Qt.CustomContextMenu)
self.from_list.customContextMenuRequested.connect(self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, 3)
self.set_pay_from([])
self.amount_help = HelpButton(_('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet. Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') \
+ '\n\n' + _('Keyboard shortcut: type "!" to send all your coins.'))
grid.addWidget(QLabel(_('Amount')), 4, 0)
grid.addWidget(self.amount_e, 4, 1, 1, 2)
grid.addWidget(self.amount_help, 4, 3)
self.fee_e_label = QLabel(_('Fee'))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(self.fee_e_label, 5, 0)
grid.addWidget(self.fee_e, 5, 1, 1, 2)
msg = _('Myriadcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_help = HelpButton(msg)
grid.addWidget(self.fee_e_help, 5, 3)
self.update_fee_edit()
self.send_button = EnterButton(_("Send"), self.do_send)
grid.addWidget(self.send_button, 6, 1)
b = EnterButton(_("Clear"), self.do_clear)
grid.addWidget(b, 6, 2)
self.payto_sig = QLabel('')
grid.addWidget(self.payto_sig, 7, 0, 1, 4)
w.setLayout(grid)
def on_shortcut():
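            # "!" shortcut: fill the amount field with the whole sendable balance
            # minus the fee estimated from a dummy transaction.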
sendable = self.get_sendable_balance()
inputs = self.get_coins()
for i in inputs: self.wallet.add_input_info(i)
addr = self.payto_e.payto_address if self.payto_e.payto_address else self.dummy_address
output = ('address', addr, sendable)
dummy_tx = Transaction(inputs, [output])
fee = self.wallet.estimated_fee(dummy_tx)
self.amount_e.setAmount(max(0,sendable-fee))
self.amount_e.textEdited.emit("")
self.fee_e.setAmount(fee)
self.amount_e.shortcut.connect(on_shortcut)
def text_edited(is_fee):
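            # Re-estimate the fee (or keep a manually edited one) and flag
            # insufficient funds whenever the recipient, amount or fee changes.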
outputs = self.payto_e.get_outputs()
amount = self.amount_e.get_amount()
fee = self.fee_e.get_amount() if is_fee else None
if amount is None:
self.fee_e.setAmount(None)
self.not_enough_funds = False
else:
if not outputs:
addr = self.payto_e.payto_address if self.payto_e.payto_address else self.dummy_address
outputs = [('address', addr, amount)]
tx = self.wallet.make_unsigned_transaction(outputs, fee, coins = self.get_coins())
self.not_enough_funds = (tx is None)
if not is_fee:
fee = self.wallet.get_tx_fee(tx) if tx else None
self.fee_e.setAmount(fee)
self.payto_e.textChanged.connect(lambda:text_edited(False))
self.amount_e.textEdited.connect(lambda:text_edited(False))
self.fee_e.textEdited.connect(lambda:text_edited(True))
def entry_changed():
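            # Turn the amount/fee fields red and show a status-bar warning when
            # the wallet cannot cover the requested amount.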
if not self.not_enough_funds:
palette = QPalette()
palette.setColor(self.amount_e.foregroundRole(), QColor('black'))
text = ""
else:
palette = QPalette()
palette.setColor(self.amount_e.foregroundRole(), QColor('red'))
text = _( "Not enough funds" )
c, u = self.wallet.get_frozen_balance()
if c+u: text += ' (' + self.format_amount(c+u).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
self.statusBar().showMessage(text)
self.amount_e.setPalette(palette)
self.fee_e.setPalette(palette)
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
run_hook('create_send_tab', grid)
return w
def update_fee_edit(self):
b = self.config.get('can_edit_fees', False)
self.fee_e.setVisible(b)
self.fee_e_label.setVisible(b)
self.fee_e_help.setVisible(b)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, domain = None):
self.pay_from = [] if domain == [] else self.wallet.get_unspent_coins(domain)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:8] + '...' + h[-8:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def update_completions(self):
l = []
for addr,label in self.wallet.labels.items():
if addr in self.wallet.addressbook:
l.append( label + ' <' + addr + '>')
run_hook('update_completions', l)
self.completions.setStringList(l)
def protected(func):
return lambda s, *args: s.do_protect(func, args)
def read_send_tab(self):
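        # Validate the Send tab and return (outputs, fee, label, coins),
        # or None if the input is invalid or the payment request has expired.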
if self.payment_request and self.payment_request.has_expired():
QMessageBox.warning(self, _('Error'), _('Payment request has expired'), _('OK'))
return
label = unicode( self.message_e.text() )
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs()
if not outputs:
QMessageBox.warning(self, _('Error'), _('No outputs'), _('OK'))
return
for type, addr, amount in outputs:
if addr is None:
QMessageBox.warning(self, _('Error'), _('Myriadcoin Address is None'), _('OK'))
return
if type == 'op_return':
continue
if type == 'address' and not bitcoin.is_address(addr):
QMessageBox.warning(self, _('Error'), _('Invalid Myriadcoin Address'), _('OK'))
return
if amount is None:
QMessageBox.warning(self, _('Error'), _('Invalid Amount'), _('OK'))
return
fee = self.fee_e.get_amount()
if fee is None:
QMessageBox.warning(self, _('Error'), _('Invalid Fee'), _('OK'))
return
amount = sum(map(lambda x:x[2], outputs))
confirm_amount = self.config.get('confirm_amount', 1000000000)
if amount >= confirm_amount:
o = '\n'.join(map(lambda x:x[1], outputs))
if not self.question(_("send %(amount)s to %(address)s?")%{ 'amount' : self.format_amount(amount) + ' '+ self.base_unit(), 'address' : o}):
return
coins = self.get_coins()
return outputs, fee, label, coins
def do_send(self):
r = self.read_send_tab()
if not r:
return
outputs, fee, label, coins = r
try:
tx = self.wallet.make_unsigned_transaction(outputs, fee, None, coins = coins)
tx.error = None
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
if tx.get_fee() < tx.required_fee(self.wallet.verifier):
QMessageBox.warning(self, _('Error'), _("This transaction requires a higher fee, or it will not be propagated by the network."), _('OK'))
return
if not self.config.get('can_edit_fees', False):
if not self.question(_("A fee of %(fee)s will be added to this transaction.\nProceed?")%{ 'fee' : self.format_amount(fee) + ' '+ self.base_unit()}):
return
else:
confirm_fee = self.config.get('confirm_fee', 1000000)
if fee >= confirm_fee:
if not self.question(_("The fee for this transaction seems unusually high.\nAre you really sure you want to pay %(fee)s in fees?")%{ 'fee' : self.format_amount(fee) + ' '+ self.base_unit()}):
return
self.send_tx(tx, label)
@protected
def send_tx(self, tx, label, password):
self.send_button.setDisabled(True)
# call hook to see if plugin needs gui interaction
run_hook('send_tx', tx)
# sign the tx
def sign_thread():
if self.wallet.is_watching_only():
return tx
keypairs = {}
try:
self.wallet.add_keypairs(tx, keypairs, password)
self.wallet.sign_transaction(tx, keypairs, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
tx.error = str(e)
return tx
def sign_done(tx):
if tx.error:
self.show_message(tx.error)
self.send_button.setDisabled(False)
return
if label:
self.wallet.set_label(tx.hash(), label)
if not tx.is_complete() or self.config.get('show_before_broadcast'):
self.show_transaction(tx)
self.do_clear()
self.send_button.setDisabled(False)
return
self.broadcast_transaction(tx)
# keep a reference to WaitingDialog or the gui might crash
self.waiting_dialog = WaitingDialog(self, 'Signing..', sign_thread, sign_done)
self.waiting_dialog.start()
def broadcast_transaction(self, tx):
def broadcast_thread():
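            # Broadcast the transaction; if a BIP70 payment request is active,
            # record the invoice as paid and send the payment ACK with a
            # refund address.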
pr = self.payment_request
if pr is None:
return self.wallet.sendtx(tx)
if pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.wallet.sendtx(tx)
if not status:
return False, msg
self.invoices[pr.get_id()] = (pr.get_domain(), pr.get_memo(), pr.get_amount(), pr.get_expiration_date(), PR_PAID, tx.hash())
self.wallet.storage.put('invoices', self.invoices)
self.update_invoices_tab()
self.payment_request = None
refund_address = self.wallet.addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
def broadcast_done(status, msg):
if status:
QMessageBox.information(self, '', _('Payment sent.') + '\n' + msg, _('OK'))
self.do_clear()
else:
QMessageBox.warning(self, _('Error'), msg, _('OK'))
self.send_button.setDisabled(False)
self.waiting_dialog = WaitingDialog(self, 'Broadcasting..', broadcast_thread, broadcast_done)
self.waiting_dialog.start()
def prepare_for_payment_request(self):
self.tabs.setCurrentIndex(1)
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
for h in [self.payto_help, self.amount_help, self.message_help]:
h.hide()
self.payto_e.setText(_("please wait..."))
return True
def payment_request_ok(self):
pr = self.payment_request
pr_id = pr.get_id()
if pr_id not in self.invoices:
self.invoices[pr_id] = (pr.get_domain(), pr.get_memo(), pr.get_amount(), pr.get_expiration_date(), PR_UNPAID, None)
self.wallet.storage.put('invoices', self.invoices)
self.update_invoices_tab()
else:
print_error('invoice already in list')
status = self.invoices[pr_id][4]
if status == PR_PAID:
self.do_clear()
self.show_message("invoice already paid")
self.payment_request = None
return
self.payto_help.show()
self.payto_help.set_alt(lambda: self.show_pr_details(pr))
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.domain)
self.amount_e.setText(self.format_amount(pr.get_amount()))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.do_clear()
self.show_message(self.payment_request.error)
self.payment_request = None
def pay_from_URI(self,URI):
if not URI:
return
address, amount, label, message, request_url = util.parse_URI(URI)
try:
address, amount, label, message, request_url = util.parse_URI(URI)
except Exception as e:
QMessageBox.warning(self, _('Error'), _('Invalid myriadcoin URI:') + '\n' + str(e), _('OK'))
return
self.tabs.setCurrentIndex(1)
if not request_url:
if label:
if self.wallet.labels.get(address) != label:
if self.question(_('Save label "%s" for address %s ?'%(label,address))):
if address not in self.wallet.addressbook and not self.wallet.is_mine(address):
self.wallet.addressbook.append(address)
self.wallet.set_label(address, label)
else:
label = self.wallet.labels.get(address)
if address:
self.payto_e.setText(label + ' <'+ address +'>' if label else address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
return
from electrum_myr import paymentrequest
def payment_request():
self.payment_request = paymentrequest.PaymentRequest(self.config)
self.payment_request.read(request_url)
if self.payment_request.verify():
self.emit(SIGNAL('payment_request_ok'))
else:
self.emit(SIGNAL('payment_request_error'))
self.pr_thread = threading.Thread(target=payment_request).start()
self.prepare_for_payment_request()
def do_clear(self):
self.not_enough_funds = False
self.payto_e.is_pr = False
self.payto_sig.setVisible(False)
for e in [self.payto_e, self.message_e, self.amount_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
for h in [self.payto_help, self.amount_help, self.message_help]:
h.show()
self.payto_help.set_alt(None)
self.set_pay_from([])
self.update_status()
def set_addrs_frozen(self,addrs,freeze):
for addr in addrs:
if not addr: continue
if addr in self.wallet.frozen_addresses and not freeze:
self.wallet.unfreeze(addr)
elif addr not in self.wallet.frozen_addresses and freeze:
self.wallet.freeze(addr)
self.update_address_tab()
def create_list_tab(self, headers):
"generic tab creation method"
l = MyTreeWidget(self)
l.setColumnCount( len(headers) )
l.setHeaderLabels( headers )
w = QWidget()
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setMargin(0)
vbox.setSpacing(0)
vbox.addWidget(l)
buttons = QWidget()
vbox.addWidget(buttons)
return l, w
def create_addresses_tab(self):
l, w = self.create_list_tab([ _('Address'), _('Label'), _('Balance'), _('Tx')])
for i,width in enumerate(self.column_widths['receive']):
l.setColumnWidth(i, width)
l.setContextMenuPolicy(Qt.CustomContextMenu)
l.customContextMenuRequested.connect(self.create_receive_menu)
l.setSelectionMode(QAbstractItemView.ExtendedSelection)
l.itemDoubleClicked.connect(lambda a, b: self.address_label_clicked(a,b,l,0,1))
l.itemChanged.connect(lambda a,b: self.address_label_changed(a,b,l,0,1))
l.currentItemChanged.connect(lambda a,b: self.current_item_changed(a))
self.address_list = l
return w
def save_column_widths(self):
self.column_widths["receive"] = []
for i in range(self.address_list.columnCount() -1):
self.column_widths["receive"].append(self.address_list.columnWidth(i))
self.column_widths["history"] = []
for i in range(self.history_list.columnCount() - 1):
self.column_widths["history"].append(self.history_list.columnWidth(i))
self.column_widths["contacts"] = []
for i in range(self.contacts_list.columnCount() - 1):
self.column_widths["contacts"].append(self.contacts_list.columnWidth(i))
self.config.set_key("column_widths_2", self.column_widths, True)
def create_contacts_tab(self):
l, w = self.create_list_tab([_('Address'), _('Label'), _('Tx')])
l.setContextMenuPolicy(Qt.CustomContextMenu)
l.customContextMenuRequested.connect(self.create_contact_menu)
for i,width in enumerate(self.column_widths['contacts']):
l.setColumnWidth(i, width)
l.itemDoubleClicked.connect(lambda a, b: self.address_label_clicked(a,b,l,0,1))
l.itemChanged.connect(lambda a,b: self.address_label_changed(a,b,l,0,1))
self.contacts_list = l
return w
def create_invoices_tab(self):
l, w = self.create_list_tab([_('Requestor'), _('Memo'), _('Date'), _('Amount'), _('Status')])
l.setColumnWidth(0, 150)
l.setColumnWidth(2, 150)
l.setColumnWidth(3, 150)
h = l.header()
h.setStretchLastSection(False)
h.setResizeMode(1, QHeaderView.Stretch)
l.setContextMenuPolicy(Qt.CustomContextMenu)
l.customContextMenuRequested.connect(self.create_invoice_menu)
self.invoices_list = l
return w
def update_invoices_tab(self):
invoices = self.wallet.storage.get('invoices', {})
l = self.invoices_list
l.clear()
for key, value in sorted(invoices.items(), key=lambda x: -x[1][3]):
domain, memo, amount, expiration_date, status, tx_hash = value
if status == PR_UNPAID and expiration_date and expiration_date < time.time():
status = PR_EXPIRED
date_str = datetime.datetime.fromtimestamp(expiration_date).isoformat(' ')[:-3]
item = QTreeWidgetItem( [ domain, memo, date_str, self.format_amount(amount, whitespaces=True), format_status(status)] )
item.setData(0, 32, key)
item.setFont(0, QFont(MONOSPACE_FONT))
item.setFont(3, QFont(MONOSPACE_FONT))
l.addTopLevelItem(item)
l.setCurrentItem(l.topLevelItem(0))
def delete_imported_key(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_imported_key(addr)
self.update_address_tab()
self.update_history_tab()
def edit_account_label(self, k):
text, ok = QInputDialog.getText(self, _('Rename account'), _('Name') + ':', text = self.wallet.labels.get(k,''))
if ok:
label = unicode(text)
self.wallet.set_label(k,label)
self.update_address_tab()
def account_set_expanded(self, item, k, b):
item.setExpanded(b)
self.accounts_expanded[k] = b
def create_account_menu(self, position, k, item):
menu = QMenu()
exp = item.isExpanded()
menu.addAction(_("Minimize") if exp else _("Maximize"), lambda: self.account_set_expanded(item, k, not exp))
menu.addAction(_("Rename"), lambda: self.edit_account_label(k))
if self.wallet.seed_version > 4:
menu.addAction(_("View details"), lambda: self.show_account_details(k))
if self.wallet.account_is_pending(k):
menu.addAction(_("Delete"), lambda: self.delete_pending_account(k))
menu.exec_(self.address_list.viewport().mapToGlobal(position))
def delete_pending_account(self, k):
self.wallet.delete_pending_account(k)
self.update_address_tab()
self.update_account_selector()
def create_receive_menu(self, position):
# fixme: this function apparently has a side effect.
# if it is not called the menu pops up several times
#self.address_list.selectedIndexes()
selected = self.address_list.selectedItems()
multi_select = len(selected) > 1
addrs = [unicode(item.text(0)) for item in selected]
if not multi_select:
item = self.address_list.itemAt(position)
if not item: return
addr = addrs[0]
if not is_valid(addr):
k = str(item.data(0,32).toString())
if k:
self.create_account_menu(position, k, item)
else:
item.setExpanded(not item.isExpanded())
return
menu = QMenu()
if not multi_select:
menu.addAction(_("Copy to clipboard"), lambda: self.app.clipboard().setText(addr))
menu.addAction(_("Request payment"), lambda: self.receive_at(addr))
menu.addAction(_("Edit label"), lambda: self.edit_label(True))
menu.addAction(_("Public keys"), lambda: self.show_public_keys(addr))
if self.wallet.can_export():
menu.addAction(_("Private key"), lambda: self.show_private_key(addr))
if not self.wallet.is_watching_only():
menu.addAction(_("Sign/verify message"), lambda: self.sign_verify_message(addr))
menu.addAction(_("Encrypt/decrypt message"), lambda: self.encrypt_message(addr))
if self.wallet.is_imported(addr):
menu.addAction(_("Remove from wallet"), lambda: self.delete_imported_key(addr))
if any(addr not in self.wallet.frozen_addresses for addr in addrs):
menu.addAction(_("Freeze"), lambda: self.set_addrs_frozen(addrs, True))
if any(addr in self.wallet.frozen_addresses for addr in addrs):
menu.addAction(_("Unfreeze"), lambda: self.set_addrs_frozen(addrs, False))
def can_send(addr):
return addr not in self.wallet.frozen_addresses and self.wallet.get_addr_balance(addr) != (0, 0)
if any(can_send(addr) for addr in addrs):
menu.addAction(_("Send From"), lambda: self.send_from_addresses(addrs))
run_hook('receive_menu', menu, addrs)
menu.exec_(self.address_list.viewport().mapToGlobal(position))
def get_sendable_balance(self):
return sum(map(lambda x:x['value'], self.get_coins()))
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
domain = self.wallet.get_account_addresses(self.current_account)
for i in self.wallet.frozen_addresses:
if i in domain: domain.remove(i)
return self.wallet.get_unspent_coins(domain)
def send_from_addresses(self, addrs):
self.set_pay_from( addrs )
self.tabs.setCurrentIndex(1)
def payto(self, addr):
if not addr: return
label = self.wallet.labels.get(addr)
m_addr = label + ' <' + addr + '>' if label else addr
self.tabs.setCurrentIndex(1)
self.payto_e.setText(m_addr)
self.amount_e.setFocus()
def delete_contact(self, x):
if self.question(_("Do you want to remove")+" %s "%x +_("from your list of contacts?")):
self.wallet.delete_contact(x)
self.wallet.set_label(x, None)
self.update_history_tab()
self.update_contacts_tab()
self.update_completions()
def create_contact_menu(self, position):
item = self.contacts_list.itemAt(position)
menu = QMenu()
if not item:
menu.addAction(_("New contact"), lambda: self.new_contact_dialog())
else:
addr = unicode(item.text(0))
label = unicode(item.text(1))
is_editable = item.data(0,32).toBool()
payto_addr = item.data(0,33).toString()
menu.addAction(_("Copy to Clipboard"), lambda: self.app.clipboard().setText(addr))
menu.addAction(_("Pay to"), lambda: self.payto(payto_addr))
menu.addAction(_("QR code"), lambda: self.show_qrcode("myriadcoin:" + addr, _("Address")))
if is_editable:
menu.addAction(_("Edit label"), lambda: self.edit_label(False))
menu.addAction(_("Delete"), lambda: self.delete_contact(addr))
run_hook('create_contact_menu', menu, item)
menu.exec_(self.contacts_list.viewport().mapToGlobal(position))
def delete_invoice(self, key):
self.invoices.pop(key)
self.wallet.storage.put('invoices', self.invoices)
self.update_invoices_tab()
def show_invoice(self, key):
from electrum_myr.paymentrequest import PaymentRequest
domain, memo, value, expiration, status, tx_hash = self.invoices[key]
pr = PaymentRequest(self.config)
pr.read_file(key)
pr.domain = domain
pr.verify()
self.show_pr_details(pr, tx_hash)
def show_pr_details(self, pr, tx_hash=None):
msg = 'Domain: ' + pr.domain
msg += '\nStatus: ' + pr.get_status()
msg += '\nMemo: ' + pr.get_memo()
msg += '\nPayment URL: ' + pr.payment_url
msg += '\n\nOutputs:\n' + '\n'.join(map(lambda x: x[1] + ' ' + self.format_amount(x[2])+ self.base_unit(), pr.get_outputs()))
if tx_hash:
msg += '\n\nTransaction ID: ' + tx_hash
QMessageBox.information(self, 'Invoice', msg , 'OK')
def do_pay_invoice(self, key):
from electrum_myr.paymentrequest import PaymentRequest
domain, memo, value, expiration, status, tx_hash = self.invoices[key]
pr = PaymentRequest(self.config)
pr.read_file(key)
pr.domain = domain
self.payment_request = pr
self.prepare_for_payment_request()
if pr.verify():
self.payment_request_ok()
else:
self.payment_request_error()
def create_invoice_menu(self, position):
item = self.invoices_list.itemAt(position)
if not item:
return
key = str(item.data(0, 32).toString())
domain, memo, value, expiration, status, tx_hash = self.invoices[key]
menu = QMenu()
menu.addAction(_("Details"), lambda: self.show_invoice(key))
if status == PR_UNPAID:
menu.addAction(_("Pay Now"), lambda: self.do_pay_invoice(key))
menu.addAction(_("Delete"), lambda: self.delete_invoice(key))
menu.exec_(self.invoices_list.viewport().mapToGlobal(position))
def update_address_tab(self):
l = self.address_list
# extend the syntax for consistency
l.addChild = l.addTopLevelItem
l.insertChild = l.insertTopLevelItem
l.clear()
accounts = self.wallet.get_accounts()
if self.current_account is None:
account_items = sorted(accounts.items())
else:
account_items = [(self.current_account, accounts.get(self.current_account))]
for k, account in account_items:
if len(accounts) > 1:
name = self.wallet.get_account_name(k)
c,u = self.wallet.get_account_balance(k)
account_item = QTreeWidgetItem( [ name, '', self.format_amount(c+u), ''] )
l.addTopLevelItem(account_item)
account_item.setExpanded(self.accounts_expanded.get(k, True))
account_item.setData(0, 32, k)
else:
account_item = l
sequences = [0,1] if account.has_change() else [0]
for is_change in sequences:
if len(sequences) > 1:
name = _("Receiving") if not is_change else _("Change")
seq_item = QTreeWidgetItem( [ name, '', '', '', ''] )
account_item.addChild(seq_item)
if not is_change:
seq_item.setExpanded(True)
else:
seq_item = account_item
used_item = QTreeWidgetItem( [ _("Used"), '', '', '', ''] )
used_flag = False
addr_list = account.get_addresses(is_change)
for address in addr_list:
num, is_used = self.wallet.is_used(address)
label = self.wallet.labels.get(address,'')
c, u = self.wallet.get_addr_balance(address)
balance = self.format_amount(c + u)
item = QTreeWidgetItem( [ address, label, balance, "%d"%num] )
item.setFont(0, QFont(MONOSPACE_FONT))
item.setData(0, 32, True) # label can be edited
if address in self.wallet.frozen_addresses:
item.setBackgroundColor(0, QColor('lightblue'))
if self.wallet.is_beyond_limit(address, account, is_change):
item.setBackgroundColor(0, QColor('red'))
if is_used:
if not used_flag:
seq_item.insertChild(0, used_item)
used_flag = True
used_item.addChild(item)
else:
seq_item.addChild(item)
# we use column 1 because column 0 may be hidden
l.setCurrentItem(l.topLevelItem(0),1)
def update_contacts_tab(self):
l = self.contacts_list
l.clear()
for address in self.wallet.addressbook:
label = self.wallet.labels.get(address,'')
n = self.wallet.get_num_tx(address)
item = QTreeWidgetItem( [ address, label, "%d"%n] )
item.setFont(0, QFont(MONOSPACE_FONT))
# 32 = label can be edited (bool)
item.setData(0,32, True)
# 33 = payto string
item.setData(0,33, address)
l.addTopLevelItem(item)
run_hook('update_contacts_tab', l)
l.setCurrentItem(l.topLevelItem(0))
def create_console_tab(self):
from console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet, 'network' : self.network, 'gui':self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: apply( f, (method, args, self.password_dialog ))
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def change_account(self,s):
if s == _("All accounts"):
self.current_account = None
else:
accounts = self.wallet.get_account_names()
for k, v in accounts.items():
if v == s:
self.current_account = k
self.update_history_tab()
self.update_status()
self.update_address_tab()
self.update_receive_tab()
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
from version_getter import UpdateLabel
self.updatelabel = UpdateLabel(self.config, sb)
self.account_selector = QComboBox()
self.account_selector.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.connect(self.account_selector,SIGNAL("activated(QString)"),self.change_account)
sb.addPermanentWidget(self.account_selector)
if (int(qtVersion[0]) >= 4 and int(qtVersion[2]) >= 7):
sb.addPermanentWidget( StatusBarButton( QIcon(":icons/switchgui.png"), _("Switch to Lite Mode"), self.go_lite ) )
self.lock_icon = QIcon()
self.password_button = StatusBarButton( self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget( self.password_button )
sb.addPermanentWidget( StatusBarButton( QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton( QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget( self.seed_button )
self.status_button = StatusBarButton( QIcon(":icons/status_disconnected.png"), _("Network"), self.run_network_dialog )
sb.addPermanentWidget( self.status_button )
run_hook('create_status_bar', (sb,))
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.use_encryption else QIcon(":icons/unlock.png")
self.password_button.setIcon( icon )
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setText(_("Create unsigned transaction") if self.wallet.is_watching_only() else _("Send"))
def change_password_dialog(self):
from password_dialog import PasswordDialog
d = PasswordDialog(self.wallet, self)
d.run()
self.update_lock_icon()
def new_contact_dialog(self):
d = QDialog(self)
d.setWindowTitle(_("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact')+':'))
grid = QGridLayout()
line1 = QLineEdit()
line2 = QLineEdit()
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(ok_cancel_buttons(d))
if not d.exec_():
return
address = str(line1.text())
label = unicode(line2.text())
if not is_valid(address):
QMessageBox.warning(self, _('Error'), _('Invalid Address'), _('OK'))
return
self.wallet.add_contact(address)
if label:
self.wallet.set_label(address, label)
self.update_contacts_tab()
self.update_history_tab()
self.update_completions()
self.tabs.setCurrentIndex(3)
@protected
def new_account_dialog(self, password):
dialog = QDialog(self)
dialog.setModal(1)
dialog.setWindowTitle(_("New Account"))
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_('Account name')+':'))
e = QLineEdit()
vbox.addWidget(e)
msg = _("Note: Newly created accounts are 'pending' until they receive myriadcoins.") + " " \
+ _("You will need to wait for 2 confirmations until the correct balance is displayed and more addresses are created for that account.")
l = QLabel(msg)
l.setWordWrap(True)
vbox.addWidget(l)
vbox.addLayout(ok_cancel_buttons(dialog))
dialog.setLayout(vbox)
r = dialog.exec_()
if not r: return
name = str(e.text())
if not name: return
self.wallet.create_pending_account(name, password)
self.update_address_tab()
self.update_account_selector()
self.tabs.setCurrentIndex(3)
def show_master_public_keys(self):
dialog = QDialog(self)
dialog.setModal(1)
dialog.setWindowTitle(_("Master Public Keys"))
main_layout = QGridLayout()
mpk_dict = self.wallet.get_master_public_keys()
i = 0
for key, value in mpk_dict.items():
main_layout.addWidget(QLabel(key), i, 0)
mpk_text = QRTextEdit()
mpk_text.setReadOnly(True)
mpk_text.setMaximumHeight(170)
mpk_text.setText(value)
main_layout.addWidget(mpk_text, i + 1, 0)
i += 2
vbox = QVBoxLayout()
vbox.addLayout(main_layout)
vbox.addLayout(close_button(dialog))
dialog.setLayout(vbox)
dialog.exec_()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
QMessageBox.information(self, _('Message'), _('This wallet has no seed'), _('OK'))
return
try:
mnemonic = self.wallet.get_mnemonic(password)
except Exception:
QMessageBox.warning(self, _('Error'), _('Incorrect Password'), _('OK'))
return
from seed_dialog import SeedDialog
d = SeedDialog(self, mnemonic, self.wallet.has_imported_keys())
d.exec_()
def show_qrcode(self, data, title = _("QR code")):
if not data:
return
d = QRDialog(data, self, title)
d.exec_()
def do_protect(self, func, args):
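        # Prompt for the wallet password (when encryption is enabled) and
        # forward it to the wrapped method.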
if self.wallet.use_encryption:
password = self.password_dialog()
if not password:
return
else:
password = None
if args != (False,):
args = (self,) + args + (password,)
else:
args = (self,password)
apply( func, args)
def show_public_keys(self, address):
if not address: return
try:
pubkey_list = self.wallet.get_public_keys(address)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
d = QDialog(self)
d.setMinimumSize(600, 200)
d.setModal(1)
vbox = QVBoxLayout()
vbox.addWidget( QLabel(_("Address") + ': ' + address))
vbox.addWidget( QLabel(_("Public key") + ':'))
keys = QRTextEdit()
keys.setReadOnly(True)
keys.setText('\n'.join(pubkey_list))
vbox.addWidget(keys)
vbox.addLayout(close_button(d))
d.setLayout(vbox)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address: return
try:
pk_list = self.wallet.get_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
d = QDialog(self)
d.setMinimumSize(600, 200)
d.setModal(1)
vbox = QVBoxLayout()
vbox.addWidget( QLabel(_("Address") + ': ' + address))
vbox.addWidget( QLabel(_("Private key") + ':'))
keys = QRTextEdit()
keys.setReadOnly(True)
keys.setText('\n'.join(pk_list))
vbox.addWidget(keys)
vbox.addLayout(close_button(d))
d.setLayout(vbox)
d.exec_()
@protected
def do_sign(self, address, message, signature, password):
message = unicode(message.toPlainText())
message = message.encode('utf-8')
try:
sig = self.wallet.sign_message(str(address.text()), message, password)
signature.setText(sig)
except Exception as e:
self.show_message(str(e))
def do_verify(self, address, message, signature):
message = unicode(message.toPlainText())
message = message.encode('utf-8')
if bitcoin.verify_message(address.text(), str(signature.toPlainText()), message):
self.show_message(_("Signature verified"))
else:
self.show_message(_("Error: wrong signature"))
def sign_verify_message(self, address=''):
d = QDialog(self)
d.setModal(1)
d.setWindowTitle(_('Sign/verify Message'))
d.setMinimumSize(410, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
try:
decrypted = self.wallet.decrypt_message(str(pubkey_e.text()), str(encrypted_e.toPlainText()), password)
message_e.setText(decrypted)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = unicode(message_e.toPlainText())
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, str(pubkey_e.text()))
encrypted_e.setText(encrypted)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address = ''):
d = QDialog(self)
d.setModal(1)
d.setWindowTitle(_('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_keys(address)[0]
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def question(self, msg):
return QMessageBox.question(self, _('Message'), msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.No) == QMessageBox.Yes
def show_message(self, msg):
QMessageBox.information(self, _('Message'), msg, _('OK'))
def show_warning(self, msg):
QMessageBox.warning(self, _('Warning'), msg, _('OK'))
def password_dialog(self, msg=None):
d = QDialog(self)
d.setModal(1)
d.setWindowTitle(_("Enter Password"))
pw = QLineEdit()
pw.setEchoMode(2)
vbox = QVBoxLayout()
if not msg:
msg = _('Please enter your password')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Password')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
vbox.addLayout(ok_cancel_buttons(d))
d.setLayout(vbox)
run_hook('password_dialog', pw, grid, 1)
if not d.exec_(): return
return unicode(pw.text())
def tx_from_text(self, txt):
"json or raw hexadecimal"
try:
txt.decode('hex')
is_hex = True
except:
is_hex = False
if is_hex:
try:
return Transaction.deserialize(txt)
except:
traceback.print_exc(file=sys.stdout)
QMessageBox.critical(None, _("Unable to parse transaction"), _("Electrum was unable to parse your transaction"))
return
try:
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
tx = Transaction.deserialize(tx_dict["hex"])
#if tx_dict.has_key("input_info"):
# input_info = json.loads(tx_dict['input_info'])
# tx.add_input_info(input_info)
return tx
except Exception:
traceback.print_exc(file=sys.stdout)
QMessageBox.critical(None, _("Unable to parse transaction"), _("Electrum was unable to parse your transaction"))
def read_tx_from_qrcode(self):
        from electrum_myr import qrscanner
try:
data = qrscanner.scan_qr(self.config)
except BaseException, e:
            QMessageBox.warning(self, _('Error'), str(e), _('OK'))
return
if not data:
return
# transactions are binary, but qrcode seems to return utf8...
z = data.decode('utf8')
s = ''
for b in z:
s += chr(ord(b))
data = s.encode('hex')
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error), reason:
QMessageBox.critical(None, _("Unable to read file or no transaction found"), _("Electrum was unable to open your transaction file") + "\n" + str(reason))
return self.tx_from_text(file_content)
@protected
def sign_raw_transaction(self, tx, password):
try:
self.wallet.signrawtransaction(tx, [], password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.warning(self, _("Error"), str(e))
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_myr import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
r = self.network.synchronous_get([ ('blockchain.transaction.get',[str(txid)]) ])[0]
if r:
tx = transaction.Transaction.deserialize(r)
if tx:
self.show_transaction(tx)
else:
self.show_message("unknown transaction")
def do_process_from_csvReader(self, csvReader):
outputs = []
errors = []
errtext = ""
try:
for position, row in enumerate(csvReader):
address = row[0]
if not bitcoin.is_address(address):
errors.append((position, address))
continue
amount = Decimal(row[1])
amount = int(100000000*amount)
outputs.append(('address', address, amount))
except (ValueError, IOError, os.error), reason:
QMessageBox.critical(None, _("Unable to read file or no transaction found"), _("Electrum was unable to open your transaction file") + "\n" + str(reason))
return
if errors != []:
for x in errors:
errtext += "CSV Row " + str(x[0]+1) + ": " + x[1] + "\n"
QMessageBox.critical(None, _("Invalid Addresses"), _("ABORTING! Invalid Addresses found:") + "\n\n" + errtext)
return
try:
tx = self.wallet.make_unsigned_transaction(outputs, None, None)
except Exception as e:
self.show_message(str(e))
return
self.show_transaction(tx)
def do_process_from_csv_file(self):
fileName = self.getOpenFileName(_("Select your transaction CSV"), "*.csv")
if not fileName:
return
try:
with open(fileName, "r") as f:
csvReader = csv.reader(f)
self.do_process_from_csvReader(csvReader)
except (ValueError, IOError, os.error), reason:
QMessageBox.critical(None, _("Unable to read file or no transaction found"), _("Electrum was unable to open your transaction file") + "\n" + str(reason))
return
def do_process_from_csv_text(self):
text = text_dialog(self, _('Input CSV'), _("Please enter a list of outputs.") + '\n' \
+ _("Format: address, amount. One output per line"), _("Load CSV"))
if not text:
return
f = StringIO.StringIO(text)
csvReader = csv.reader(f)
self.do_process_from_csvReader(csvReader)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
d = QDialog(self)
d.setWindowTitle(_('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-myr-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
h, b = ok_cancel_buttons2(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(h)
private_keys = {}
addresses = self.wallet.addresses(True)
done = False
def privkeys_thread():
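            # Decrypt the private keys in a background thread so the dialog
            # stays responsive, signalling progress back to the GUI.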
for addr in addresses:
time.sleep(0.1)
if done:
break
private_keys[addr] = "\n".join(self.wallet.get_private_key(addr, password))
d.emit(SIGNAL('computing_privkeys'))
d.emit(SIGNAL('show_privkeys'))
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
d.connect(d, QtCore.SIGNAL('computing_privkeys'), lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
d.connect(d, QtCore.SIGNAL('show_privkeys'), show_privkeys)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error), reason:
export_error_label = _("Electrum was unable to produce a private key-export.")
QMessageBox.critical(None, _("Unable to create csv"), export_error_label + "\n" + str(reason))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.dat")
if not labelsFile: return
try:
f = open(labelsFile, 'r')
data = f.read()
f.close()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
QMessageBox.information(None, _("Labels imported"), _("Your labels were imported from")+" '%s'" % str(labelsFile))
except (IOError, os.error), reason:
QMessageBox.critical(None, _("Unable to import labels"), _("Electrum was unable to import your labels.")+"\n" + str(reason))
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum-myr_labels.dat', "*.dat")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f)
                QMessageBox.information(None, _("Labels exported"), _("Your labels were exported to")+" '%s'" % str(fileName))
except (IOError, os.error), reason:
QMessageBox.critical(None, _("Unable to export labels"), _("Electrum was unable to export your labels.")+"\n" + str(reason))
def export_history_dialog(self):
d = QDialog(self)
d.setWindowTitle(_('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-myr-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
h, b = ok_cancel_buttons2(d, _('Export'))
vbox.addLayout(h)
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error), reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
QMessageBox.critical(self, _("Unable to export history"), export_error_label + "\n" + str(reason))
return
QMessageBox.information(self,_("History exported"), _("Your wallet history has been successfully exported."))
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_tx_history()
lines = []
for item in history:
tx_hash, confirmations, is_mine, value, fee, balance, timestamp = item
if confirmations:
if timestamp is not None:
try:
time_string = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
                    except (RuntimeError, TypeError, NameError) as reason:
time_string = "unknown"
pass
else:
time_string = "unknown"
else:
time_string = "pending"
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if fee is not None:
fee_string = format_satoshis(fee, True)
else:
fee_string = '0'
if tx_hash:
label, is_default_label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
balance_string = format_satoshis(balance, False)
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, fee_string, balance_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["transaction_hash","label", "confirmations", "value", "fee", "balance", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
d = QDialog(self)
d.setWindowTitle(_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys")))
keys_e = QTextEdit()
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, address_e = address_field(self.wallet.addresses(False))
vbox.addLayout(h)
vbox.addStretch(1)
hbox, button = ok_cancel_buttons2(d, _('Sweep'))
vbox.addLayout(hbox)
button.setEnabled(False)
def get_address():
addr = str(address_e.text())
if bitcoin.is_address(addr):
return addr
def get_pk():
pk = str(keys_e.toPlainText()).strip()
if Wallet.is_private_key(pk):
return pk.split()
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
if not d.exec_():
return
fee = self.wallet.fee_per_kb
tx = Transaction.sweep(get_pk(), self.network, get_address(), fee)
self.show_transaction(tx)
@protected
def do_import_privkey(self, password):
if not self.wallet.has_imported_keys():
r = QMessageBox.question(None, _('Warning'), '<b>'+_('Warning') +':\n</b><br/>'+ _('Imported keys are not recoverable from seed.') + ' ' \
+ _('If you ever need to restore your wallet from its seed, these keys will be lost.') + '<p>' \
+ _('Are you sure you understand what you are doing?'), 3, 4)
if r == 4: return
text = text_dialog(self, _('Import private keys'), _("Enter private keys")+':', _("Import"))
if not text: return
text = str(text).split()
badkeys = []
addrlist = []
for key in text:
try:
addr = self.wallet.import_key(key, password)
except Exception as e:
badkeys.append(key)
continue
if not addr:
badkeys.append(key)
else:
addrlist.append(addr)
if addrlist:
QMessageBox.information(self, _('Information'), _("The following addresses were added") + ':\n' + '\n'.join(addrlist))
if badkeys:
QMessageBox.critical(self, _('Error'), _("The following inputs could not be imported") + ':\n'+ '\n'.join(badkeys))
self.update_address_tab()
self.update_history_tab()
def settings_dialog(self):
self.need_restart = False
d = QDialog(self)
d.setWindowTitle(_('Electrum Settings'))
d.setModal(1)
vbox = QVBoxLayout()
grid = QGridLayout()
grid.setColumnStretch(0,1)
widgets = []
lang_label = QLabel(_('Language') + ':')
lang_help = HelpButton(_('Select which language is used in the GUI (after restart).'))
lang_combo = QComboBox()
from electrum_myr.i18n import languages
lang_combo.addItems(languages.values())
try:
index = languages.keys().index(self.config.get("language",''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = languages.keys()[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
widgets.append((lang_label, lang_combo, lang_help))
nz_label = QLabel(_('Zeros after decimal point') + ':')
nz_help = HelpButton(_('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"'))
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.update_history_tab()
self.update_address_tab()
nz.valueChanged.connect(on_nz)
widgets.append((nz_label, nz, nz_help))
fee_label = QLabel(_('Transaction fee per kb') + ':')
fee_help = HelpButton(_('Fee per kilobyte of transaction.') + '\n' \
+ _('Recommended value') + ': ' + self.format_amount(bitcoin.RECOMMENDED_FEE) + ' ' + self.base_unit())
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(self.wallet.fee_per_kb)
if not self.config.is_modifiable('fee_per_kb'):
for w in [fee_e, fee_label]: w.setEnabled(False)
def on_fee():
fee = fee_e.get_amount()
self.wallet.set_fee(fee)
fee_e.editingFinished.connect(on_fee)
widgets.append((fee_label, fee_e, fee_help))
units = ['MYR', 'mMYR', 'uMYR']
unit_label = QLabel(_('Base unit') + ':')
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
        msg = _('Base unit of your wallet.')\
                          + '\n1 MYR = 1000 mMYR.\n' \
                          + _('These settings affect the fields in the Send tab') + ' '
unit_help = HelpButton(msg)
def on_unit(x):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
if unit_result == 'MYR':
self.decimal_point = 8
elif unit_result == 'mMYR':
self.decimal_point = 5
elif unit_result == 'uMYR':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
self.update_history_tab()
self.update_receive_tab()
self.update_address_tab()
self.update_invoices_tab()
fee_e.setAmount(self.wallet.fee_per_kb)
self.update_status()
unit_combo.currentIndexChanged.connect(on_unit)
widgets.append((unit_label, unit_combo, unit_help))
block_explorers = ['myriad.theblockexplorer.com', 'birdonwheels5.no-ip.org', 'myr.coinpi.pe']
block_ex_label = QLabel(_('Online Block Explorer') + ':')
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_explorers.index(self.config.get('block_explorer', 'myriad.theblockexplorer.com')))
block_ex_help = HelpButton(_('Choose which online block explorer to use for functions that open a web browser'))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
widgets.append((block_ex_label, block_ex_combo, block_ex_help))
from electrum_myr import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
qr_label = QLabel(_('Video Device') + ':')
qr_combo.setEnabled(qrscanner.zbar is not None)
qr_help = HelpButton(_("Install the zbar package to enable this.\nOn linux, type: 'apt-get install python-zbar'"))
on_video_device = lambda x: self.config.set_key("video_device", str(qr_combo.itemData(x).toString()), True)
qr_combo.currentIndexChanged.connect(on_video_device)
widgets.append((qr_label, qr_combo, qr_help))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
usechange_help = HelpButton(_('Using change addresses makes it more difficult for other people to track your transactions.'))
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
widgets.append((usechange_cb, None, usechange_help))
showtx_cb = QCheckBox(_('Show transaction before broadcast'))
showtx_cb.setChecked(self.config.get('show_before_broadcast', False))
showtx_cb.stateChanged.connect(lambda x: self.config.set_key('show_before_broadcast', showtx_cb.isChecked()))
        showtx_help = HelpButton(_('Display the details of your transaction before broadcasting it.'))
widgets.append((showtx_cb, None, showtx_help))
can_edit_fees_cb = QCheckBox(_('Set transaction fees manually'))
can_edit_fees_cb.setChecked(self.config.get('can_edit_fees', False))
def on_editfees(x):
self.config.set_key('can_edit_fees', x == Qt.Checked)
self.update_fee_edit()
can_edit_fees_cb.stateChanged.connect(on_editfees)
can_edit_fees_help = HelpButton(_('This option lets you edit fees in the send tab.'))
widgets.append((can_edit_fees_cb, None, can_edit_fees_help))
for a,b,c in widgets:
i = grid.rowCount()
if b:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
grid.addWidget(c, i, 2)
vbox.addLayout(grid)
vbox.addStretch(1)
vbox.addLayout(close_button(d))
d.setLayout(vbox)
# run the dialog
d.exec_()
run_hook('close_settings_dialog')
if self.need_restart:
            QMessageBox.information(self, _('Success'), _('Please restart Electrum to activate the new GUI settings'), _('OK'))
def run_network_dialog(self):
if not self.network:
QMessageBox.warning(self, _('Offline'), _('You are using Electrum in offline mode.\nRestart Electrum if you want to get connected.'), _('OK'))
return
NetworkDialog(self.wallet.network, self.config, self).do_exec()
def closeEvent(self, event):
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.config.set_key("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
self.save_column_widths()
self.config.set_key("console-history", self.console.history[-50:], True)
self.wallet.storage.put('accounts_expanded', self.accounts_expanded)
event.accept()
def plugins_dialog(self):
from electrum_myr.plugins import plugins
d = QDialog(self)
d.setWindowTitle(_('Electrum Plugins'))
d.setModal(1)
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(len(plugins)*35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
def do_toggle(cb, p, w):
if p.is_enabled():
if p.disable():
p.close()
else:
if p.enable():
p.load_wallet(self.wallet)
p.init_qt(self.gui_object)
r = p.is_enabled()
cb.setChecked(r)
if w: w.setEnabled(r)
def mk_toggle(cb, p, w):
return lambda: do_toggle(cb,p,w)
for i, p in enumerate(plugins):
try:
cb = QCheckBox(p.fullname())
cb.setDisabled(not p.is_available())
cb.setChecked(p.is_enabled())
grid.addWidget(cb, i, 0)
if p.requires_settings():
w = p.settings_widget(self)
w.setEnabled( p.is_enabled() )
grid.addWidget(w, i, 1)
else:
w = None
cb.clicked.connect(mk_toggle(cb,p,w))
grid.addWidget(HelpButton(p.description()), i, 2)
except Exception:
print_msg(_("Error: cannot display plugin"), p)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(i+1,1)
vbox.addLayout(close_button(d))
d.exec_()
def show_account_details(self, k):
account = self.wallet.accounts[k]
d = QDialog(self)
d.setWindowTitle(_('Account Details'))
d.setModal(1)
vbox = QVBoxLayout(d)
name = self.wallet.get_account_name(k)
label = QLabel('Name: ' + name)
vbox.addWidget(label)
vbox.addWidget(QLabel(_('Address type') + ': ' + account.get_type()))
vbox.addWidget(QLabel(_('Derivation') + ': ' + k))
vbox.addWidget(QLabel(_('Master Public Key:')))
text = QTextEdit()
text.setReadOnly(True)
text.setMaximumHeight(170)
vbox.addWidget(text)
mpk_text = '\n'.join( account.get_master_pubkeys() )
text.setText(mpk_text)
vbox.addLayout(close_button(d))
d.exec_()
| gpl-3.0 | 5,956,823,126,570,258,000 | 38.181786 | 458 | 0.585868 | false |
ptphp/PyLib | src/webpy1/src/borough/parseBody.py | 1 | 4768 | # coding=gbk
import re
import string,urlparse
import os.path as osp
nums = string.digits
# remove redundant whitespace from an HTML string
def clearBlank(html):
if not html or html == None : return ;
html = re.sub('\r|\n|\t','',html)
html = html.replace(' ','').replace(' ','').replace('\'','"')
return html
def clearInfo(html):
if not html or html == None : return ;
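    # The GBK-encoded patterns below appear to strip listing boilerplate
    # (a "mention you saw this on ..." notice, the posting date, some markup)
    # and swap the original site name for the new brand.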
html = re.sub('´òµç»°¸øÎÒʱ£¬ÇëÒ»¶¨ËµÃ÷ÔÚ.*?Íø¿´µ½µÄ£¬Ð»Ð»£¡|·¢²¼ÈÕÆÚ£º.*?<br />|<a .*?>|\[ºô½Ð\]|</a>|<p .*?>','',html).replace('°ÙÐÕ','¿ìËÙ×âÁÞÍø')
return html
# HTML snippet extraction helpers
def rects(html,regx,cls=''):
    if not html or html == None or len(html)==0 : return ;
    # regex-based extraction
    if regx[:1]==chr(40) and regx[-1:]==chr(41) :
        R = re.findall(regx,html,re.I)
        if not R : return
    # plain-string extraction
    else :
        # locate every occurrence of the pattern
        pattern =re.compile(regx.lower())
        intRegx=pattern.findall(html.lower())
        # if the pattern is not found, return None
        if not intRegx : return
        R = intRegx
    # clean up the extracted content
if cls:
RC = []
for item in R:
RC.append(resub(item,cls))
return RC
else:
return R
def rect(html,regx,cls=''):
#regx = regx.encode('utf-8')
if not html or html == None or len(html)==0 : return ;
# ÕýÔò±í´ïʽ½ØÈ¡
if regx[:1]==chr(40) and regx[-1:]==chr(41) :
reHTML = re.search(regx,html,re.I)
if reHTML == None : return
reHTML = reHTML.group()
intRegx = re.search(regx,reHTML,re.I)
R = reHTML[intRegx]
# ×Ö·û´®½ØÈ¡
else :
# È¡µÃ×Ö·û´®µÄλÖÃ
pattern =re.compile(regx.lower())
intRegx=pattern.findall(html)
        # If the start string cannot be found, return None immediately
if not intRegx : return
R = intRegx[0]
if cls:
R = resub(R,cls)
    # Return the extracted text
return R
# Strip content matching a regular expression
def resub(html,regexs):
if not regexs: return html
html =re.sub(regexs,'',html)
return html
def rereplace(html,regexs):
if not regexs: return html
    html = html.replace(regexs,'')
return html
# Build the phone-page redirect URL
def telPageReplace(url):
telUrl=url.split('/')
finalUrl="phone_%s" % telUrl[len(telUrl)-1]
return url.replace(telUrl[len(telUrl)-1],finalUrl)
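# Example (added; not in the original source) of the phone-page URL rewrite:
#   telPageReplace('http://example.com/item/123.html')
#   -> 'http://example.com/item/phone_123.html'
# i.e. the last path segment is prefixed with "phone_".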
# Check whether a value consists only of digits
def check(a):
if type(a) is not str:
return False
else:
for i in a:
if i not in nums:
return False
return True
#Åжϵ绰
def parseNum(a):
strs=''
if type(a) is not str:
return 0
else:
for i in a:
if i in nums or i == '.':
strs +=i
return strs
def reTel(str,regx):
#regx = '((13[0-9]|15[0-9]|18[89])\\d{8})'
p = re.compile(regx)
#print p
if p.findall(str):
return p.findall(str)[0]
else:
regx = '((13[0-9]|15[0-9]|18[89])\d{8})'
#regx = '(13[0-9]|15[0-9]|18[89])\d{8}'
res = re.search(regx,str).group()
if res:
return res
else:
return ''
def matchURL(tag,url):
print tag
print url
urls = re.findall('(.*)(src|href)=(.+?)( |/>|>).*|(.*)url\(([^\)]+)\)',tag,re.I)
if urls == None :
return tag
else :
if urls[0][5] == '' :
urlQuote = urls[0][2]
else:
urlQuote = urls[0][5]
if len(urlQuote) > 0 :
cUrl = re.sub('''['"]''','',urlQuote)
else :
return tag
    urls = urlparse.urlparse(url); scheme = urls[0];
if scheme!='' : scheme+='://'
host = urls[1]; host = scheme + host
if len(host)==0 : return tag
path = osp.dirname(urls[2]);
if path=='/' : path = '';
if cUrl.find("#")!=-1 : cUrl = cUrl[:cUrl.find("#")]
    # Determine the URL type
if re.search('''^(http|https|ftp):(//|\\\\)(([\w/\\\+\-~`@:%])+\.)+([\w/\\\.\=\?\+\-~`@':!%#]|(&)|&)+''',cUrl,re.I) != None :
        # Skip URLs that already start with http/https/ftp
return tag
elif cUrl[:1] == '/' :
        # Absolute path
cUrl = host + cUrl
elif cUrl[:3]=='../' :
        # Relative path
while cUrl[:3]=='../' :
cUrl = cUrl[3:]
if len(path) > 0 :
path = osp.dirname(path)
elif cUrl[:2]=='./' :
cUrl = host + path + cUrl[1:]
elif cUrl.lower()[:7]=='mailto:' or cUrl.lower()[:11]=='javascript:' :
return tag
else :
cUrl = host + path + '/' + cUrl
R = tag.replace(urlQuote,'"' + cUrl + '"')
return R
def urlencode(str) :
str=str.decode('utf-8').encode('utf-8')
reprStr = repr(str).replace(r'\x', '%')
return reprStr[1:-1]
| apache-2.0 | -9,106,232,021,613,570,000 | 27.556886 | 153 | 0.490352 | false |
tsherwen/AC_tools | Scripts/2D_GEOSChem_slice_subregion_plotter_example.py | 1 | 2934 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Plotter for 2D slices of GEOS-Chem output NetCDFs files.
NOTES
---
- This is set up for Cly, but many other options (plot/species) are available
by just updating passed variables/plotting function called.
"""
import AC_tools as AC
import numpy as np
import matplotlib.pyplot as plt
def main():
"""
Basic plotter of NetCDF files using AC_tools
"""
# --- Local settings hardwired here...
fam = 'Cly' # Family to plot
# print species in family for reference...
print((AC.GC_var(fam)))
# --- Get working directory etc from command line (as a dictionary object)
    # (1st argument is the file directory/folder, 2nd is the filename)
Var_rc = AC.get_default_variable_dict()
# Get details on extracted data (inc. resolution)
Data_rc = AC.get_shared_data_as_dict(Var_rc=Var_rc)
# --- extract data and units of data for family/species...
arr, units = AC.fam_data_extractor(wd=Var_rc['wd'], fam=fam,
res=Data_rc['res'], rtn_units=True, annual_mean=False)
# --- Process data (add and extra processing of data here... )
# take average over time
print((arr.shape))
arr = arr.mean(axis=-1)
# Select surface values
print((arr.shape))
arr = arr[..., 0]
# convert to pptv
arr = arr*1E12
units = 'pptv'
# --- Plot up data...
print((arr.shape))
# - Plot a (very) simple plot ...
# AC.map_plot( arr.T, res=Data_rc['res'] )
# - plot a slightly better plot...
# (loads of options here - just type help(AC.plot_spatial_figure) in ipython)
# set range for data...
fixcb = np.array([0., 100.])
# number of ticks on colorbar (make sure the fixcb range divides by this)
nticks = 6
interval = (1/3.) # number of lat/lon labels... (x*15 degrees... )
# set limits of plot
lat_min = 5.
lat_max = 75.
lon_min = -30.
lon_max = 60.
left_cb_pos = 0.85 # set X (fractional) position
axis_titles = True # add labels for lat and lon
# title for plot
title = "Plot of annual average {}".format(fam)
# save as pdf (just set to True) or show?
# figsize = (7,5) # figsize to use? (e.g. square or rectangular plot)
# call plotter...
AC.plot_spatial_figure(arr, res=Data_rc['res'], units=units, fixcb=fixcb,
lat_min=lat_min, lat_max=lat_max, lon_min=lon_min, lon_max=lon_max,
axis_titles=axis_titles, left_cb_pos=left_cb_pos,
nticks=nticks, interval=interval, title=title, show=False)
    # are the spacings right? - if not, just update the values below
bottom = 0.1
top = 0.9
left = 0.1
right = 0.9
fig = plt.gcf()
fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right)
# show and save as PDF?
plt.savefig('pete_plot.png')
AC.show_plot()
if __name__ == "__main__":
main()
| mit | 3,210,553,677,345,683,500 | 31.6 | 94 | 0.599864 | false |
takluyver/xray | xray/groupby.py | 1 | 12796 | import itertools
from common import ImplementsReduce
from ops import inject_reduce_methods
import variable
import dataset
import numpy as np
def unique_value_groups(ar):
"""Group an array by its unique values.
Parameters
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
Returns
-------
values : np.ndarray
Sorted, unique values as returned by `np.unique`.
indices : list of lists of int
Each element provides the integer indices in `ar` with values given by
the corresponding value in `unique_values`.
"""
values, inverse = np.unique(ar, return_inverse=True)
groups = [[] for _ in range(len(values))]
for n, g in enumerate(inverse):
groups[g].append(n)
return values, groups
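# Worked example (added for illustration; not part of the upstream code):
#   unique_value_groups(['a', 'b', 'a', 'c'])
#   -> (array(['a', 'b', 'c'], ...), [[0, 2], [1], [3]])
# Each group holds the integer indices at which the corresponding unique
# value occurs in the flattened input.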
def peek_at(iterable):
"""Returns the first value from iterable, as well as a new iterable with
the same content as the original iterable
"""
gen = iter(iterable)
peek = gen.next()
return peek, itertools.chain([peek], gen)
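# Example (added): peek at the first element without losing it.
#   first, rest = peek_at(x * x for x in range(3))
#   # first == 0 and list(rest) == [0, 1, 4]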
class GroupBy(object):
"""A object that implements the split-apply-combine pattern.
Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over
(unique_value, grouped_array) pairs, but the main way to interact with a
groupby object are with the `apply` or `reduce` methods. You can also
directly call numpy methods like `mean` or `std`.
You should create a GroupBy object by using the `DataArray.groupby` or
`Dataset.groupby` methods.
See Also
--------
XArray.groupby
DataArray.groupby
"""
def __init__(self, obj, group_coord, squeeze=True):
"""Create a GroupBy object
Parameters
----------
obj : Dataset or DataArray
Object to group.
group_coord : DataArray
1-dimensional array with the group values.
squeeze : boolean, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
"""
if group_coord.ndim != 1:
# TODO: remove this limitation?
raise ValueError('`group_coord` must be 1 dimensional')
self.obj = obj
self.group_coord = group_coord
self.group_dim, = group_coord.dimensions
expected_size = dataset.as_dataset(obj).dimensions[self.group_dim]
if group_coord.size != expected_size:
raise ValueError('the group variable\'s length does not '
'match the length of this variable along its '
'dimension')
if group_coord.name in obj.dimensions:
# assume that group_coord already has sorted, unique values
if group_coord.dimensions != (group_coord.name,):
raise ValueError('`group_coord` is required to be a '
'coordinate variable if `group_coord.name` '
'is a dimension in `obj`')
group_indices = np.arange(group_coord.size)
if not squeeze:
# group_indices = group_indices.reshape(-1, 1)
# use slices to do views instead of fancy indexing
group_indices = [slice(i, i + 1) for i in group_indices]
unique_coord = group_coord
else:
# look through group_coord to find the unique values
unique_values, group_indices = unique_value_groups(group_coord)
# TODO: switch this to using the new DataArray constructor when we
# get around to writing it:
# unique_coord = xary.DataArray(unique_values, name=group_coord.name)
variables = {group_coord.name: (group_coord.name, unique_values)}
unique_coord = dataset.Dataset(variables)[group_coord.name]
self.group_indices = group_indices
self.unique_coord = unique_coord
self._groups = None
@property
def groups(self):
# provided to mimic pandas.groupby
if self._groups is None:
self._groups = dict(zip(self.unique_coord.values,
self.group_indices))
return self._groups
def __len__(self):
return self.unique_coord.size
def __iter__(self):
return itertools.izip(self.unique_coord.values, self._iter_grouped())
def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self.group_indices:
yield self.obj.indexed(**{self.group_dim: indices})
def _infer_concat_args(self, applied_example):
if self.group_dim in applied_example.dimensions:
concat_dim = self.group_coord
indexers = self.group_indices
else:
concat_dim = self.unique_coord
indexers = np.arange(self.unique_coord.size)
return concat_dim, indexers
@property
def _combine(self):
return type(self.obj).concat
class ArrayGroupBy(GroupBy, ImplementsReduce):
"""GroupBy object specialized to grouping DataArray objects
"""
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields XArrays without metadata
"""
array = variable.as_variable(self.obj)
# build the new dimensions
if isinstance(self.group_indices[0], int):
# group_dim is squeezed out
dims = tuple(d for d in array.dimensions if d != self.group_dim)
else:
dims = array.dimensions
# slice the data and build the new Arrays directly
indexer = [slice(None)] * array.ndim
group_axis = array.get_axis_num(self.group_dim)
for indices in self.group_indices:
indexer[group_axis] = indices
data = array.values[tuple(indexer)]
yield variable.Variable(dims, data)
def _combine_shortcut(self, applied, concat_dim, indexers):
stacked = variable.Variable.concat(
applied, concat_dim, indexers, shortcut=True)
stacked.attrs.update(self.obj.attrs)
name = self.obj.name
ds = self.obj.dataset.unselect(name)
ds[concat_dim.name] = concat_dim
# remove extraneous dimensions
for dim in self.obj.dimensions:
if dim not in stacked.dimensions:
del ds[dim]
ds[name] = stacked
return ds[name]
def _restore_dim_order(self, stacked, concat_dim):
def lookup_order(dimension):
if dimension == self.group_coord.name:
dimension, = concat_dim.dimensions
if dimension in self.obj.dimensions:
axis = self.obj.get_axis_num(dimension)
else:
axis = 1e6 # some arbitrarily high value
return axis
new_order = sorted(stacked.dimensions, key=lookup_order)
return stacked.transpose(*new_order)
def apply(self, func, shortcut=False, **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes, indices or other contained arrays) but
only on the data and dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
**kwargs
            Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (func(arr, **kwargs) for arr in grouped)
# peek at applied to determine which coordinate to stack over
applied_example, applied = peek_at(applied)
concat_dim, indexers = self._infer_concat_args(applied_example)
if shortcut:
combined = self._combine_shortcut(applied, concat_dim, indexers)
else:
combined = self._combine(applied, concat_dim, indexers)
reordered = self._restore_dim_order(combined, concat_dim)
return reordered
def reduce(self, func, dimension=None, axis=None, shortcut=True,
**kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
dimension : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither are supplied, then
`func` is calculated over all dimension for each group item.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_array(ar):
return ar.reduce(func, dimension, axis, **kwargs)
return self.apply(reduce_array, shortcut=shortcut)
_reduce_method_docstring = \
"""Reduce the items in this group by applying `{name}` along some
dimension(s).
Parameters
----------
dimension : str or sequence of str, optional
Dimension(s) over which to apply `{name}`.
axis : int or sequence of int, optional
Axis(es) over which to apply `{name}`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither are supplied, then
`{name}` is calculated over all dimension for each group item.
**kwargs : dict
Additional keyword arguments passed on to `{name}`.
Returns
-------
reduced : {cls}
New {cls} object with `{name}` applied to its data and the
indicated dimension(s) removed.
"""
inject_reduce_methods(ArrayGroupBy)
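# Hedged usage sketch (added; the variable names are illustrative and the
# exact call depends on the surrounding xray API):
#   # means = data_array.groupby('season').apply(lambda ar: ar.mean())
#   # is roughly equivalent to data_array.groupby('season').mean(),
#   # since inject_reduce_methods adds the reduction shortcuts above.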
class DatasetGroupBy(GroupBy):
def apply(self, func, **kwargs):
"""Apply a function over each Dataset in the group and concatenate them
together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
**kwargs
            Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset
The result of splitting, applying and combining this dataset.
"""
applied = [func(ds, **kwargs) for ds in self._iter_grouped()]
concat_dim, indexers = self._infer_concat_args(applied[0])
combined = self._combine(applied, concat_dim, indexers)
return combined
| apache-2.0 | -8,291,218,335,638,136,000 | 37.197015 | 81 | 0.604408 | false |
jamslevy/gsoc | tests/settings_test.py | 1 | 4105 | #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the scripts.settings module.
For details on running the tests, see:
http://code.google.com/p/soc/wiki/TestingGuidelines#Running_the_smoke_tests
"""
__authors__ = [
# alphabetical order by last name, please
'"Todd Larsen" <[email protected]>',
]
import optparse
import os
import sys
import unittest
from scripts import settings
class SettingsTests(unittest.TestCase):
"""Python-format settings file tests for the settings.py module.
"""
def setUp(self):
self.test_srcdir = os.path.dirname(__file__)
self.settings_defaults = {'foo': 1, 'bif': 4}
def testMissingPythonSettings(self):
"""Test that non-existent files work properly without errors.
"""
# non-existent settings file with no defaults produces empty dict
self.assertEqual(
{},
settings.readPythonSettings(settings_dir=self.test_srcdir,
settings_file='nonexistent_file'))
# non-existent settings file should just pass through the defaults
self.assertEqual(
self.settings_defaults,
settings.readPythonSettings(defaults=self.settings_defaults,
settings_dir=self.test_srcdir,
settings_file='nonexistent_file'))
def testGoodPythonSettings(self):
"""Test that settings file that is present overwrites defaults.
"""
# foo and bar are overwritten, but not bif (not in the settings file)
self.assertEqual(
{'foo': 3, 'bar': 3, 'bif': 4},
settings.readPythonSettings(defaults=self.settings_defaults,
settings_dir=self.test_srcdir,
settings_file='good_test_settings'))
# but the original defaults will be untouched
self.assertEqual({'foo': 1, 'bif': 4}, self.settings_defaults)
def testBadPythonSettings(self):
"""Test that exception is raised when format of settings file is bad.
"""
self.assertRaises(settings.Error, settings.readPythonSettings,
settings_dir=self.test_srcdir,
settings_file='bad_test_settings')
class OptionParserTests(unittest.TestCase):
"""Tests of custom optparse OptionParser with 'required' parameter support.
"""
def testRequiredPresent(self):
"""Test required=True raises nothing when value option is present.
"""
parser = settings.OptionParser(
option_list=[
settings.Option(
'-t', '--test', action='store', dest='test', required=True,
help='(REQUIRED) test option'),
],
)
options, args = parser.parse_args([sys.argv[0], '--test', '3'])
def testRequiredMissing(self):
"""Test that Error exception is raised if required option not present.
"""
parser = settings.OptionParser(
option_list=[
settings.Option(
'-t', '--test', action='store', dest='test', required=True,
help='(REQUIRED) test option'),
],
)
self.assertRaises(settings.Error, parser.parse_args, [])
def testBadRequiredAction(self):
"""Test that OptionError is raised if action does not support required=True.
"""
# store_true is not in Options.TYPED_VALUES, which means option cannot
# take a value, so required=True is not permitted.
self.assertRaises(optparse.OptionError, settings.Option,
'-t', '--test', action='store_true', dest='test', required=True,
help='(REQUIRED) test option')
| apache-2.0 | 7,990,805,361,573,050,000 | 32.92562 | 80 | 0.654811 | false |
BirkbeckCTP/janeway | src/submission/urls.py | 1 | 1920 | __copyright__ = "Copyright 2017 Birkbeck, University of London"
__author__ = "Martin Paul Eve & Andy Byers"
__license__ = "AGPL v3"
__maintainer__ = "Birkbeck Centre for Technology and Publishing"
from django.conf.urls import url
from submission import views
urlpatterns = [
url(r'^start/$', views.start, name='submission_start'),
url(r'^(?P<type>[-\w.]+)/start/$', views.start, name='submission_start'),
url(r'^(?P<article_id>\d+)/info/$', views.submit_info, name='submit_info'),
url(r'^(?P<article_id>\d+)/authors/$', views.submit_authors, name='submit_authors'),
url(r'^(?P<article_id>\d+)/authors/(?P<author_id>\d+)/delete/$', views.delete_author, name='delete_author'),
url(r'^(?P<article_id>\d+)/funders/(?P<funder_id>\d+)/delete/$', views.delete_funder, name='delete_funder'),
url(r'^(?P<article_id>\d+)/files/$', views.submit_files, name='submit_files'),
url(r'^(?P<article_id>\d+)/funding/$', views.submit_funding, name='submit_funding'),
url(r'^submissions/$', views.submit_submissions, name='submission_submissions'),
url(r'^(?P<article_id>\d+)/review/$', views.submit_review, name='submit_review'),
url(r'^manager/article/settings/article/(?P<article_id>\d+)/publishernotes/order/$', views.publisher_notes_order,
name='submission_article_publisher_notes_order'),
url(r'^manager/configurator/$', views.configurator, name='submission_configurator'),
url(r'^manager/additional_fields/$', views.fields, name='submission_fields'),
url(r'^manager/additional_fields/(?P<field_id>\d+)/$', views.fields, name='submission_fields_id'),
url(r'^manager/licences/$', views.licenses, name='submission_licenses'),
url(r'^manager/licences/(?P<license_pk>\d+)/delete/',
views.delete_license,
name='submission_delete_license'),
url(r'^manager/licences/(?P<license_pk>\d+)/', views.licenses, name='submission_licenses_id'),
]
| agpl-3.0 | 7,696,528,079,549,939,000 | 49.526316 | 117 | 0.665104 | false |
dsoprea/RandomUtility | python/ssl_sign.py | 1 | 3373 | #!/usr/bin/env python2.7
import os.path
import argparse
import datetime
import hashlib
import random
import time
import M2Crypto.X509
import M2Crypto.ASN1
import M2Crypto.RSA
import M2Crypto.EVP
_OUTPUT_PATH = 'output'
_CA_PASSPHRASE = 'test'
_CA_KEY_PEM_FILENAME = 'output/ca.key.pem'
_CA_CRT_PEM_FILENAME = 'output/ca.crt.pem'
_SERIAL_NUMBER_GENERATOR_CB = lambda: \
hashlib.sha1(str(time.time()) + str(random.random())).\
hexdigest()
def pem_private_to_rsa(private_key_pem, passphrase=None):
def passphrase_cb(*args):
return passphrase
rsa = M2Crypto.RSA.load_key_string(
private_key_pem,
callback=passphrase_cb)
return rsa
def pem_csr_to_csr(csr_pem):
return M2Crypto.X509.load_request_string(csr_pem)
def pem_certificate_to_x509(cert_pem):
return M2Crypto.X509.load_cert_string(cert_pem)
def new_cert(ca_private_key_pem, csr_pem, validity_td, issuer_name, bits=2048,
is_ca=False, passphrase=None):
ca_rsa = pem_private_to_rsa(
ca_private_key_pem,
passphrase=passphrase)
def callback(*args):
pass
csr = pem_csr_to_csr(csr_pem)
public_key = csr.get_pubkey()
name = csr.get_subject()
cert = M2Crypto.X509.X509()
sn_hexstring = _SERIAL_NUMBER_GENERATOR_CB()
sn = int(sn_hexstring, 16)
cert.set_serial_number(sn)
cert.set_subject(name)
now_epoch = long(time.time())
notBefore = M2Crypto.ASN1.ASN1_UTCTIME()
notBefore.set_time(now_epoch)
notAfter = M2Crypto.ASN1.ASN1_UTCTIME()
notAfter.set_time(now_epoch + long(validity_td.total_seconds()))
cert.set_not_before(notBefore)
cert.set_not_after(notAfter)
cert.set_issuer(issuer_name)
cert.set_pubkey(public_key)
ext = M2Crypto.X509.new_extension('basicConstraints', 'CA:FALSE')
cert.add_ext(ext)
pkey = M2Crypto.EVP.PKey()
pkey.assign_rsa(ca_rsa)
cert.sign(pkey, 'sha1')
cert_pem = cert.as_pem()
return cert_pem
def sign(ca_key_filepath, ca_crt_filepath, csr_filepath, passphrase=None):
with open(ca_crt_filepath) as f:
ca_cert_pem = f.read()
with open(ca_key_filepath) as f:
ca_private_key_pem = f.read()
ca_cert = pem_certificate_to_x509(ca_cert_pem)
issuer_name = ca_cert.get_issuer()
with open(csr_filepath) as f:
csr_pem = f.read()
validity_td = datetime.timedelta(days=400)
return new_cert(
ca_private_key_pem,
csr_pem,
validity_td,
issuer_name,
passphrase=passphrase)
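# Rough usage sketch (added for illustration; the file names below are
# hypothetical and not part of this script):
#   crt_pem = sign('output/ca.key.pem', 'output/ca.crt.pem',
#                  'client.csr.pem', passphrase='test')
#   with open('client.crt.pem', 'w') as f:
#       f.write(crt_pem)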
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sign a CSR')
parser.add_argument('ca_key_filepath',
help='File-path of CA PEM private-key')
parser.add_argument('ca_crt_filepath',
help='File-path of CA PEM certificate')
parser.add_argument('csr_filepath',
help='File-path of PEM CSR')
parser.add_argument('-p', '--passphrase',
help='CA passphrase')
args = parser.parse_args()
crt_pem = sign(
args.ca_key_filepath,
args.ca_crt_filepath,
args.csr_filepath,
passphrase=args.passphrase)
print(crt_pem)
| gpl-2.0 | 3,708,214,889,509,246,500 | 24.360902 | 87 | 0.607471 | false |
jjmiranda/edx-platform | openedx/core/djangoapps/user_api/views.py | 1 | 40311 | """HTTP end-points for the User API. """
import copy
from opaque_keys import InvalidKeyError
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured, NON_FIELD_ERRORS, ValidationError
from django.utils.translation import ugettext as _
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie, csrf_protect, csrf_exempt
from opaque_keys.edx import locator
from rest_framework import authentication
from rest_framework import filters
from rest_framework import generics
from rest_framework import status
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.exceptions import ParseError
from django_countries import countries
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from openedx.core.lib.api.permissions import ApiKeyHeaderPermission
import third_party_auth
from django_comment_common.models import Role
from edxmako.shortcuts import marketing_link
from student.forms import get_registration_extension_form
from student.views import create_account_with_params
from student.cookies import set_logged_in_cookies
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.lib.api.authentication import SessionAuthenticationAllowInactiveUser
from util.json_request import JsonResponse
from .preferences.api import get_country_time_zones, update_email_opt_in
from .helpers import FormDescription, shim_student_view, require_post_params
from .models import UserPreference, UserProfile
from .accounts import (
NAME_MAX_LENGTH, EMAIL_MIN_LENGTH, EMAIL_MAX_LENGTH, PASSWORD_MIN_LENGTH, PASSWORD_MAX_LENGTH,
USERNAME_MIN_LENGTH, USERNAME_MAX_LENGTH
)
from .accounts.api import check_account_exists
from .serializers import CountryTimeZoneSerializer, UserSerializer, UserPreferenceSerializer
class LoginSessionView(APIView):
"""HTTP end-points for logging in users. """
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
@method_decorator(ensure_csrf_cookie)
def get(self, request):
"""Return a description of the login form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_login_session"))
# Translators: This label appears above a field on the login form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the login form meant to hold the user's email address.
email_placeholder = _(u"[email protected]")
# Translators: These instructions appear on the login form, immediately
# below a field meant to hold the user's email address.
email_instructions = _("The email address you used to register with {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
}
)
# Translators: This label appears above a field on the login form
# meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"min_length": PASSWORD_MIN_LENGTH,
"max_length": PASSWORD_MAX_LENGTH,
}
)
form_desc.add_field(
"remember",
field_type="checkbox",
label=_("Remember me"),
default=False,
required=False,
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
@method_decorator(require_post_params(["email", "password"]))
@method_decorator(csrf_protect)
def post(self, request):
"""Log in a user.
You must send all required form fields with the request.
You can optionally send an `analytics` param with a JSON-encoded
object with additional info to include in the login analytics event.
Currently, the only supported field is "enroll_course_id" to indicate
that the user logged in while enrolling in a particular course.
Arguments:
request (HttpRequest)
Returns:
HttpResponse: 200 on success
HttpResponse: 400 if the request is not valid.
HttpResponse: 403 if authentication failed.
403 with content "third-party-auth" if the user
has successfully authenticated with a third party provider
but does not have a linked account.
HttpResponse: 302 if redirecting to another page.
Example Usage:
POST /user_api/v1/login_session
with POST params `email`, `password`, and `remember`.
200 OK
"""
# For the initial implementation, shim the existing login view
# from the student Django app.
from student.views import login_user
return shim_student_view(login_user, check_logged_in=True)(request)
class RegistrationView(APIView):
"""HTTP end-points for creating a new user. """
DEFAULT_FIELDS = ["email", "name", "username", "password"]
EXTRA_FIELDS = [
"first_name",
"last_name",
"city",
"state",
"country",
"gender",
"year_of_birth",
"level_of_education",
"company",
"title",
"mailing_address",
"goals",
"honor_code",
"terms_of_service",
]
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
def _is_field_visible(self, field_name):
"""Check whether a field is visible based on Django settings. """
return self._extra_fields_setting.get(field_name) in ["required", "optional"]
def _is_field_required(self, field_name):
"""Check whether a field is required based on Django settings. """
return self._extra_fields_setting.get(field_name) == "required"
def __init__(self, *args, **kwargs):
super(RegistrationView, self).__init__(*args, **kwargs)
# Backwards compatibility: Honor code is required by default, unless
# explicitly set to "optional" in Django settings.
self._extra_fields_setting = copy.deepcopy(configuration_helpers.get_value('REGISTRATION_EXTRA_FIELDS'))
if not self._extra_fields_setting:
self._extra_fields_setting = copy.deepcopy(settings.REGISTRATION_EXTRA_FIELDS)
self._extra_fields_setting["honor_code"] = self._extra_fields_setting.get("honor_code", "required")
# Check that the setting is configured correctly
for field_name in self.EXTRA_FIELDS:
if self._extra_fields_setting.get(field_name, "hidden") not in ["required", "optional", "hidden"]:
msg = u"Setting REGISTRATION_EXTRA_FIELDS values must be either required, optional, or hidden."
raise ImproperlyConfigured(msg)
# Map field names to the instance method used to add the field to the form
self.field_handlers = {}
for field_name in self.DEFAULT_FIELDS + self.EXTRA_FIELDS:
handler = getattr(self, "_add_{field_name}_field".format(field_name=field_name))
self.field_handlers[field_name] = handler
@method_decorator(ensure_csrf_cookie)
def get(self, request):
"""Return a description of the registration form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
This is especially important for the registration form,
since different edx-platform installations might
collect different demographic information.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Arguments:
request (HttpRequest)
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_registration"))
self._apply_third_party_auth_overrides(request, form_desc)
# Default fields are always required
for field_name in self.DEFAULT_FIELDS:
self.field_handlers[field_name](form_desc, required=True)
# Custom form fields can be added via the form set in settings.REGISTRATION_EXTENSION_FORM
custom_form = get_registration_extension_form()
if custom_form:
for field_name, field in custom_form.fields.items():
restrictions = {}
if getattr(field, 'max_length', None):
restrictions['max_length'] = field.max_length
if getattr(field, 'min_length', None):
restrictions['min_length'] = field.min_length
field_options = getattr(
getattr(custom_form, 'Meta', None), 'serialization_options', {}
).get(field_name, {})
field_type = field_options.get('field_type', FormDescription.FIELD_TYPE_MAP.get(field.__class__))
if not field_type:
raise ImproperlyConfigured(
"Field type '{}' not recognized for registration extension field '{}'.".format(
field_type,
field_name
)
)
form_desc.add_field(
field_name, label=field.label,
default=field_options.get('default'),
field_type=field_options.get('field_type', FormDescription.FIELD_TYPE_MAP.get(field.__class__)),
placeholder=field.initial, instructions=field.help_text, required=field.required,
restrictions=restrictions,
options=getattr(field, 'choices', None), error_messages=field.error_messages,
include_default_option=field_options.get('include_default_option'),
)
# Extra fields configured in Django settings
# may be required, optional, or hidden
for field_name in self.EXTRA_FIELDS:
if self._is_field_visible(field_name):
self.field_handlers[field_name](
form_desc,
required=self._is_field_required(field_name)
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
@method_decorator(csrf_exempt)
def post(self, request):
"""Create the user's account.
You must send all required form fields with the request.
You can optionally send a "course_id" param to indicate in analytics
events that the user registered while enrolling in a particular course.
Arguments:
request (HTTPRequest)
Returns:
HttpResponse: 200 on success
HttpResponse: 400 if the request is not valid.
HttpResponse: 409 if an account with the given username or email
address already exists
"""
data = request.POST.copy()
email = data.get('email')
username = data.get('username')
# Handle duplicate email/username
conflicts = check_account_exists(email=email, username=username)
if conflicts:
conflict_messages = {
"email": _(
# Translators: This message is shown to users who attempt to create a new
# account using an email address associated with an existing account.
u"It looks like {email_address} belongs to an existing account. "
u"Try again with a different email address."
).format(email_address=email),
"username": _(
# Translators: This message is shown to users who attempt to create a new
# account using a username associated with an existing account.
u"It looks like {username} belongs to an existing account. "
u"Try again with a different username."
).format(username=username),
}
errors = {
field: [{"user_message": conflict_messages[field]}]
for field in conflicts
}
return JsonResponse(errors, status=409)
# Backwards compatibility: the student view expects both
# terms of service and honor code values. Since we're combining
# these into a single checkbox, the only value we may get
# from the new view is "honor_code".
# Longer term, we will need to make this more flexible to support
# open source installations that may have separate checkboxes
# for TOS, privacy policy, etc.
if data.get("honor_code") and "terms_of_service" not in data:
data["terms_of_service"] = data["honor_code"]
try:
user = create_account_with_params(request, data)
except ValidationError as err:
# Should only get non-field errors from this function
assert NON_FIELD_ERRORS not in err.message_dict
# Only return first error for each field
errors = {
field: [{"user_message": error} for error in error_list]
for field, error_list in err.message_dict.items()
}
return JsonResponse(errors, status=400)
response = JsonResponse({"success": True})
set_logged_in_cookies(request, response, user)
return response
def _add_email_field(self, form_desc, required=True):
"""Add an email field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the registration form meant to hold the user's email address.
email_placeholder = _(u"[email protected]")
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
},
required=required
)
def _add_name_field(self, form_desc, required=True):
"""Add a name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's full name.
name_label = _(u"Full name")
# Translators: This example name is used as a placeholder in
# a field on the registration form meant to hold the user's name.
name_placeholder = _(u"Jane Doe")
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's full name.
name_instructions = _(u"Your legal name, used for any certificates you earn.")
form_desc.add_field(
"name",
label=name_label,
placeholder=name_placeholder,
instructions=name_instructions,
restrictions={
"max_length": NAME_MAX_LENGTH,
},
required=required
)
def _add_username_field(self, form_desc, required=True):
"""Add a username field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's public username.
username_label = _(u"Public username")
username_instructions = _(
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's public username.
u"The name that will identify you in your courses - "
u"{bold_start}(cannot be changed later){bold_end}"
).format(bold_start=u'<strong>', bold_end=u'</strong>')
# Translators: This example username is used as a placeholder in
# a field on the registration form meant to hold the user's username.
username_placeholder = _(u"JaneDoe")
form_desc.add_field(
"username",
label=username_label,
instructions=username_instructions,
placeholder=username_placeholder,
restrictions={
"min_length": USERNAME_MIN_LENGTH,
"max_length": USERNAME_MAX_LENGTH,
},
required=required
)
def _add_password_field(self, form_desc, required=True):
"""Add a password field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"min_length": PASSWORD_MIN_LENGTH,
"max_length": PASSWORD_MAX_LENGTH,
},
required=required
)
def _add_level_of_education_field(self, form_desc, required=True):
"""Add a level of education field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's highest completed level of education.
education_level_label = _(u"Highest level of education completed")
# The labels are marked for translation in UserProfile model definition.
options = [(name, _(label)) for name, label in UserProfile.LEVEL_OF_EDUCATION_CHOICES] # pylint: disable=translation-of-non-string
form_desc.add_field(
"level_of_education",
label=education_level_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_gender_field(self, form_desc, required=True):
"""Add a gender field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's gender.
gender_label = _(u"Gender")
# The labels are marked for translation in UserProfile model definition.
options = [(name, _(label)) for name, label in UserProfile.GENDER_CHOICES] # pylint: disable=translation-of-non-string
form_desc.add_field(
"gender",
label=gender_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_year_of_birth_field(self, form_desc, required=True):
"""Add a year of birth field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's year of birth.
yob_label = _(u"Year of birth")
options = [(unicode(year), unicode(year)) for year in UserProfile.VALID_YEARS]
form_desc.add_field(
"year_of_birth",
label=yob_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_mailing_address_field(self, form_desc, required=True):
"""Add a mailing address field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's mailing address.
mailing_address_label = _(u"Mailing address")
form_desc.add_field(
"mailing_address",
label=mailing_address_label,
field_type="textarea",
required=required
)
def _add_goals_field(self, form_desc, required=True):
"""Add a goals field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This phrase appears above a field on the registration form
# meant to hold the user's reasons for registering with edX.
goals_label = _(u"Tell us why you're interested in {platform_name}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME)
)
form_desc.add_field(
"goals",
label=goals_label,
field_type="textarea",
required=required
)
def _add_city_field(self, form_desc, required=True):
"""Add a city field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the city in which they live.
city_label = _(u"City")
form_desc.add_field(
"city",
label=city_label,
required=required
)
def _add_state_field(self, form_desc, required=False):
"""Add a State/Province/Region field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the State/Province/Region in which they live.
state_label = _(u"State/Province/Region")
form_desc.add_field(
"state",
label=state_label,
required=required
)
def _add_company_field(self, form_desc, required=False):
"""Add a Company field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the Company
company_label = _(u"Company")
form_desc.add_field(
"company",
label=company_label,
required=required
)
def _add_title_field(self, form_desc, required=False):
"""Add a Title field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the Title
title_label = _(u"Title")
form_desc.add_field(
"title",
label=title_label,
required=required
)
def _add_first_name_field(self, form_desc, required=False):
"""Add a First Name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the First Name
first_name_label = _(u"First Name")
form_desc.add_field(
"first_name",
label=first_name_label,
required=required
)
def _add_last_name_field(self, form_desc, required=False):
"""Add a Last Name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to False
"""
# Translators: This label appears above a field on the registration form
        # which allows the user to input the Last Name
last_name_label = _(u"Last Name")
form_desc.add_field(
"last_name",
label=last_name_label,
required=required
)
def _add_country_field(self, form_desc, required=True):
"""Add a country field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the country in which the user lives.
country_label = _(u"Country")
error_msg = _(u"Please select your Country.")
form_desc.add_field(
"country",
label=country_label,
field_type="select",
options=list(countries),
include_default_option=True,
required=required,
error_messages={
"required": error_msg
}
)
def _add_honor_code_field(self, form_desc, required=True):
"""Add an honor code field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Separate terms of service and honor code checkboxes
if self._is_field_visible("terms_of_service"):
terms_label = _(u"Honor Code")
terms_link = marketing_link("HONOR")
terms_text = _(u"Review the Honor Code")
# Combine terms of service and honor code checkboxes
else:
# Translators: This is a legal document users must agree to
# in order to register a new account.
terms_label = _(u"Terms of Service and Honor Code")
terms_link = marketing_link("HONOR")
terms_text = _(u"Review the Terms of Service and Honor Code")
# Translators: "Terms of Service" is a legal document users must agree to
# in order to register a new account.
label = _(u"I agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
# Translators: "Terms of Service" is a legal document users must agree to
# in order to register a new account.
error_msg = _(u"You must agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
form_desc.add_field(
"honor_code",
label=label,
field_type="checkbox",
default=False,
required=required,
error_messages={
"required": error_msg
},
supplementalLink=terms_link,
supplementalText=terms_text
)
def _add_terms_of_service_field(self, form_desc, required=True):
"""Add a terms of service field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This is a legal document users must agree to
# in order to register a new account.
terms_label = _(u"Terms of Service")
        terms_link = marketing_link("TOS")
terms_text = _(u"Review the Terms of Service")
# Translators: "Terms of service" is a legal document users must agree to
# in order to register a new account.
label = _(u"I agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
# Translators: "Terms of service" is a legal document users must agree to
# in order to register a new account.
error_msg = _(u"You must agree to the {platform_name} {terms_of_service}").format(
platform_name=configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME),
terms_of_service=terms_label
)
form_desc.add_field(
"terms_of_service",
label=label,
field_type="checkbox",
default=False,
required=required,
error_messages={
"required": error_msg
},
supplementalLink=terms_link,
supplementalText=terms_text
)
def _apply_third_party_auth_overrides(self, request, form_desc):
"""Modify the registration form if the user has authenticated with a third-party provider.
If a user has successfully authenticated with a third-party provider,
but does not yet have an account with EdX, we want to fill in
the registration form with any info that we get from the
provider.
This will also hide the password field, since we assign users a default
(random) password on the assumption that they will be using
third-party auth to log in.
Arguments:
request (HttpRequest): The request for the registration form, used
to determine if the user has successfully authenticated
with a third-party provider.
form_desc (FormDescription): The registration form description
"""
if third_party_auth.is_enabled():
running_pipeline = third_party_auth.pipeline.get(request)
if running_pipeline:
current_provider = third_party_auth.provider.Registry.get_from_pipeline(running_pipeline)
if current_provider:
# Override username / email / full name
field_overrides = current_provider.get_register_form_data(
running_pipeline.get('kwargs')
)
for field_name in self.DEFAULT_FIELDS:
if field_name in field_overrides:
form_desc.override_field_properties(
field_name, default=field_overrides[field_name]
)
# Hide the password field
form_desc.override_field_properties(
"password",
default="",
field_type="hidden",
required=False,
label="",
instructions="",
restrictions={}
)
class PasswordResetView(APIView):
"""HTTP end-point for GETting a description of the password reset form. """
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
@method_decorator(ensure_csrf_cookie)
def get(self, request):
"""Return a description of the password reset form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("password_change_request"))
# Translators: This label appears above a field on the password reset
# form meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the password reset form meant to hold the user's email address.
email_placeholder = _(u"[email protected]")
# Translators: These instructions appear on the password reset form,
# immediately below a field meant to hold the user's email address.
email_instructions = _(u"The email address you used to register with {platform_name}").format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
}
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
DRF class for interacting with the User ORM object
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
queryset = User.objects.all().prefetch_related("preferences")
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
class ForumRoleUsersListView(generics.ListAPIView):
"""
Forum roles are represented by a list of user dicts
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
def get_queryset(self):
"""
Return a list of users with the specified role/course pair
"""
name = self.kwargs['name']
course_id_string = self.request.query_params.get('course_id')
if not course_id_string:
raise ParseError('course_id must be specified')
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id_string)
role = Role.objects.get_or_create(course_id=course_id, name=name)[0]
users = role.users.all()
return users
class UserPreferenceViewSet(viewsets.ReadOnlyModelViewSet):
"""
DRF class for interacting with the UserPreference ORM
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
queryset = UserPreference.objects.all()
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ("key", "user")
serializer_class = UserPreferenceSerializer
paginate_by = 10
paginate_by_param = "page_size"
class PreferenceUsersListView(generics.ListAPIView):
"""
DRF class for listing a user's preferences
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
def get_queryset(self):
return User.objects.filter(preferences__key=self.kwargs["pref_key"]).prefetch_related("preferences")
class UpdateEmailOptInPreference(APIView):
"""View for updating the email opt in preference. """
authentication_classes = (SessionAuthenticationAllowInactiveUser,)
@method_decorator(require_post_params(["course_id", "email_opt_in"]))
@method_decorator(ensure_csrf_cookie)
def post(self, request):
""" Post function for updating the email opt in preference.
Allows the modification or creation of the email opt in preference at an
organizational level.
Args:
request (Request): The request should contain the following POST parameters:
* course_id: The slash separated course ID. Used to determine the organization
for this preference setting.
* email_opt_in: "True" or "False" to determine if the user is opting in for emails from
this organization. If the string does not match "True" (case insensitive) it will
assume False.
"""
course_id = request.data['course_id']
try:
org = locator.CourseLocator.from_string(course_id).org
except InvalidKeyError:
return HttpResponse(
status=400,
content="No course '{course_id}' found".format(course_id=course_id),
content_type="text/plain"
)
# Only check for true. All other values are False.
email_opt_in = request.data['email_opt_in'].lower() == 'true'
update_email_opt_in(request.user, org, email_opt_in)
return HttpResponse(status=status.HTTP_200_OK)
class CountryTimeZoneListView(generics.ListAPIView):
"""
**Use Cases**
Retrieves a list of all time zones, by default, or common time zones for country, if given
The country is passed in as its ISO 3166-1 Alpha-2 country code as an
optional 'country_code' argument. The country code is also case-insensitive.
**Example Requests**
GET /user_api/v1/preferences/time_zones/
GET /user_api/v1/preferences/time_zones/?country_code=FR
**Example GET Response**
If the request is successful, an HTTP 200 "OK" response is returned along with a
list of time zone dictionaries for all time zones or just for time zones commonly
used in a country, if given.
Each time zone dictionary contains the following values.
* time_zone: The name of the time zone.
* description: The display version of the time zone
"""
serializer_class = CountryTimeZoneSerializer
paginator = None
def get_queryset(self):
country_code = self.request.GET.get('country_code', None)
return get_country_time_zones(country_code)
| agpl-3.0 | -1,252,888,636,332,038,100 | 36.5685 | 139 | 0.614324 | false |
AVSystem/avs_commons | tools/rbtree_fuzz_gen_full_tree.py | 1 | 1242 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2021 AVSystem <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import functools
import struct
nodes = int(sys.argv[1]) if len(sys.argv) > 1 else 1
def binsearch_depth(nums, num):
if not nums:
raise ValueError('should never happen')
at = len(nums) // 2
if nums[at] == num:
return 1
elif num < nums[at]:
return 1 + binsearch_depth(nums[:at], num)
else:
return 1 + binsearch_depth(nums[at+1:], num)
values = list(range(1, nodes+1))
ordered_values = sorted(values, key=functools.partial(binsearch_depth, values))
for num in ordered_values:
sys.stdout.buffer.write(struct.pack('=BI', 0, num))
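# Illustrative decoder, not part of the fuzz input generator above: each record
# written to stdout is struct-packed as '=BI' (a one-byte opcode, 0 meaning
# "insert", followed by an unsigned 32-bit key). Reading the stream back shows the
# keys arriving in breadth-first order, which fills the tree level by level.
def decode_records(data):
    record = struct.Struct('=BI')
    usable = len(data) - len(data) % record.size
    return [record.unpack_from(data, offset)
            for offset in range(0, usable, record.size)]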
| apache-2.0 | -2,947,104,314,626,366,000 | 30.846154 | 79 | 0.697262 | false |
millen1m/flask-restplus-server-example | app/modules/auth/views.py | 1 | 4794 | # coding: utf-8
"""
OAuth2 provider setup.
It is based on the code from the example:
https://github.com/lepture/example-oauth2-server
More details are available here:
* http://flask-oauthlib.readthedocs.org/en/latest/oauth2.html
* http://lepture.com/en/2013/create-oauth-server
"""
from flask import Blueprint, request, render_template, jsonify, session, redirect
from flask_login import current_user
import flask_login
import webargs
from werkzeug import exceptions as http_exceptions
from werkzeug import security
from app.extensions import db, api, oauth2, login_manager
from app.modules.users.models import User
from . import parameters
from .models import OAuth2Client
import logging
log = logging.getLogger('flask_oauthlib')
login_manager.login_view = "auth.login"
auth_blueprint = Blueprint('auth', __name__, url_prefix='/auth') # pylint: disable=invalid-name
def get_current_user():
if 'id' in session:
uid = session['id']
return User.query.get(uid)
else:
return User.query.get(1)
@auth_blueprint.route('/login', methods=['GET', 'POST'])
def login(*args, **kwargs):
if request.method == 'GET': # Note: it is critical to not have the action parameter on the form
return '''
Please log in to access your account
<form method='POST'>
<input type='text' name='email' id='email' placeholder='email'></input>
<input type='password' name='pw' id='pw' placeholder='password'></input>
<input type='submit' name='submit'></input>
</form>
'''
    email = request.form['email']
    user = None
    if request.form['pw']:
        user = User.find_with_password(email, request.form['pw'])
    if user is None:
        return 'Bad login'
    flask_login.login_user(user)
    next_url = request.args.get("next")
    if next_url is None:
        next_url = 'auth/protected'
    return redirect(next_url)
@auth_blueprint.route('/logout', methods=['GET', 'POST'])
@flask_login.login_required
def logout(*args, **kwargs):
flask_login.logout_user()
return '''
<h1>You have successfully logged out</h1>
Would you like to log in again?
<form method='POST' action='login'>
<input type='text' name='email' id='email' placeholder='email'></input>
<input type='password' name='pw' id='pw' placeholder='password'></input>
<input type='submit' name='login'></input>
</form>
'''
@auth_blueprint.route('/protected')
@flask_login.login_required
def protected():
return 'Logged in as: ' + flask_login.current_user.username
@auth_blueprint.route('/oauth2/token', methods=['GET', 'POST'])
@oauth2.token_handler
def access_token(*args, **kwargs):
# pylint: disable=unused-argument
"""
This endpoint is for exchanging/refreshing an access token.
Returns:
response (dict): a dictionary or None as the extra credentials for
creating the token response.
"""
log.debug("requested token")
return None
@auth_blueprint.route('/oauth2/revoke', methods=['POST'])
@oauth2.revoke_handler
def revoke_token():
"""
This endpoint allows a user to revoke their access token.
"""
pass
@auth_blueprint.route('/oauth2/errors', methods=['POST'])
def error_message():
"""
This endpoint allows a user to revoke their access token.
"""
log.debug("Error")
pass
@oauth2.usergetter
def get_user(username, password, *args, **kwargs):
user = User.query.filter_by(username=username).first()
print("Running user getter")
    if user is not None and user.check_password(password):
return user
return None
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
@auth_blueprint.route('/oauth2/authorize', methods=['GET', 'POST'])
@flask_login.login_required
@oauth2.authorize_handler
def authorize(*args, **kwargs):
# pylint: disable=unused-argument
"""
This endpoint asks user if he grants access to his data to the requesting
application.
"""
log.debug("requested authorization")
if not current_user.is_authenticated:
log.debug(("NOT AUTHENTICATED"))
return api.abort(code=http_exceptions.Unauthorized.code)
if request.method == 'GET':
client_id = kwargs.get('client_id')
log.debug("render authorizer")
oauth2_client = OAuth2Client.query.filter_by(client_id=client_id).first()
kwargs['client'] = oauth2_client
kwargs['user'] = current_user
# TODO: improve template design
return render_template('authorize.html', **kwargs)
confirm = request.form.get('confirm', 'no')
return confirm == 'yes'
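# Illustrative client-side sketch, not used by the server code above: one way a
# registered OAuth2 client could exchange resource-owner credentials for a token
# at the endpoint defined by access_token(). The host, client credentials and
# enabled grant types are assumptions, not values taken from this project.
def example_fetch_token(username, password,
                        client_id='example-client-id',
                        client_secret='example-client-secret',
                        base_url='http://localhost:5000'):
    import requests  # local import keeps the sketch self-contained
    response = requests.post(
        base_url + '/auth/oauth2/token',
        data={
            'grant_type': 'password',
            'username': username,
            'password': password,
            'client_id': client_id,
            'client_secret': client_secret,
        },
    )
    return response.json()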
| mit | -8,786,363,367,552,155,000 | 28.231707 | 100 | 0.645807 | false |
lnanhkhoa/thesis-iot-khoa | iot/temporary/structureDevice.py | 1 | 11595 | importantDatabase = {
"floor_00": {
"nameOnWeb": "Ground Floor",
"room_00": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave01",
"name": "Device02",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeSlave01",
"name": "Device05",
"nameModule": "Light",
"nameOnWeb": "Sliding Door",
"value": 1
}
},
"Sensor": {},
"nameOnWeb": "Garage"
},
"room_01": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave01",
"name": "Device04",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 1
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave01",
"name": "Sensor03",
"nameModule": "Humidity",
"nameOnWeb": "Humidity",
"value": 52.0
},
"Sensor_02": {
"inNode": "dataNodeSlave01",
"name": "Sensor04",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 30.0
}
},
"nameOnWeb": "Warehouse"
},
"room_02": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave01",
"name": "Device06",
"nameModule": "Television",
"nameOnWeb": "Television",
"value": 0
},
"Device_02": {
"inNode": "dataNodeSlave01",
"name": "Device08",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_03": {
"inNode": "dataNodeSlave01",
"name": "Device07",
"nameModule": "AirConditioners",
"nameOnWeb": "Air Conditioners",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave01",
"name": "Sensor01",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 0.0
}
},
"nameOnWeb": "Servant Room"
},
"room_03": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave01",
"name": "Device03",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeSlave01",
"name": "Device01",
"nameModule": "WashingMachine",
"nameOnWeb": "Washing Machine",
"value": 0
}
},
"Sensor": {},
"nameOnWeb": "WC"
}
},
"floor_01": {
"nameOnWeb": "First Floor",
"room_00": {
"Device": {
"Device_01": {
"inNode": "dataNodeMaster01",
"name": "Device11",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 1
},
"Device_02": {
"inNode": "dataNodeMaster01",
"name": "Device08",
"nameModule": "Television",
"nameOnWeb": "Television",
"value": 0
},
"Device_03": {
"inNode": "dataNodeMaster01",
"name": "Device09",
"nameModule": "AirConditioners",
"nameOnWeb": "Air Conditioners",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeMaster01",
"name": "Sensor01",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 29.8
}
},
"nameOnWeb": "Living Room"
},
"room_01": {
"Device": {
"Device_01": {
"inNode": "dataNodeMaster01",
"name": "Device01",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 1
},
"Device_02": {
"inNode": "dataNodeMaster01",
"name": "Device10",
"nameModule": "Television",
"nameOnWeb": "Television",
"value": 1
}
},
"Sensor": {},
"nameOnWeb": "Dining Room"
},
"room_02": {
"Device": {
"Device_01": {
"inNode": "dataNodeMaster01",
"name": "Device04",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 1
},
"Device_02": {
"inNode": "dataNodeMaster01",
"name": "Device07",
"nameModule": "Refrigerator",
"nameOnWeb": "Refrigerator",
"value": 1
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave01",
"name": "Sensor05",
"nameModule": "GasSensor",
"nameOnWeb": "Gas Sensor",
"value": 1.0
}
},
"nameOnWeb": "Kitchen"
},
"room_03": {
"Device": {
"Device_01": {
"inNode": "dataNodeMaster01",
"name": "Device02",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeMaster01",
"name": "Device06",
"nameModule": "AirConditioners",
"nameOnWeb": "Air Conditioners",
"value": 0
},
"Device_03": {
"inNode": "dataNodeMaster01",
"name": "Device03",
"nameModule": "Television",
"nameOnWeb": "Television",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeMaster01",
"name": "Sensor02",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 30.0
}
},
"nameOnWeb": "Master's Room"
},
"room_04": {
"Device": {
"Device_01": {
"inNode": "dataNodeMaster01",
"name": "Device05",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
}
},
"Sensor": {},
"nameOnWeb": "WC"
}
},
"floor_02": {
"nameOnWeb": "Second Floor",
"room_00": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave02",
"name": "Device02",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 1
}
},
"Sensor": {},
"nameOnWeb": "Altar room"
},
"room_01": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave02",
"name": "Device03",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeSlave02",
"name": "Device06",
"nameModule": "AirConditioners",
"nameOnWeb": "Air Conditioners",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave02",
"name": "Sensor01",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 0.0
}
},
"nameOnWeb": "Children's room"
},
"room_02": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave02",
"name": "Device04",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeSlave02",
"name": "Device01",
"nameModule": "AirConditioners",
"nameOnWeb": "Air Conditioners",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave02",
"name": "Sensor02",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 0.0
}
},
"nameOnWeb": "Guest's room"
},
"room_03": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave02",
"name": "Device05",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
}
},
"Sensor": {},
"nameOnWeb": "WC room"
},
"room_04": {
"Device": {},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave02",
"name": "Sensor07",
"nameModule": "Humidity",
"nameOnWeb": "Soil Moisture",
"value": 77.0
}
},
"nameOnWeb": "Garden"
}
},
"floor_03": {
"nameOnWeb": "Third Floor",
"room_00": {
"Device": {},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave02",
"name": "Sensor06",
"nameModule": "RainSensor",
"nameOnWeb": "Rain sensor",
"value": 7.0
}
},
"nameOnWeb": "OutSide"
}
    }
}
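# Illustrative helper, not part of the original data: walk the nested
# floor -> room -> Device dictionaries above and collect every device that is
# currently switched on (value == 1), together with its floor and room names.
def list_devices_on(database=importantDatabase):
    devices_on = []
    for floor in database.values():
        if not isinstance(floor, dict):
            continue
        for room in floor.values():
            if not isinstance(room, dict) or "Device" not in room:
                continue
            for device in room["Device"].values():
                if device.get("value") == 1:
                    devices_on.append((floor["nameOnWeb"], room["nameOnWeb"],
                                       device["nameOnWeb"]))
    return devices_on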
} | apache-2.0 | -4,764,750,610,570,268,000 | 31.573034 | 52 | 0.312117 | false |
theandrewdavis/cpplint | cpplint.py | 1 | 262075 | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import glob
import itertools
import math # for log
import os
import re
import sre_compile
import string
import sys
import sysconfig
import unicodedata
import xml.etree.ElementTree
# if empty, use defaults
_valid_extensions = set([])
__VERSION__ = '1.5.4'
try:
xrange # Python 2
except NameError:
# -- pylint: disable=redefined-builtin
xrange = range # Python 3
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit|sed|gsed]
[--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--repository=path]
[--linelength=digits] [--headers=x,y,...]
[--recursive]
[--exclude=path]
[--extensions=hpp,cpp,...]
[--includeorder=default|standardcfirst]
[--quiet]
[--version]
<file> [file] ...
Style checker for C/C++ source files.
This is a fork of the Google style checker with minor extensions.
The style guidelines this tries to follow are those in
https://google.github.io/styleguide/cppguide.html
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are %s.
Other file types will be ignored.
Change the extensions with the --extensions flag.
Flags:
output=emacs|eclipse|vs7|junit|sed|gsed
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Further support exists for
eclipse (eclipse), and JUnit (junit). XML parsers such as those used
in Jenkins and Bamboo may also be used.
The sed format outputs sed commands that should fix some of the errors.
Note that this requires gnu sed. If that is installed as gsed on your
system (common e.g. on macOS with homebrew) you can use the gsed output
format. Sed commands are written to stdout, not stderr, so you should be
able to pipe output straight to a shell to run the fixes.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
Errors with lower verbosity levels have lower confidence and are more
likely to be false positives.
quiet
Don't print anything if no errors are found.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
repository=path
The top level directory of the repository, used to derive the header
guard CPP variable. By default, this is determined by searching for a
path that contains .git, .hg, or .svn. When this flag is specified, the
given path is used instead. This option allows the header guard CPP
variable to remain consistent even if members of a team have different
repository root directories (such as when checking out a subdirectory
with SVN). In addition, users of non-mainstream version control systems
can use this flag to ensure readable header guard CPP variables.
Examples:
Assuming that Alice checks out ProjectName and Bob checks out
ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
with no --repository flag, the header guard CPP variable will be:
Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
If Alice uses the --repository=trunk flag and Bob omits the flag or
uses --repository=. then the header guard CPP variable will be:
Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
root=subdir
The root directory used for deriving header guard CPP variable.
This directory is relative to the top level directory of the repository
which by default is determined by searching for a directory that contains
.git, .hg, or .svn but can also be controlled with the --repository flag.
If the specified directory does not exist, this flag is ignored.
Examples:
Assuming that src is the top level directory of the repository (and
cwd=top/src), the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
--root=.. => SRC_CHROME_BROWSER_UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
recursive
Search for files to lint recursively. Each directory given in the list
of files to be linted is replaced by all files that descend from that
directory. Files with extensions not in the valid extensions list are
excluded.
exclude=path
Exclude the given path from the list of files to be linted. Relative
paths are evaluated relative to the current directory and shell globbing
is performed. This flag can be provided multiple times to exclude
multiple files.
Examples:
--exclude=one.cc
--exclude=src/*.cc
--exclude=src/*.cc --exclude=test/*.cc
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=%s
includeorder=default|standardcfirst
      For the build/include_order rule, the default is to blindly assume that
      angle-bracket includes with a file extension are C system headers
      (default), even though this produces some false classifications.
      The default matches the convention established at Google.
standardcfirst means to instead use an allow-list of known c headers and
treat all others as separate group of "other system headers". The C headers
included are those of the C-standard lib and closely related ones.
headers=x,y,...
The header extensions that cpplint will treat as .h in checks. Values are
automatically added to --extensions list.
(by default, only files with extensions %s will be assumed to be headers)
Examples:
--headers=%s
--headers=hpp,hxx
--headers=hpp
    cpplint.py supports per-directory configurations specified in CPPLINT.cfg
    files. A CPPLINT.cfg file can contain a number of key=value pairs.
    Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
root=subdir
headers=x,y,...
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through the linter.
"linelength" allows to specify the allowed line length for the project.
The "root" option is similar in function to the --root flag (see example
above). Paths are relative to the directory of the CPPLINT.cfg.
The "headers" option is similar in function to the --headers flag
(see example above).
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/c++14',
'build/c++tr1',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_subdir',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces_headers',
'build/namespaces_literals',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/strings',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_if_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
]
# keywords to use with --outputs which generate stdout for machine processing
_MACHINE_OUTPUTS = [
'junit',
'sed',
'gsed'
]
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
'readability/streams',
'readability/function',
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# The default list of categories suppressed for C (not C++) files.
_DEFAULT_C_SUPPRESSED_CATEGORIES = [
'readability/casting',
]
# The default list of categories suppressed for Linux Kernel files.
_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
'whitespace/tab',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'scoped_allocator',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++14 headers
'shared_mutex',
# 17.6.1.2 C++17 headers
'any',
'charconv',
'codecvt',
'execution',
'filesystem',
'memory_resource',
'optional',
'string_view',
'variant',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# C headers
_C_HEADERS = frozenset([
# System C headers
'assert.h',
'complex.h',
'ctype.h',
'errno.h',
'fenv.h',
'float.h',
'inttypes.h',
'iso646.h',
'limits.h',
'locale.h',
'math.h',
'setjmp.h',
'signal.h',
'stdalign.h',
'stdarg.h',
'stdatomic.h',
'stdbool.h',
'stddef.h',
'stdint.h',
'stdio.h',
'stdlib.h',
'stdnoreturn.h',
'string.h',
'tgmath.h',
'threads.h',
'time.h',
'uchar.h',
'wchar.h',
'wctype.h',
# additional POSIX C headers
'aio.h',
'arpa/inet.h',
'cpio.h',
'dirent.h',
'dlfcn.h',
'fcntl.h',
'fmtmsg.h',
'fnmatch.h',
'ftw.h',
'glob.h',
'grp.h',
'iconv.h',
'langinfo.h',
'libgen.h',
'monetary.h',
'mqueue.h',
'ndbm.h',
'net/if.h',
'netdb.h',
'netinet/in.h',
'netinet/tcp.h',
'nl_types.h',
'poll.h',
'pthread.h',
'pwd.h',
'regex.h',
'sched.h',
'search.h',
'semaphore.h',
'setjmp.h',
'signal.h',
'spawn.h',
'strings.h',
'stropts.h',
'syslog.h',
'tar.h',
'termios.h',
'trace.h',
'ulimit.h',
'unistd.h',
'utime.h',
'utmpx.h',
'wordexp.h',
# additional GNUlib headers
'a.out.h',
'aliases.h',
'alloca.h',
'ar.h',
'argp.h',
'argz.h',
'byteswap.h',
'crypt.h',
'endian.h',
'envz.h',
'err.h',
'error.h',
'execinfo.h',
'fpu_control.h',
'fstab.h',
'fts.h',
'getopt.h',
'gshadow.h',
'ieee754.h',
'ifaddrs.h',
'libintl.h',
'mcheck.h',
'mntent.h',
'obstack.h',
'paths.h',
'printf.h',
'pty.h',
'resolv.h',
'shadow.h',
'sysexits.h',
'ttyent.h',
# Additional linux glibc headers
'dlfcn.h',
'elf.h',
'features.h',
'gconv.h',
'gnu-versions.h',
'lastlog.h',
'libio.h',
'link.h',
'malloc.h',
'memory.h',
'netash/ash.h',
'netatalk/at.h',
'netax25/ax25.h',
'neteconet/ec.h',
'netipx/ipx.h',
'netiucv/iucv.h',
'netpacket/packet.h',
'netrom/netrom.h',
'netrose/rose.h',
'nfs/nfs.h',
'nl_types.h',
'nss.h',
're_comp.h',
'regexp.h',
'sched.h',
'sgtty.h',
'stab.h',
'stdc-predef.h',
'stdio_ext.h',
'syscall.h',
'termio.h',
'thread_db.h',
'ucontext.h',
'ustat.h',
'utmp.h',
'values.h',
'wait.h',
'xlocale.h',
# Hardware specific headers
'arm_neon.h',
'emmintrin.h',
'xmmintin.h',
])
# Folders of C libraries so commonly used in C++,
# that they have parity with standard C libraries.
C_STANDARD_HEADER_FOLDERS = frozenset([
# standard C library
"sys",
# glibc for linux
"arpa",
"asm-generic",
"bits",
"gnu",
"net",
"netinet",
"protocols",
"rpc",
"rpcsvc",
"scsi",
# linux kernel header
"drm",
"linux",
"misc",
"mtd",
"rdma",
"sound",
"video",
"xen",
])
# Type names
_TYPES = re.compile(
r'^(?:'
# [dcl.type.simple]
r'(char(16_t|32_t)?)|wchar_t|'
r'bool|short|int|long|signed|unsigned|float|double|'
# [support.types]
r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
# [cstdint.syn]
r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
r'(u?int(max|ptr)_t)|'
r')$')
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Pattern for matching FileInfo.BaseName() against test file name
_test_suffixes = ['_test', '_regtest', '_unittest']
_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
# Pattern that matches only complete whitespace, possibly across multiple lines.
_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
# Assertion macros. These are defined in base/logging.h and
# testing/base/public/gunit.h.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE', 'ASSERT_TRUE',
'EXPECT_FALSE', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_OTHER_SYS_HEADER = 3
_LIKELY_MY_HEADER = 4
_POSSIBLE_MY_HEADER = 5
_OTHER_HEADER = 6
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
# Match strings that indicate we're working on a C (not C++) file.
_SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
# Match string that indicates we're working on a Linux Kernel file.
_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
# Commands for sed to fix the problem
_SED_FIXUPS = {
'Remove spaces around =': r's/ = /=/',
'Remove spaces around !=': r's/ != /!=/',
'Remove space before ( in if (': r's/if (/if(/',
'Remove space before ( in for (': r's/for (/for(/',
'Remove space before ( in while (': r's/while (/while(/',
'Remove space before ( in switch (': r's/switch (/switch(/',
'Should have a space between // and comment': r's/\/\//\/\/ /',
'Missing space before {': r's/\([^ ]\){/\1 {/',
'Tab found, replace by spaces': r's/\t/ /g',
'Line ends in whitespace. Consider deleting these extra spaces.': r's/\s*$//',
'You don\'t need a ; after a }': r's/};/}/',
'Missing space after ,': r's/,\([^ ]\)/, \1/g',
}
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
_root_debug = False
# The top level repository directory. If set, _root is calculated relative to
# this directory instead of the directory containing version control artifacts.
# This is set by the --repository flag.
_repository = None
# Files to exclude from linting. This is set by the --exclude flag.
_excludes = None
# Whether to suppress all PrintInfo messages, UNRELATED to --quiet flag
_quiet = False
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# This allows to use different include order rule than default
_include_order = "default"
try:
unicode
except NameError:
# -- pylint: disable=redefined-builtin
basestring = unicode = str
try:
long
except NameError:
# -- pylint: disable=redefined-builtin
long = int
if sys.version_info < (3,):
# -- pylint: disable=no-member
# BINARY_TYPE = str
itervalues = dict.itervalues
iteritems = dict.iteritems
else:
# BINARY_TYPE = bytes
itervalues = dict.values
iteritems = dict.items
def unicode_escape_decode(x):
if sys.version_info < (3,):
return codecs.unicode_escape_decode(x)[0]
else:
return x
# Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.
# This is set by --headers flag.
_hpp_headers = set([])
# {str, bool}: a map from error categories to booleans which indicate if the
# category should be suppressed for every line.
_global_error_suppressions = {}
def ProcessHppHeadersOption(val):
global _hpp_headers
try:
_hpp_headers = {ext.strip() for ext in val.split(',')}
except ValueError:
PrintUsage('Header extensions must be comma separated list.')
def ProcessIncludeOrderOption(val):
if val is None or val == "default":
pass
elif val == "standardcfirst":
global _include_order
_include_order = val
else:
    PrintUsage('Invalid includeorder value %s. Expected default|standardcfirst' % val)
def IsHeaderExtension(file_extension):
return file_extension in GetHeaderExtensions()
def GetHeaderExtensions():
if _hpp_headers:
return _hpp_headers
if _valid_extensions:
return {h for h in _valid_extensions if 'h' in h}
return set(['h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'])
# The allowed extensions for file names
# This is set by --extensions flag
def GetAllExtensions():
return GetHeaderExtensions().union(_valid_extensions or set(
['c', 'cc', 'cpp', 'cxx', 'c++', 'cu']))
def ProcessExtensionsOption(val):
global _valid_extensions
try:
extensions = [ext.strip() for ext in val.split(',')]
_valid_extensions = set(extensions)
except ValueError:
    PrintUsage('Extensions should be a comma-separated list of values; '
               'for example: extensions=hpp,cpp\n'
               'This could not be parsed: "%s"' % (val,))
def GetNonHeaderExtensions():
return GetAllExtensions().difference(GetHeaderExtensions())
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of line error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
elif category not in _LEGACY_ERROR_CATEGORIES:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
def ProcessGlobalSuppresions(lines):
"""Updates the list of global error suppressions.
Parses any lint directives in the file that have global effect.
Args:
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
"""
for line in lines:
if _SEARCH_C_FILE.search(line):
for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
_global_error_suppressions[category] = True
if _SEARCH_KERNEL_FILE.search(line):
for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
_global_error_suppressions[category] = True
def ResetNolintSuppressions():
"""Resets the set of NOLINT suppressions to empty."""
_error_suppressions.clear()
_global_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment or
global suppression.
"""
return (_global_error_suppressions.get(category, False) or
linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
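# Illustrative sketch, not called by cpplint itself: how the suppression helpers
# above fit together. Parsing a line that carries a NOLINT(category) comment
# records the line number, after which IsErrorSuppressedByNolint returns True
# for that category on that line. The filename and line number are arbitrary.
def _demo_nolint_suppression():
  ResetNolintSuppressions()  # note: clears module-level suppression state
  ParseNolintSuppressions('demo.cc', '\tint x;  // NOLINT(whitespace/tab)',
                          12, lambda *args: None)
  return IsErrorSuppressedByNolint('whitespace/tab', 12)  # True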
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
def _IsSourceExtension(s):
"""File extension (excluding dot) matches a source file extension."""
return s in GetNonHeaderExtensions()
class _IncludeState(object):
"""Tracks line numbers for includes, and the order in which includes appear.
  include_list contains a list of lists of (header, line number) pairs.
  It's a list of lists rather than just one flat list to make it
  easier to update across preprocessor boundaries.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_SYS_SECTION = 4
_OTHER_H_SECTION = 5
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_OTHER_SYS_HEADER: 'other system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_SYS_SECTION: 'other system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
self.include_list = [[]]
self._section = None
self._last_header = None
self.ResetSection('')
def FindHeader(self, header):
"""Check if a header has already been included.
Args:
header: header to check.
Returns:
Line number of previous occurrence, or -1 if the header has not
been seen before.
"""
for section_list in self.include_list:
for f in section_list:
if f[0] == header:
return f[1]
return -1
def ResetSection(self, directive):
"""Reset section checking for preprocessor directive.
Args:
directive: preprocessor directive (e.g. "if", "else").
"""
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
# Update list of includes. Note that we never pop from the
# include list.
if directive in ('if', 'ifdef', 'ifndef'):
self.include_list.append([])
elif directive in ('else', 'elif'):
self.include_list[-1] = []
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _OTHER_SYS_HEADER:
if self._section <= self._OTHER_SYS_SECTION:
self._section = self._OTHER_SYS_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
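# Illustrative sketch, not used by cpplint itself: the section state machine
# above accepts headers presented in the required order (empty return values)
# and produces a diagnostic when a C system header shows up after a C++ one.
def _demo_include_order():
  state = _IncludeState()
  assert state.CheckNextIncludeOrder(_C_SYS_HEADER) == ''
  assert state.CheckNextIncludeOrder(_CPP_SYS_HEADER) == ''
  # Going back to a C system header is out of order and returns
  # 'Found C system header after C++ system header'.
  return state.CheckNextIncludeOrder(_C_SYS_HEADER)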
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
    self.quiet = False  # Suppress non-error messages?
# output format:
# "emacs" - format that emacs can parse (default)
# "eclipse" - format that eclipse can parse
# "vs7" - format that Microsoft Visual Studio 7 can parse
# "junit" - format that Jenkins, Bamboo, etc can parse
# "sed" - returns a gnu sed command to fix the problem
# "gsed" - like sed, but names the command gsed, e.g. for macOS homebrew users
self.output_format = 'emacs'
# For JUnit output, save errors and failures until the end so that they
# can be written into the XML
self._junit_errors = []
self._junit_failures = []
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetQuiet(self, quiet):
"""Sets the module's quiet settings, and returns the previous setting."""
last_quiet = self.quiet
self.quiet = quiet
return last_quiet
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
""" Adds more filters to the existing list of error-message filters. """
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
self._filters_backup = self.filters[:]
def RestoreFilters(self):
""" Restores filters previously backed up."""
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in sorted(iteritems(self.errors_by_category)):
self.PrintInfo('Category \'%s\' errors found: %d\n' %
(category, count))
if self.error_count > 0:
self.PrintInfo('Total errors found: %d\n' % self.error_count)
def PrintInfo(self, message):
# _quiet does not represent --quiet flag.
# Hide infos from stdout to keep stdout pure for machine consumption
if not _quiet and self.output_format not in _MACHINE_OUTPUTS:
sys.stdout.write(message)
def PrintError(self, message):
if self.output_format == 'junit':
self._junit_errors.append(message)
else:
sys.stderr.write(message)
def AddJUnitFailure(self, filename, linenum, message, category, confidence):
self._junit_failures.append((filename, linenum, message, category,
confidence))
def FormatJUnitXML(self):
num_errors = len(self._junit_errors)
num_failures = len(self._junit_failures)
testsuite = xml.etree.ElementTree.Element('testsuite')
testsuite.attrib['errors'] = str(num_errors)
testsuite.attrib['failures'] = str(num_failures)
testsuite.attrib['name'] = 'cpplint'
if num_errors == 0 and num_failures == 0:
testsuite.attrib['tests'] = str(1)
xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
else:
testsuite.attrib['tests'] = str(num_errors + num_failures)
if num_errors > 0:
testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
testcase.attrib['name'] = 'errors'
error = xml.etree.ElementTree.SubElement(testcase, 'error')
error.text = '\n'.join(self._junit_errors)
if num_failures > 0:
# Group failures by file
failed_file_order = []
failures_by_file = {}
for failure in self._junit_failures:
failed_file = failure[0]
if failed_file not in failed_file_order:
failed_file_order.append(failed_file)
failures_by_file[failed_file] = []
failures_by_file[failed_file].append(failure)
# Create a testcase for each file
for failed_file in failed_file_order:
failures = failures_by_file[failed_file]
testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
testcase.attrib['name'] = failed_file
failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
template = '{0}: {1} [{2}] [{3}]'
texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
failure.text = '\n'.join(texts)
xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _Quiet():
"""Return's the module's quiet setting."""
return _cpplint_state.quiet
def _SetQuiet(quiet):
"""Set the module's quiet status, and return previous setting."""
return _cpplint_state.SetQuiet(quiet)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
def _AddFilters(filters):
"""Adds more filter overrides.
Unlike _SetFilters, this function does not reset the current list of filters
available.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.AddFilters(filters)
def _BackupFilters():
""" Saves the current filter list to backup storage."""
_cpplint_state.BackupFilters()
def _RestoreFilters():
""" Restores filters previously backed up."""
_cpplint_state.RestoreFilters()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if not self.in_a_function:
return
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # 1x base_trigger => 0, 2x => 1, 4x => 2, 8x => 3, 16x => 4, 32x => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
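# Illustrative sketch, not used by cpplint itself: with the default verbosity (1)
# the non-test trigger above is 250 * 2**1 = 500 lines, so a 1200-line function
# is reported at error level int(log2(1200 / 250)) == 2. The filename, line
# number and line count below are arbitrary examples.
def _demo_function_size_check():
  reported = []
  state = _FunctionState()
  state.Begin('DemoFunction()')
  state.lines_in_function = 1200  # pretend the body had 1200 non-comment lines
  state.Check(lambda *args: reported.append(args), 'demo.cc', 42)
  return reported  # one ('demo.cc', 42, 'readability/fn_size', 2, ...) entry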
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo(object):
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
r"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
# If the user specified a repository path, it exists, and the file is
# contained in it, use the specified repository path
if _repository:
repo = FileInfo(_repository).FullName()
root_dir = project_dir
while os.path.exists(root_dir):
# allow case insensitive compare on Windows
if os.path.normcase(root_dir) == os.path.normcase(repo):
return os.path.relpath(fullname, root_dir).replace('\\', '/')
one_up_dir = os.path.dirname(root_dir)
if one_up_dir == root_dir:
break
root_dir = one_up_dir
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = current_dir = os.path.dirname(fullname)
while current_dir != os.path.dirname(current_dir):
if (os.path.exists(os.path.join(current_dir, ".git")) or
os.path.exists(os.path.join(current_dir, ".hg")) or
os.path.exists(os.path.join(current_dir, ".svn"))):
root_dir = current_dir
current_dir = os.path.dirname(current_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period, includes that period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return _IsSourceExtension(self.Extension()[1:])
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
_cpplint_state.PrintError('%s(%s): error cpplint: [%s] %s [%d]\n' % (
filename, linenum, category, message, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'junit':
_cpplint_state.AddJUnitFailure(filename, linenum, message, category,
confidence)
elif _cpplint_state.output_format in ['sed', 'gsed']:
if message in _SED_FIXUPS:
sys.stdout.write(_cpplint_state.output_format + " -i '%s%s' %s # %s [%s] [%d]\n" % (
linenum, _SED_FIXUPS[message], filename, message, category, confidence))
else:
sys.stderr.write('# %s:%s: "%s" [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
final_message = '%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence)
sys.stderr.write(final_message)
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little more complicated than one might expect, because we
# have to be careful about how surrounding whitespace is removed, so that
# comments embedded inside statements are handled well.
# The current rule is: we only strip spaces from both sides when the comment
# sits at the end of the line. Otherwise, we try to remove spaces from the
# right side; if that doesn't work, we remove them from the left side, but
# only if a non-word character follows on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
_RE_PATTERN_C_COMMENTS + r'\s+|' +
r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
_RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
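# A minimal sketch of the quote-parity rule above (illustrative examples,
# not from the original test suite):
#   >>> IsCppString('std::string s = "hello')     # one unmatched double quote
#   True
#   >>> IsCppString('std::string s = "hello";')   # quotes are balanced
#   False
#   >>> IsCppString("char c = '\"';")             # '"' is a char literal
#   False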
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for the beginnings of raw strings and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
#
# Once we have matched a raw string, we check the prefix of the
# line to make sure that the line is not part of a single line
# comment. It's done this way because we remove raw strings
# before removing comments as opposed to removing comments
# before removing raw strings. This is because there are some
# cpplint checks that require the comments to be preserved, but
# we don't want to check comments that are inside raw strings.
matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if (matched and
not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
matched.group(1))):
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
else:
break
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
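# Illustrative example (a sketch, not from the original tests): a raw string
# that opens and closes on one line collapses to an ordinary empty string.
#   >>> CleanseRawStrings(['const char* s = R"(hi)";'])
#   ['const char* s = "";']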
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // <empty> comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '/**/'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
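# Illustrative examples (a sketch, not from the original tests):
#   >>> CleanseComments('int x = 0;  // initialize')
#   'int x = 0;'
#   >>> CleanseComments('f(/* unused */ 0);')
#   'f(0);'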
class CleansedLines(object):
"""Holds 4 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments.
2) lines member contains lines without comments.
3) raw_lines member contains all the lines without processing.
4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
strings removed.
All these members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
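# Illustrative examples for _CollapseStrings (a sketch, not from the original
# tests): string and char literals collapse to empty literals, and digit
# separators are stripped so numeric literals stay recognizable.
#   >>> CleansedLines._CollapseStrings('printf("%d", val);')
#   'printf("", val);'
#   >>> CleansedLines._CollapseStrings("size_t n = 1'000'000;")
#   'size_t n = 1000000;'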
def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are
# currently expecting a '>', the matching '<' must have been an operator,
# since a template argument list should not contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while stack and linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find end of expression before end of file, give up
return (line, clean_lines.NumLines(), -1)
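# Illustrative sketch (hypothetical input, not from the original tests): with
# pos pointing at the first '(' below, the returned position is just past the
# matching ')'.
#   >>> cl = CleansedLines(['if (a && (b || c)) {'])
#   >>> CloseExpression(cl, 0, 3)
#   ('if (a && (b || c)) {', 0, 18)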
def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are
# currently expecting a '<', the matching '>' must have been an operator,
# since a template argument list should not contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if line[pos] not in ')}]>':
return (line, 0, -1)
# Check last line
(start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while stack and linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find start of expression before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# placeholder line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
"""Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
"""
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
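# Illustrative examples (a sketch): only leading spaces are counted, so a
# tab-indented line reports an indent level of 0.
#   >>> GetIndentLevel('    int x = 0;')
#   4
#   >>> GetIndentLevel('\tint x = 0;')
#   0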
def PathSplitToList(path):
"""Returns the path split into a list by the separator.
Args:
path: An absolute or relative path (e.g. '/a/b/c/' or '../a')
Returns:
A list of path components (e.g. ['a', 'b', 'c']).
"""
lst = []
while True:
(head, tail) = os.path.split(path)
if head == path: # absolute paths end
lst.append(head)
break
if tail == path: # relative paths end
lst.append(tail)
break
path = head
lst.append(tail)
lst.reverse()
return lst
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
# Replace 'c++' with 'cpp'.
filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
def FixupPathFromRoot():
if _root_debug:
sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
% (_root, fileinfo.RepositoryName()))
# Process the file path with the --root flag if it was set.
if not _root:
if _root_debug:
sys.stderr.write("_root unspecified\n")
return file_path_from_root
def StripListPrefix(lst, prefix):
# f(['x', 'y'], ['w', 'z']) -> None  (not a valid prefix)
if lst[:len(prefix)] != prefix:
return None
# f(['a', 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd']
return lst[(len(prefix)):]
# root behavior:
# --root=subdir , lstrips subdir from the header guard
maybe_path = StripListPrefix(PathSplitToList(file_path_from_root),
PathSplitToList(_root))
if _root_debug:
sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
" _root=%s)\n") % (maybe_path, file_path_from_root, _root))
if maybe_path:
return os.path.join(*maybe_path)
# --root=.. , will prepend the outer directory to the header guard
full_path = fileinfo.FullName()
# adapt slashes for windows
root_abspath = os.path.abspath(_root).replace('\\', '/')
maybe_path = StripListPrefix(PathSplitToList(full_path),
PathSplitToList(root_abspath))
if _root_debug:
sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " +
"root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))
if maybe_path:
return os.path.join(*maybe_path)
if _root_debug:
sys.stderr.write("_root ignore, returning %s\n" % (file_path_from_root))
# --root=FAKE_DIR is ignored
return file_path_from_root
file_path_from_root = FixupPathFromRoot()
return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
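# Illustrative sketch (hypothetical file, default --root): a header whose
# repository-relative path is 'chrome/browser/ui/browser.h' would map to the
# guard variable 'CHROME_BROWSER_UI_BROWSER_H_'.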
def CheckForHeaderGuard(filename, clean_lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. Also checks that the
guard name matches the expected variable derived from the full pathname.
Args:
filename: The name of the C++ header file.
clean_lines: A CleansedLines instance containing the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
raw_lines = clean_lines.lines_without_raw_strings
for i in raw_lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
# Allow pragma once instead of header guards
for i in raw_lines:
if Search(r'^\s*#pragma\s+once', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = ''
ifndef_linenum = 0
define = ''
endif = ''
endif_linenum = 0
for linenum, line in enumerate(raw_lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
# Check for "//" comments on endif line.
ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
if match.group(1) == '_':
# Issue low severity warning for deprecated double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif // %s"' % cppvar)
return
# Didn't find the corresponding "//" comment. If this file does not
# contain any "//" comments at all, it could be that the compiler
# only wants "/**/" comments, look for those instead.
no_single_line_comments = True
for i in xrange(1, len(raw_lines) - 1):
line = raw_lines[i]
if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
no_single_line_comments = False
break
if no_single_line_comments:
match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
if match:
if match.group(1) == '_':
# Low severity warning for double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif /* %s */"' % cppvar)
return
# Didn't find anything
error(filename, endif_linenum, 'build/header_guard', 5,
'#endif line should be "#endif // %s"' % cppvar)
def CheckHeaderFileIncluded(filename, include_state, error):
"""Logs an error if a source file does not include its header."""
# Do not check test files
fileinfo = FileInfo(filename)
if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
return
for ext in GetHeaderExtensions():
basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
headerfile = basefilename + '.' + ext
if not os.path.exists(headerfile):
continue
headername = FileInfo(headerfile).RepositoryName()
first_include = None
include_uses_unix_dir_aliases = False
for section_list in include_state.include_list:
for f in section_list:
include_text = f[0]
if "./" in include_text:
include_uses_unix_dir_aliases = True
if headername in include_text or include_text in headername:
return
if not first_include:
first_include = f[1]
message = '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)
if include_uses_unix_dir_aliases:
message += ". Relative paths like . and .. are not allowed."
error(filename, first_include, 'build/include', 5, message)
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if unicode_escape_decode('\ufffd') in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
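# Illustrative sketch of the validation patterns above (not from the original
# tests): the prefix requirement means only expression-context calls match.
#   >>> bool(re.search(_UNSAFE_FUNC_PREFIX + r'rand\(\)', 'int x = rand();'))
#   True
#   >>> bool(re.search(_UNSAFE_FUNC_PREFIX + r'rand\(\)', 'ACMRandom rand(seed);'))
#   False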
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code was originally written without multi-threading in mind. Engineers
also rely on old habits, having learned POSIX before the threading
extensions were added. These checks guide engineers toward the thread-safe
variants when using POSIX directly.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
if Search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
'...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
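# Illustrative examples (a sketch, not from the original tests):
#   >>> bool(_RE_PATTERN_INVALID_INCREMENT.match('*count++;'))
#   True
#   >>> bool(_RE_PATTERN_INVALID_INCREMENT.match('(*count)++;'))
#   False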
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving the pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
if Search(r'^#define', clean_lines[linenum]):
return True
if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
return True
return False
def IsForwardClassDeclaration(clean_lines, linenum):
return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
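# Illustrative sketch: a line such as 'class Foo;' (optionally preceded by a
# template clause) matches, while 'class Foo {' does not, because the pattern
# requires a trailing semicolon.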
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, linenum, seen_open_brace):
self.starting_linenum = linenum
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
self.check_namespace_indentation = False
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
"""Stores information about an 'extern "C"' block."""
def __init__(self, linenum):
_BlockInfo.__init__(self, linenum, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name
self.is_derived = False
self.check_namespace_indentation = True
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# If there is a DISALLOW macro, it should appear near the end of
# the class.
seen_last_thing_in_class = False
for i in xrange(linenum - 1, self.starting_linenum, -1):
match = Search(
r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
self.name + r'\)',
clean_lines.elided[i])
if match:
if seen_last_thing_in_class:
error(filename, i, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
break
if not Match(r'^\s*$', clean_lines.elided[i]):
seen_last_thing_in_class = True
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name or ''
self.check_namespace_indentation = True
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
# Check how many lines are enclosed in this namespace. Don't issue a
# warning for a missing namespace comment if there aren't enough
# lines. However, do apply the checks if there is already an
# end-of-namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
# namespaces, so that code that terminate namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else; otherwise we might
# get false negatives when an existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
re.escape(self.name) + r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
else:
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Top of the previous stack before each Update().
#
# Because the nesting_stack is updated at the end of each line, we
# had to do some convoluted checks to find out what the current scope
# is at the beginning of the line. This check is simplified by
# saving the previous top of the nesting stack.
#
# We could save the full stack, but we only need the top. Copying
# the full nesting stack would slow down cpplint by ~10%.
self.previous_stack_top = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# We can't be sure if we just find a single '<', and need to
# find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match the start of a namespace. The "\b\s*" below catches namespace
# declarations even when they aren't followed by whitespace, so that we
# don't confuse our namespace checker. The missing spaces will be
# flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
r'(class|struct)\s+(?:[a-zA-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
# If the namespace or class hasn't seen an opening brace yet, mark
# the namespace/class head as complete. Otherwise push a new block
# onto the stack.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo(linenum))
else:
self.stack.append(_BlockInfo(linenum, True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
# Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage-class specifier (static, extern, typedef, etc) should be '
'at the beginning of the declaration.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
explicit_constructor_match = Match(
r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
r'(?:(?:inline|constexpr)\s+)*%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
variadic_args = [arg for arg in constructor_args if '&&...' in arg]
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1) or
# variadic arguments with zero or one argument
(len(constructor_args) <= 2 and
len(variadic_args) >= 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'((const\s+(volatile\s+)?)?|(volatile\s+(const\s+)?))?'
r'%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args or variadic_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|elif|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
not Search(r'\bcase\s+\(', fncall)):
    # TODO(unknown): Space after an operator function seems to be a common
# error, silence those for now by restricting them to highest verbosity.
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
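  """Checks that the contents of a namespace block are not indented.
  Args:
    filename: The name of the current file.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    clean_lines: A CleansedLines instance containing the file.
    line: The number of the line to check.
    error: The function to call with any errors found.
  """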
is_namespace_indent_item = (
len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
nesting_state.previous_stack_top == nesting_state.stack[-2])
if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
clean_lines.elided, line):
CheckItemIndentationInNamespace(filename, clean_lines.elided,
line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
if Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
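# Matches a '// TODO' comment: group 1 is the whitespace between '//' and
# 'TODO', group 2 is the optional '(username)', and group 3 is the single
# whitespace character (if any) after the optional colon.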
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the // unless
# it's a /// or //! Doxygen comment.
if (Match(r'//[^ ]*\w', comment) and
not Match(r'(///|//\!)(\s+|$)', comment)):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80-column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80-column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except for C++11 attributes
# or maybe after 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
if (Search(r'\w\s+\[(?!\[)', line) and
not Search(r'(?:auto&?|delete|return)\s+\[', line)):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
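  # Example: 'for (int x: v)' and 'for (int x :v)' are flagged;
  # 'for (int x : v)' is not.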
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
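  # Example: 'int x=5;' is flagged; 'if (a=Foo()) {' and 'a >= b' are not.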
if ((Search(r'[\w.]=', line) or
Search(r'=[\w.]', line))
and not Search(r'\b(if|while|for) ', line)
# Operators taken from [lex.operators] in C++11 standard.
and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
and not Search(r'operator=', line)):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
#
  # If the operator is followed by a comma, assume it's being used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. This is because there are too
# many false positives due to RValue references.
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
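  # Example: 'cout<<"hello"' is flagged; '10<<20' and '1LL<<20' are not.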
match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
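  # Example: '! flag' and 'i ++ ;' are flagged; '!flag' and 'i++;' are not.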
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
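  # Example: 'Foo(1,2)' is flagged; 'Foo(1, 2)' is not.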
if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
Search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
  # except for a few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
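  # Example: 'for (i = 0;i < 5;++i)' is flagged; 'return 1;' is not.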
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
def _IsType(clean_lines, nesting_state, expr):
"""Check if expression looks like a type name, returns true if so.
Args:
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
expr: The expression to check.
Returns:
True, if token looks like a type.
"""
# Keep only the last token in the expression
last_word = Match(r'^.*(\b\S+)$', expr)
if last_word:
token = last_word.group(1)
else:
token = expr
# Match native types and stdint types
if _TYPES.match(token):
return True
# Try a bit harder to match templated types. Walk up the nesting
# stack until we find something that resembles a typename
# declaration for what we are looking for.
typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
r'\b')
block_index = len(nesting_state.stack) - 1
while block_index >= 0:
if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
return False
# Found where the opening brace is. We want to scan from this
# line up to the beginning of the function, minus a few lines.
# template <typename Type1, // stop scanning here
# ...>
# class C
# : public ... { // start scanning here
last_line = nesting_state.stack[block_index].starting_linenum
next_block_start = 0
if block_index > 0:
next_block_start = nesting_state.stack[block_index - 1].starting_linenum
first_line = last_line
while first_line >= next_block_start:
if clean_lines.elided[first_line].find('template') >= 0:
break
first_line -= 1
if first_line < next_block_start:
# Didn't find any "template" keyword before reaching the next block,
# there are probably no template things to check for this block
block_index -= 1
continue
# Look for typename in the specified range
for i in xrange(first_line, last_line + 1, 1):
if Search(typename_pattern, clean_lines.elided[i]):
return True
block_index -= 1
return False
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces when they are delimiting blocks, classes, namespaces etc.
# And since you should never have braces at the beginning of a line,
# this is an easy test. Except that braces used for initialization don't
# follow the same rule; we often don't want spaces before those.
match = Match(r'^(.*[^ ({>]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
leading_text = match.group(1)
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
# We also suppress warnings for `uint64_t{expression}` etc., as the style
# guide recommends brace initialization for integral types to avoid
# overflow/truncation.
if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
and not _IsType(clean_lines, nesting_state, leading_text)):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise.
"""
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
if Search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block. We also allow a brace on the
# following line if it is part of an array initialization and would not fit
# within the 80 character limit of the preceding line.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline) and
not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'else if\s*\(', line): # could be multi-line if
brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
brace_on_right = endline[endpos:].find('{') != -1
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
if_else_match = Search(r'\b(if\s*(|constexpr)\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*(|constexpr)\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we explicitly list the allowed rules rather
# than listing the disallowed ones. These are the places where "};"
# should be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
  # These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a list of safe macros instead of a list of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the allowed checks wrong means some extra
# semicolons, while the downside for getting disallowed checks wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on
# - Compound literals
# - Lambdas
# - alignas specifier with anonymous structs
# - decltype
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
Search(r'\bdecltype$', line_prefix) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
# We need to check the line forward for NOLINT
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
error)
ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
error)
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
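  # Example: 'while (Next());' and 'if (x == 1);' are flagged.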
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression.
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
# Check for if statements that have completely empty bodies (no comments)
# and no else clauses.
if end_pos >= 0 and matched.group(1) == 'if':
# Find the position of the opening { for the if statement.
# Return without logging an error if it has no brackets.
opening_linenum = end_linenum
opening_line_fragment = end_line[end_pos:]
# Loop until EOF or find anything that's not whitespace or opening {.
while not Search(r'^\s*\{', opening_line_fragment):
if Search(r'^(?!\s*$)', opening_line_fragment):
# Conditional has no brackets.
return
opening_linenum += 1
if opening_linenum == len(clean_lines.elided):
# Couldn't find conditional's opening { or any code before EOF.
return
opening_line_fragment = clean_lines.elided[opening_linenum]
# Set opening_line (opening_line_fragment may not be entire opening line).
opening_line = clean_lines.elided[opening_linenum]
# Find the position of the closing }.
opening_pos = opening_line_fragment.find('{')
if opening_linenum == end_linenum:
# We need to make opening_pos relative to the start of the entire line.
opening_pos += end_pos
(closing_line, closing_linenum, closing_pos) = CloseExpression(
clean_lines, opening_linenum, opening_pos)
if closing_pos < 0:
return
# Now construct the body of the conditional. This consists of the portion
# of the opening line after the {, all lines until the closing line,
# and the portion of the closing line before the }.
if (clean_lines.raw_lines[opening_linenum] !=
CleanseComments(clean_lines.raw_lines[opening_linenum])):
# Opening line ends with a comment, so conditional isn't empty.
return
if closing_linenum > opening_linenum:
# Opening line after the {. Ignore comments here since we checked above.
bodylist = list(opening_line[opening_pos+1:])
# All lines until closing line, excluding closing line, with comments.
bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
# Closing line before the }. Won't (and can't) have comments.
bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
body = '\n'.join(bodylist)
else:
# If statement has brackets and fits on a single line.
body = opening_line[opening_pos+1:closing_pos-1]
# Check if the body is empty
if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
return
# The body is empty. Now make sure there's not an else clause.
current_linenum = closing_linenum
current_line_fragment = closing_line[closing_pos:]
# Loop until EOF or find anything that's not whitespace or else clause.
while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
if Search(r'^(?=\s*else)', current_line_fragment):
# Found an else clause, so don't log an error.
return
current_linenum += 1
if current_linenum == len(clean_lines.elided):
break
current_line_fragment = clean_lines.elided[current_linenum]
# The body is empty and there's no else clause until EOF or other code.
error(filename, end_linenum, 'whitespace/empty_if_body', 4,
('If statement had no body and no else clause'))
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
if not Match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs do not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
  # it provides a way to work around this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
# Issue 337
# https://mail.python.org/pipermail/python-list/2012-August/628809.html
if (sys.version_info.major, sys.version_info.minor) <= (3, 2):
# https://github.com/python/cpython/blob/2.7/Include/unicodeobject.h#L81
is_wide_build = sysconfig.get_config_var("Py_UNICODE_SIZE") >= 4
# https://github.com/python/cpython/blob/2.7/Objects/unicodeobject.c#L564
is_low_surrogate = 0xDC00 <= ord(uc) <= 0xDFFF
if not is_wide_build and is_low_surrogate:
width -= 1
width += 1
return width
else:
return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
prev = raw_lines[linenum - 1] if linenum > 0 else ''
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*(?:public|private|protected|signals)(?:\s+(?:slots\s*)?)?:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
# There are certain situations we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
# We also don't check for lines that look like continuation lines
# (of lines ending in double quotes, commas, equals, or angle brackets)
# because the rules for how to indent those are non-trivial.
if (not Search(r'[",=><] *$', prev) and
(initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# Check if the line is a header guard.
is_header_guard = False
if IsHeaderExtension(file_extension):
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
  # developer's fault.
#
# Doxygen documentation copying can get pretty long when using an overloaded
# function declaration
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^\s*//\s*[^\s]*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
line_width = GetLineWidth(line)
if line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# allow simple single line lambdas
not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
line) and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
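# For illustration, '#include <vector>' matches with group(1) == '<' and
# group(2) == 'vector'; '#include "foo/bar.h"' yields group(2) == 'foo/bar.h'.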
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in itertools.chain(
('%s.%s' % (test_suffix.lstrip('_'), ext)
for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
('%s.%s' % (suffix, ext)
for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _ClassifyInclude(fileinfo, include, used_angle_brackets, include_order="default"):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
used_angle_brackets: True if the #include used <> rather than "".
include_order: "default" or other value allowed in program arguments
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', True, "standardcfirst")
_OTHER_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_header = include in _CPP_HEADERS
# Mark include as C header if in list or in a known folder for standard-ish C headers.
is_std_c_header = (include_order == "default") or (include in _C_HEADERS
# additional linux glibc header folders
or Search(r'(?:%s)\/.*\.h' % "|".join(C_STANDARD_HEADER_FOLDERS), include))
# Headers with C++ extensions shouldn't be considered C system headers
is_system = used_angle_brackets and not os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']
if is_system:
if is_cpp_header:
return _CPP_SYS_HEADER
if is_std_c_header:
return _C_SYS_HEADER
else:
return _OTHER_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
target_dir_pub = os.path.normpath(target_dir + '/../public')
target_dir_pub = target_dir_pub.replace('\\', '/')
if target_base == include_base and (
include_dir == target_dir or
include_dir == target_dir_pub):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
# Only do this check if the included header follows google naming
# conventions. If not, assume that it's a 3rd party API that
# requires special include conventions.
#
# We also make an exception for Lua headers, which follow google
# naming convention but not the include convention.
match = Match(r'#include\s*"([^/]+\.h)"', line)
if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
error(filename, linenum, 'build/include_subdir', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
used_angle_brackets = (match.group(1) == '<')
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, duplicate_line))
return
for extension in GetNonHeaderExtensions():
if (include.endswith('.' + extension) and
os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
error(filename, linenum, 'build/include', 4,
'Do not include .' + extension + ' files from other packages')
return
# We DO want to include a 3rd party looking header if it matches the
# filename. Otherwise we get an erroneous error "...should include its
# header" error later.
third_src_header = False
for ext in GetHeaderExtensions():
basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
headerfile = basefilename + '.' + ext
headername = FileInfo(headerfile).RepositoryName()
if headername in include or include in headername:
third_src_header = True
break
if third_src_header or not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, used_angle_brackets, _include_order))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly handles
  nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string that ends with an opening punctuation symbol.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and currently use inferior regexp matching).
  # Map each opening punctuation symbol to its matching close-punctuation symbol.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(itervalues(matching_punctuation))
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
  # All punctuation matched; return the enclosed text.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
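# For illustration (not exhaustive), _RE_PATTERN_TYPE matches type spellings
# such as 'const std::map<int, string>' and 'typename T::iterator'.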
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
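# e.g. in 'void Swap(Foo& a, Foo& b)' this pattern finds 'Foo& a' and 'Foo& b'.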
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
# Stream types.
_RE_PATTERN_REF_STREAM_PARAM = (
r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
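# For illustration, 'const string& name' matches _RE_PATTERN_CONST_REF_PARAM,
# and 'std::ostream& os' matches _RE_PATTERN_REF_STREAM_PARAM.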
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if IsHeaderExtension(file_extension):
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
if Search(r'\bliterals\b', line):
error(filename, linenum, 'build/namespaces_literals', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
else:
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (IsHeaderExtension(file_extension)
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces_headers', 4,
'Do not use unnamed namespaces in header files. See '
'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
"""Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Match two lines at a time to support multiline declarations
if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access, and
# also because globals can be destroyed when some threads are still running.
# TODO(unknown): Generalize this to also find static unique_ptr instances.
# TODO(unknown): File bugs for clang-tidy to find these.
match = Match(
r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
r'([a-zA-Z0-9_:]+)\b(.*)',
line)
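  # For illustration, 'static const string kName = "value";' matches with
  # group(3) == 'kName' and is reported below as a static/global string.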
# Remove false positives:
# - String pointers (as opposed to values).
# string *pointer
# const string *pointer
# string const *pointer
# string *const pointer
#
# - Functions and template specializations.
# string Function<Type>(...
# string Class<Type>::Method(...
#
# - Operators. These are matched separately because operator names
# cross non-word boundaries, and trying to match both operators
# and functions at the same time would decrease accuracy of
# matching identifiers.
# string Class::operator*()
if (match and
not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
if Search(r'\bconst\b', line):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string '
'instead: "%schar%s %s[]".' %
(match.group(1), match.group(2) or '', match.group(3)))
else:
error(filename, linenum, 'runtime/string', 4,
'Static/global string variables are not permitted.')
if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False
def IsOutOfLineMethodDefinition(clean_lines, linenum):
"""Check if current line contains an out-of-line method definition.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains an out-of-line method definition.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
return False
def IsInitializerList(clean_lines, linenum):
"""Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
for i in xrange(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
remove_function_body = Match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tends to indicate the start of a constructor
# initializer list. It could also be a ternary operator, which
      # also tends to appear in constructor initializer lists as
# opposed to parameter lists.
return True
if Search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
if Search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
# - An opening brace, probably the start of current class or namespace.
#
# Current line is probably not inside an initializer list since
# we saw one of those things without seeing the starting colon.
return False
# Got to the beginning of the file without seeing the start of
# constructor initializer list.
return False
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# If a function is inherited, current function doesn't have much of
# a choice, so any non-const references should not be blamed on
# derived function.
if IsDerivedFunction(clean_lines, linenum):
return
# Don't warn on out-of-line method definitions, as we would warn on the
# in-line declaration, if it isn't marked with 'override'.
if IsOutOfLineMethodDefinition(clean_lines, linenum):
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
  # be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
if (nesting_state.previous_stack_top and
not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
# Not at toplevel, not within a class, and not within a namespace
return
# Avoid initializer lists. We only need to scan back from the
# current line for something that starts with ':'.
#
# We don't need to check the current line, since the '&' would
# appear inside the second set of parentheses on the current line as
# opposed to the first set.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 10), -1):
previous_line = clean_lines.elided[i]
if not Search(r'[),]\s*$', previous_line):
break
if Match(r'^\s*:\s+\S', previous_line):
return
# Avoid preprocessors
if Search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
if IsInitializerList(clean_lines, linenum):
return
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
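  # e.g. the parameters of 'void swap(Foo& a, Foo& b)' or of an iostream
  # 'operator<<(ostream& os, const Foo& f)' are intentionally not flagged.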
if Search(allowed_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
# Don't see an allowed function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(allowed_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
"""Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
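  # For illustration, 'x = int(2.5);' matches with group(2) == 'int' and is
  # reported as a deprecated cast below, while 'new int(5)' sets group(1) and
  # is skipped.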
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
# matched_new_or_template is used to silence two false positives:
# - New operators
# - Template arguments with function types
#
# For template arguments, we match on types immediately following
# an opening bracket without any spaces. This is a fast way to
# silence the common case where the function type is the first
# template argument. False negative with less-than comparison is
# avoided because those operators are usually followed by a space.
#
# function<double(double)> // bracket + no space = false positive
# value < double(42) // bracket + space = true positive
matched_new_or_template = match.group(1)
# Avoid arrays by looking for brackets that come after the closing
# parenthesis.
if Match(r'\([^()]+\)\s*\[', match.group(3)):
return
# Other things to ignore:
# - Function pointers
# - Casts to pointer types
# - Placement new
# - Alias declarations
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
#
# Some non-identifier character is required before the '&' for the
# expression to be recognized as a cast. These are casts:
# expression = &static_cast<int*>(temporary());
# function(&(int*)(temporary()));
#
# This is not a cast:
# reference_type&(int* function_param);
match = Search(
r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match:
# Try a better error message when the & is bound to something
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
  # Try expanding the current context to see if we are one level of
# parentheses inside a macro.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
if context.endswith(' operator++') or context.endswith(' operator--'):
return False
# A single unnamed argument for a function tends to look like old style cast.
# If we see those, don't issue warnings for deprecated casts.
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
remainder):
return False
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('multimap',)),
('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
'unique_ptr', 'weak_ptr')),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<tuple>', ('tuple',)),
('<unordered_map>', ('unordered_map', 'unordered_multimap')),
('<unordered_set>', ('unordered_set', 'unordered_multiset')),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_HEADERS_MAYBE_TEMPLATES = (
('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
'transform',
)),
('<utility>', ('forward', 'make_pair', 'move', 'swap')),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
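# For illustration, this finds the bare word in 'std::string s;' but not
# inside identifiers such as 'mystring'.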
_re_pattern_headers_maybe_templates = []
for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
for _template in _templates:
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# 'type::max()'.
_re_pattern_headers_maybe_templates.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
_header))
# Match set<type>, but not foo->set<type>, foo.set<type>
_re_pattern_headers_maybe_templates.append(
(re.compile(r'[^>.]\bset\s*\<'),
'set<>',
'<set>'))
# Match 'map<type> var' and 'std::map<type>(...)', but not 'map<type>(...)''
_re_pattern_headers_maybe_templates.append(
(re.compile(r'(std\b::\bmap\s*\<)|(^(std\b::\b)map\b\(\s*\<)'),
'map<>',
'<map>'))
# Other scripts may reach in and modify this pattern.
_re_pattern_templates = []
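# For illustration, the loop below generates, e.g. for 'vector', a pattern that
# matches 'vector<' (as in 'vector<int>') and maps it to the '<vector>' header.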
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the source (e.g. .cc) file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
fileinfo_cc = FileInfo(filename_cc)
if not fileinfo_cc.Extension().lstrip('.') in GetNonHeaderExtensions():
return (False, '')
fileinfo_h = FileInfo(filename_h)
if not IsHeaderExtension(fileinfo_h.Extension().lstrip('.')):
return (False, '')
filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
if matched_test_suffix:
filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
def UpdateIncludeState(filename, include_dict, io=codecs):
"""Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
with io.open(filename, 'r', 'utf8', 'replace') as headerfile:
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
except IOError:
return False
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_headers_maybe_templates:
if pattern.search(line):
required[header] = (linenum, template)
    # The following check is just a speed-up; no semantics are changed.
if not '<' in line: # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
matched = pattern.search(line)
if matched:
# Don't warn about IWYU in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's flatten the include_state include_list and copy it into a dictionary.
include_dict = dict([item for sublist in include_state.include_list
for item in sublist])
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = list(include_dict.keys())
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_dict, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if not header_found:
for extension in GetNonHeaderExtensions():
if filename.endswith('.' + extension):
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in sorted(required, key=required.__getitem__):
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
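# For illustration, 'make_pair<int, int>(1, 2)' matches (explicit template
# arguments), while plain 'make_pair(1, 2)' does not.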
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
if not virtual: return
# Ignore "virtual" keywords that are near access-specifiers. These
# are only used in class base-specifier and do not apply to member
# functions.
if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
return
# Ignore the "virtual" keyword from virtual base classes. Usually
# there is a column on the same line in these cases (virtual base
# classes are rare in google3 because multiple inheritance is rare).
if Match(r'^.*[^:]:[^:].*$', line): return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(2))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for closing parenthesis nearby. We need one to confirm where
# the declarator ends and where the virt-specifier starts to avoid
# false positives.
line = clean_lines.elided[linenum]
declarator_end = line.rfind(')')
if declarator_end >= 0:
fragment = line[declarator_end:]
else:
if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
fragment = line
else:
return
# Check that at most one of "override" or "final" is present, not both
if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
"""Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is a forward declared class.
Returns:
Whether or not the new block is directly in a namespace.
"""
if is_forward_declaration:
return len(nesting_state.stack) >= 1 and (
isinstance(nesting_state.stack[-1], _NamespaceInfo))
return (len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum):
"""This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace.
"""
is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
linenum)
if not (is_namespace_indent_item or is_forward_declaration):
return False
# If we are in a macro, we do not want to check the namespace indentation.
if IsMacroDefinition(raw_lines_no_comments, linenum):
return False
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
error):
line = raw_lines_no_comments[linenum]
if Match(r'^\s+', line):
error(filename, linenum, 'runtime/indentation_namespace', 4,
'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=None):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
if extra_check_functions:
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++ TR1 headers.
if include and include.group(1).startswith('tr1/'):
error(filename, linenum, 'build/c++tr1', 5,
('C++ TR1 headers such as <%s> are unapproved.') % include.group(1))
# Flag unapproved C++11 headers.
if include and include.group(1) in ('cfenv',
'condition_variable',
'fenv.h',
'future',
'mutex',
'thread',
'chrono',
'ratio',
'regex',
'system_error',
):
error(filename, linenum, 'build/c++11', 5,
('<%s> is an unapproved C++11 header.') % include.group(1))
# The only place where we need to worry about C++11 keywords and library
# features in preprocessor directives is in macro definitions.
if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
# These are classes and free functions. The classes are always
# mentioned as std::*, but we only catch the free functions if
# they're not found by ADL. They're alphabetical by header.
for top_name in (
# type_traits
'alignment_of',
'aligned_union',
):
if Search(r'\bstd::%s\b' % top_name, line):
error(filename, linenum, 'build/c++11', 5,
('std::%s is an unapproved C++11 class or function. Send c-style '
'an example of where it would make your code more readable, and '
'they may let you use it.') % top_name)
def FlagCxx14Features(filename, clean_lines, linenum, error):
"""Flag those C++14 features that we restrict.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++14 headers.
if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
error(filename, linenum, 'build/c++14', 5,
('<%s> is an unapproved C++14 header.') % include.group(1))
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=None):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
ProcessGlobalSuppresions(lines)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
if IsHeaderExtension(file_extension):
CheckForHeaderGuard(filename, clean_lines, error)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# Check that the .cc file has included its header if it exists.
if _IsSourceExtension(file_extension):
CheckHeaderFileIncluded(filename, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
""" Loads the configuration files and processes the config overrides.
Args:
filename: The name of the file being processed by the linter.
Returns:
False if the current |filename| should not be processed further.
"""
abs_filename = os.path.abspath(filename)
cfg_filters = []
keep_looking = True
while keep_looking:
abs_path, base_name = os.path.split(abs_filename)
if not base_name:
break # Reached the root directory.
cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
abs_filename = abs_path
if not os.path.isfile(cfg_file):
continue
try:
with open(cfg_file) as file_handle:
for line in file_handle:
line, _, _ = line.partition('#') # Remove comments.
if not line.strip():
continue
name, _, val = line.partition('=')
name = name.strip()
val = val.strip()
if name == 'set noparent':
keep_looking = False
elif name == 'filter':
cfg_filters.append(val)
elif name == 'exclude_files':
# When matching exclude_files pattern, use the base_name of
# the current file name or the directory name we are processing.
# For example, if we are checking for lint errors in /foo/bar/baz.cc
# and we found the .cfg file at /foo/CPPLINT.cfg, then the config
# file's "exclude_files" filter is meant to be checked against "bar"
# and not "baz" nor "bar/baz.cc".
if base_name:
pattern = re.compile(val)
if pattern.match(base_name):
if _cpplint_state.quiet:
# Suppress "Ignoring file" warning when using --quiet.
return False
_cpplint_state.PrintInfo('Ignoring "%s": file excluded by "%s". '
'File path component "%s" matches '
'pattern "%s"\n' %
(filename, cfg_file, base_name, val))
return False
elif name == 'linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
_cpplint_state.PrintError('Line length must be numeric.')
elif name == 'extensions':
ProcessExtensionsOption(val)
elif name == 'root':
global _root
# root directories are specified relative to CPPLINT.cfg dir.
_root = os.path.join(os.path.dirname(cfg_file), val)
elif name == 'headers':
ProcessHppHeadersOption(val)
elif name == 'includeorder':
ProcessIncludeOrderOption(val)
else:
_cpplint_state.PrintError(
'Invalid configuration option (%s) in file %s\n' %
(name, cfg_file))
except IOError:
_cpplint_state.PrintError(
"Skipping config file '%s': Can't open for reading\n" % cfg_file)
keep_looking = False
# Apply all the accumulated filters in reverse order (top-level directory
# config options having the least priority).
for cfg_filter in reversed(cfg_filters):
_AddFilters(cfg_filter)
return True
def ProcessFile(filename, vlevel, extra_check_functions=None):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
_BackupFilters()
old_errors = _cpplint_state.error_count
if not ProcessConfigOverrides(filename):
_RestoreFilters()
return
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
with codecs.open(filename, 'r', 'utf8', 'replace') as target_file:
lines = target_file.read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
_cpplint_state.PrintError(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in GetAllExtensions():
_cpplint_state.PrintError('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(GetAllExtensions())))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
    # Warn on every line with CR. An alternative approach might be to
    # check whether the file is mostly CRLF or just LF, and warn on the
    # minority; we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
# Suppress printing anything if --quiet was passed unless the error
# count has increased after processing this file.
if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:
_cpplint_state.PrintInfo('Done processing %s\n' % filename)
_RestoreFilters()
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE % (list(GetAllExtensions()),
','.join(list(GetAllExtensions())),
GetHeaderExtensions(),
','.join(GetHeaderExtensions())))
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(0)
def PrintVersion():
sys.stdout.write('Cpplint fork (https://github.com/cpplint/cpplint)\n')
sys.stdout.write('cpplint ' + __VERSION__ + '\n')
sys.stdout.write('Python ' + sys.version + '\n')
sys.exit(0)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'v=',
'version',
'counting=',
'filter=',
'root=',
'repository=',
'linelength=',
'extensions=',
'exclude=',
'recursive',
'headers=',
'includeorder=',
'quiet'])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
quiet = _Quiet()
counting_style = ''
recursive = False
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
if opt == '--version':
PrintVersion()
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse', 'junit', 'sed', 'gsed'):
        PrintUsage('The only allowed output formats are emacs, vs7, eclipse, '
                   'sed, gsed and junit.')
output_format = val
elif opt == '--quiet':
quiet = True
elif opt == '--verbose' or opt == '--v':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--repository':
global _repository
_repository = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--exclude':
global _excludes
if not _excludes:
_excludes = set()
_excludes.update(glob.glob(val))
elif opt == '--extensions':
ProcessExtensionsOption(val)
elif opt == '--headers':
ProcessHppHeadersOption(val)
elif opt == '--recursive':
recursive = True
elif opt == '--includeorder':
ProcessIncludeOrderOption(val)
if not filenames:
PrintUsage('No files were specified.')
if recursive:
filenames = _ExpandDirectories(filenames)
if _excludes:
filenames = _FilterExcludedFiles(filenames)
_SetOutputFormat(output_format)
_SetQuiet(quiet)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
filenames.sort()
return filenames
def _ExpandDirectories(filenames):
"""Searches a list of filenames and replaces directories in the list with
all files descending from those directories. Files with extensions not in
the valid extensions list are excluded.
Args:
filenames: A list of files or directories
Returns:
A list of all files that are members of filenames or descended from a
directory in filenames
"""
expanded = set()
for filename in filenames:
if not os.path.isdir(filename):
expanded.add(filename)
continue
for root, _, files in os.walk(filename):
for loopfile in files:
fullname = os.path.join(root, loopfile)
if fullname.startswith('.' + os.path.sep):
fullname = fullname[len('.' + os.path.sep):]
expanded.add(fullname)
filtered = []
for filename in expanded:
if os.path.splitext(filename)[1][1:] in GetAllExtensions():
filtered.append(filename)
return filtered
def _FilterExcludedFiles(fnames):
"""Filters out files listed in the --exclude command line switch. File paths
in the switch are evaluated relative to the current working directory
"""
exclude_paths = [os.path.abspath(f) for f in _excludes]
  # Because globbing does not work recursively, exclude all subpaths of all excluded entries.
return [f for f in fnames
if not any(e for e in exclude_paths
if _IsParentOrSame(e, os.path.abspath(f)))]
def _IsParentOrSame(parent, child):
"""Return true if child is subdirectory of parent.
Assumes both paths are absolute and don't contain symlinks.
"""
parent = os.path.normpath(parent)
child = os.path.normpath(child)
if parent == child:
return True
prefix = os.path.commonprefix([parent, child])
if prefix != parent:
return False
  # Note: os.path.commonprefix operates on a character basis, so
  # take extra care with situations like '/foo/ba' and '/foo/bar/baz'.
child_suffix = child[len(prefix):]
child_suffix = child_suffix.lstrip(os.sep)
return child == os.path.join(prefix, child_suffix)
def main():
filenames = ParseArguments(sys.argv[1:])
backup_err = sys.stderr
try:
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReader(sys.stderr, 'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
# If --quiet is passed, suppress printing error count unless there are errors.
if not _cpplint_state.quiet or _cpplint_state.error_count > 0:
_cpplint_state.PrintErrorCounts()
if _cpplint_state.output_format == 'junit':
sys.stderr.write(_cpplint_state.FormatJUnitXML())
finally:
sys.stderr = backup_err
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| bsd-3-clause | -2,675,977,740,084,183,600 | 36.97638 | 98 | 0.637501 | false |
datacommonsorg/website | server/tests/i18n_test.py | 1 | 1720 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from flask import g
from unittest.mock import patch
from main import app
class TestHlParamSelection(unittest.TestCase):
def test_no_hl(self):
with app.test_client() as c:
c.get('/')
assert (g.locale == 'en')
assert (g.locale_choices == ['en'])
def test_default_hl(self):
with app.test_client() as c:
c.get('/?hl=en')
assert (g.locale == 'en')
assert (g.locale_choices == ['en'])
def test_simple_hl(self):
with app.test_client() as c:
c.get('/?hl=ru')
assert (g.locale == 'ru')
assert (g.locale_choices == ['ru', 'en'])
@patch('lib.i18n.AVAILABLE_LANGUAGES', ['en', 'pt-br', 'pt'])
def test_complex_hl(self):
with app.test_client() as c:
c.get('/?hl=pt-BR')
assert (g.locale == 'pt-br')
assert (g.locale_choices == ['pt-br', 'pt', 'en'])
def test_fallback_hl(self):
with app.test_client() as c:
c.get('/?hl=foobar')
assert (g.locale == 'en')
assert (g.locale_choices == ['en']) | apache-2.0 | 1,607,169,839,682,063,000 | 31.471698 | 74 | 0.594767 | false |
shoopio/shoop | shuup/admin/dashboard/utils.py | 2 | 1372 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import time
from heapq import heappop, heappush
from itertools import islice
from django.utils.timezone import now
from shuup.admin.module_registry import get_modules
def get_activity(request, n_entries=30, cutoff_hours=10):
"""
Get Activity objects from all modules as a list in latest-first order.
:param request: Request context
:type request: django.http.request.HttpRequest
:param n_entries: Number of entries to return in total.
:type n_entries: int
    :param cutoff_hours: Cutoff window in hours; the oldest returned entry is at most this old
:type cutoff_hours: float
:return: List of Activity objects
:rtype: list[Activity]
"""
cutoff_dt = now() - datetime.timedelta(hours=cutoff_hours)
activities = []
for module in get_modules():
for activity in islice(module.get_activity(request, cutoff=cutoff_dt), n_entries):
heappush(activities, (-time.mktime(activity.datetime.timetuple()), activity))
out = []
while activities and len(out) < n_entries:
out.append(heappop(activities)[1])
return out
| agpl-3.0 | 5,926,010,667,520,457,000 | 33.3 | 97 | 0.708455 | false |
cheminfo/RDKitjs | old/src/similarityMap_basic_functions.py | 1 | 3270 | # Module-level imports added so this extracted snippet is self-contained (assumed
# context; six is assumed available for iteritems).
import math
import numpy
import numpy as np
from matplotlib import cm
from six import iteritems
from rdkit.Chem import Draw
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0):
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp(-z/(2*(1-rho**2))) / denom
def MolToMPL(mol,size=(300,300),kekulize=True, wedgeBonds=True, imageType=None, fitImage=False, options=None, **kwargs):
if not mol:
raise ValueError('Null molecule provided')
from rdkit.Chem.Draw.mplCanvas import Canvas
canvas = Canvas(size)
if options is None:
options = DrawingOptions()
options.bgColor=None
if fitImage:
        options.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds=wedgeBonds
drawer = MolDrawing(canvas=canvas, drawingOptions=options)
omol=mol
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.AddMol(mol,**kwargs)
omol._atomPs=drawer.atomPs[mol]
for k,v in iteritems(omol._atomPs):
omol._atomPs[k]=canvas.rescalePt(v)
canvas._figure.set_size_inches(float(size[0])/100,float(size[1])/100)
return canvas._figure
def calcAtomGaussians(mol,a=0.03,step=0.02,weights=None):
import numpy
from matplotlib import mlab
x = numpy.arange(0,1,step)
y = numpy.arange(0,1,step)
X,Y = numpy.meshgrid(x,y)
if weights is None:
weights=[1.]*mol.GetNumAtoms()
    # Note: each atom contributes an isotropic Gaussian (equal sigmas, sigmaxy=0), so this
    # is a sum of simple kernels rather than a general bivariate mixture.
    # Signature for reference: mlab.bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0)
    Z = mlab.bivariate_normal(X, Y, a, a, mol._atomPs[0][0], mol._atomPs[0][1]) * weights[0]
for i in range(1,mol.GetNumAtoms()):
Zp = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[i][0], mol._atomPs[i][1])
Z += Zp*weights[i]
return X,Y,Z
def GetSimilarityMapFromWeights(mol, weights, colorMap=cm.PiYG, scale=-1, size=(250, 250), sigma=None, #@UndefinedVariable #pylint: disable=E1101
coordScale=1.5, step=0.01, colors='k', contourLines=10, alpha=0.5, **kwargs):
if mol.GetNumAtoms() < 2: raise ValueError("too few atoms")
fig = Draw.MolToMPL(mol, coordScale=coordScale, size=size, **kwargs)
if sigma is None:
if mol.GetNumBonds() > 0:
bond = mol.GetBondWithIdx(0)
idx1 = bond.GetBeginAtomIdx()
idx2 = bond.GetEndAtomIdx()
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[idx1][i]-mol._atomPs[idx2][i])**2 for i in range(2)]))
else:
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[0][i]-mol._atomPs[1][i])**2 for i in range(2)]))
sigma = round(sigma, 2)
x, y, z = Draw.calcAtomGaussians(mol, sigma, weights=weights, step=step)
# scaling
if scale <= 0.0: maxScale = max(math.fabs(numpy.min(z)), math.fabs(numpy.max(z)))
else: maxScale = scale
# coloring
fig.axes[0].imshow(z, cmap=colorMap, interpolation='bilinear', origin='lower', extent=(0,1,0,1), vmin=-maxScale, vmax=maxScale)
# contour lines
# only draw them when at least one weight is not zero
if len([w for w in weights if w != 0.0]):
fig.axes[0].contour(x, y, z, contourLines, colors=colors, alpha=alpha, **kwargs)
return fig
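# Minimal usage sketch for the helpers above (assumes RDKit and matplotlib are
# installed; the SMILES string and uniform weights are illustrative only):
#   from rdkit import Chem
#   mol = Chem.MolFromSmiles('c1ccccc1O')
#   fig = GetSimilarityMapFromWeights(mol, weights=[0.5] * mol.GetNumAtoms())
#   fig.savefig('similarity_map.png', bbox_inches='tight')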
| bsd-3-clause | -2,635,108,952,803,476,000 | 42.026316 | 240 | 0.666667 | false |
unicefuganda/uSurvey | survey/tests/models/test_interviews_model.py | 1 | 18752 | import string
from model_mommy import mommy
from datetime import datetime
from django_rq import job
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.contrib.auth.models import User
from django.utils import timezone
from dateutil.parser import parse as extract_date
from django.conf import settings
from survey.models import (InterviewerAccess, ODKAccess, USSDAccess, Interview, Interviewer, QuestionSetChannel,
EnumerationArea, Survey, SurveyAllocation, Question, QuestionSet, Batch, BatchQuestion,
QuestionOption)
from survey.forms.question import get_question_form
# import all question types
from survey.models import (Answer, NumericalAnswer, TextAnswer, MultiChoiceAnswer, MultiSelectAnswer, GeopointAnswer,
ImageAnswer, AudioAnswer, VideoAnswer, DateAnswer, AutoResponse)
from survey.utils.decorators import static_var
from survey.tests.base_test import BaseTest
from survey.forms.answer import SurveyAllocationForm, AddMoreLoopForm
from .survey_base_test import SurveyBaseTest
from survey.utils.views_helper import activate_super_powers
class InterviewsTest(SurveyBaseTest):
def test_name(self):
interview = self.interview
self.assertEquals(str(interview), '%s: %s' % (interview.id, interview.question_set.name))
def test_is_closed(self):
self.assertEquals(self.interview.closure_date is not None, self.interview.is_closed())
def test_interview_qset_gives_property_maps_to_correct_type(self):
self.assertEquals(self.qset.id, self.interview.qset.id)
self.assertEquals(self.qset.__class__, self.interview.qset.__class__)
def test_interview_is_considered_stared_when_last_question_is_not_none(self):
self.assertEquals(self.interview.last_question, None)
self.assertFalse(self.interview.has_started)
def test_question_text_is_given_when_no_response_is_supplied(self):
self._create_ussd_non_group_questions(self.qset)
interview = self.interview
first_question = interview.question_set.start_question
# confirm if its the Numerical answer
self.assertEquals(first_question.answer_type, NumericalAnswer.choice_name())
# interview has not started
self.assertEquals(interview.has_started, False)
self.assertEquals(Answer.objects.count(), 0)
response = interview.respond() # first question is numerical
self.assertEquals(response, first_question.text)
def test_last_question_is_updated_after_response(self):
self._create_ussd_non_group_questions(self.qset)
interview = self.interview
first_question = interview.question_set.start_question
# confirm if its the Numerical answer
self.assertEquals(first_question.answer_type, NumericalAnswer.choice_name())
response = interview.respond()
interview.refresh_from_db()
self.assertEquals(interview.has_started, True)
self.assertEquals(interview.last_question.id, first_question.id)
def _validate_response(self, question, answer, interview=None):
if interview is None:
interview = self.interview
answer_count = Answer.objects.count()
questions = self.qset.flow_questions
interview.respond(reply=answer, answers_context={})
interview.refresh_from_db()
self.assertEquals(Answer.objects.count(), answer_count+1)
next_question = question.next_question(answer)
# just confirm text value of this answer was saved
self.assertTrue(interview.get_answer(question), str(answer))
question = Question.get(id=question.id)
# next test is valid
if questions.index(question) < len(questions) - 1:
self.assertEquals(next_question.id, questions[questions.index(question)+1].id)
self.assertEquals(next_question.id, interview.last_question.id)
def test_interview_response_flow(self):
self._create_ussd_non_group_questions(self.qset)
interview = self.interview
self._try_interview(interview)
def _try_interview(self, interview):
first_question = interview.question_set.start_question
response = interview.respond() # first question is numerical
self.assertEquals(response, first_question.text)
self._validate_response(first_question, 1, interview=interview) # numerical question
self._validate_response(self.qset.flow_questions[1], 'Howdy', interview=interview) # text question
self._validate_response(self.qset.flow_questions[2], 'N', interview=interview) # Multichoice
# auto response is internally an integer answer only that its generated by code (but outside models)
self._validate_response(self.qset.flow_questions[3], 1, interview=interview) # Auto response
# now assert that the interview is closed.
self.assertTrue(interview.is_closed())
def test_interviews_belonging_to_a_survey(self):
self._create_ussd_non_group_questions(self.qset)
interview = mommy.make(Interview, interviewer=self.interviewer, survey=self.survey, ea=self.ea,
interview_channel=self.access_channel, question_set=self.qset)
self._try_interview(interview)
self.assertEquals(Interview.interviews(self.survey).exclude(survey=self.survey).count(), 0)
def test_interviews_in_a_location(self):
self._create_ussd_non_group_questions(self.qset)
location1 = self.ea.locations.first()
interview = mommy.make(Interview, interviewer=self.interviewer, survey=self.survey, ea=self.ea,
interview_channel=self.access_channel, question_set=self.qset)
self._try_interview(interview)
interview = mommy.make(Interview, interviewer=self.interviewer, survey=self.survey, ea=self.ea,
interview_channel=self.access_channel, question_set=self.qset)
self._try_interview(interview)
self.assertEquals(Interview.interviews_in(location1, include_self=True).count(), Interview.objects.count())
self.assertEquals(Interview.interviews_in(location1, survey=self.survey, include_self=True).count(),
Interview.objects.count())
# test another location doesnt have any interviews
location2 = EnumerationArea.objects.exclude(locations__in=self.ea.locations.all()).first().locations.first()
self.assertEquals(Interview.interviews_in(location2, include_self=True).count(), 0)
self.assertEquals(Interview.interviews_in(location2, survey=self.survey, include_self=True).count(), 0)
def _load_other_client(self):
self.client = Client()
User.objects.create_user(username='useless', email='[email protected]', password='I_Suck')
user = User.objects.create_user('demo13', '[email protected]', 'demo13')
self.assign_permission_to(user, 'can_have_super_powers')
self.assign_permission_to(user, 'can_view_users')
self.client.login(username='demo13', password='demo13')
return user
def test_bulk_answer_questions(self):
self._create_ussd_non_group_questions(self.qset)
answers = []
n_quest = Question.objects.get(answer_type=NumericalAnswer.choice_name())
t_quest = Question.objects.get(answer_type=TextAnswer.choice_name())
m_quest = Question.objects.get(answer_type=MultiChoiceAnswer.choice_name())
# first is numeric, then text, then multichioice
answers = [{n_quest.id: 1, t_quest.id: 'Hey Man', m_quest.id: 'Y'},
{n_quest.id: 5, t_quest.id: 'Hey Boy', m_quest.id: 'Y'},
{n_quest.id: 15, t_quest.id: 'Hey Girl!', m_quest.id: 'N'},
{n_quest.id: 15, t_quest.id: 'Hey Part!'}
]
question_map = {n_quest.id: n_quest, t_quest.id: t_quest, m_quest.id: m_quest}
interview = self.interview
Interview.save_answers(self.qset, self.survey, self.ea,
self.access_channel, question_map, answers)
        # confirm that 11 answers have been created
self.assertEquals(NumericalAnswer.objects.count(), 4)
self.assertEquals(TextAnswer.objects.count(), 4)
self.assertEquals(MultiChoiceAnswer.objects.count(), 3)
self.assertEquals(TextAnswer.objects.first().to_text().lower(), 'Hey Man'.lower())
self.assertEquals(MultiChoiceAnswer.objects.first().as_text.lower(), 'Y'.lower())
self.assertEquals(MultiChoiceAnswer.objects.first().as_value, str(QuestionOption.objects.get(text='Y').order))
# now test wipe data
request = RequestFactory().get('.')
request.user = self._load_other_client()
activate_super_powers(request)
url = reverse('wipe_survey_data', args=(self.survey.id,))
answer_count = Answer.objects.count()
self.assertTrue(answer_count > 0)
response = self.client.get(url)
self.assertEquals(Answer.objects.count(), 0)
def test_respond_on_closed_interview(self):
self.interview.closure_date = timezone.now()
self.interview.save()
self.assertEquals(self.interview.respond(), None)
def test_respond_start_question_interview(self):
self._create_ussd_group_questions()
self.assertEquals(self.interview.respond(),
self.qset.g_first_question.display_text(channel=ODKAccess.choice_name()))
class InterviewsTestExtra(SurveyBaseTest):
def test_first_question_is_loop_first(self):
self._create_ussd_group_questions()
# test first question is group first
self.assertEquals(self.interview.respond(),
self.qset.g_first_question.display_text(channel=ODKAccess.choice_name(), context={}))
# test running again gives same results
self.assertEquals(self.interview.respond(),
self.qset.g_first_question.display_text(channel=ODKAccess.choice_name(), context={}))
def test_interviews_in_exclude_self(self):
location = self.ea.locations.first()
interviews = Interview.interviews_in(location.parent)
self.assertTrue(interviews.filter(id=self.interview.id).exists())
def test_answers_unicode_rep(self):
self._create_ussd_non_group_questions()
n_question = Question.objects.filter(answer_type=NumericalAnswer.choice_name()).first()
answer = NumericalAnswer.create(self.interview, n_question, 1)
self.assertEquals(str(answer.as_text), unicode(answer))
# test update (since numeric makes use of thr parent implementation)
answer.update(2)
self.assertEquals(answer.as_value, 2)
# just test to label also :)
self.assertEquals(answer.to_label(), 2)
#test to pretty_print
self.assertEquals(str(answer.pretty_print()), '2')
def test_get_answer_class_with_doesnt_exist(self):
self.assertRaises(ValueError, Answer.get_class, 'Fake_Anwer')
def _prep_answers(self):
self._create_test_non_group_questions(self.qset)
answers = []
n_quest = Question.objects.get(answer_type=NumericalAnswer.choice_name())
t_quest = Question.objects.get(answer_type=TextAnswer.choice_name())
m_quest = Question.objects.get(answer_type=MultiChoiceAnswer.choice_name())
# first is numeric, then text, then multichioice
answers = [{n_quest.id: 1, t_quest.id: 'Hey Man', m_quest.id: 'Y'},
{n_quest.id: 5, t_quest.id: 'Our Hey Boy', m_quest.id: 'Y'},
{n_quest.id: 27, t_quest.id: 'Hey Girl!', m_quest.id: 'N'},
{n_quest.id: 12, t_quest.id: 'Hey Raster!', m_quest.id: 'N'},
{n_quest.id: 19, t_quest.id: 'This bad boy'}
]
question_map = {n_quest.id: n_quest, t_quest.id: t_quest, m_quest.id: m_quest}
interview = self.interview
interviews = Interview.save_answers(self.qset, self.survey, self.ea,
self.access_channel, question_map, answers)
        # confirm that 14 answers have been created
self.assertEquals(NumericalAnswer.objects.count(), 5)
self.assertEquals(TextAnswer.objects.count(), 5)
self.assertEquals(MultiChoiceAnswer.objects.count(), 4)
self.assertEquals(TextAnswer.objects.first().to_text().lower(), 'Hey Man'.lower())
self.assertEquals(MultiChoiceAnswer.objects.first().as_text.lower(), 'Y'.lower())
multichoice = MultiChoiceAnswer.objects.first()
self.assertEquals(multichoice.as_value,
str(QuestionOption.objects.get(text='Y', question=multichoice.question).order))
return Interview.objects.filter(id__in=[i.id for i in interviews])
def test_answer_qs_filters(self):
interviews = self._prep_answers()
        fetched_interviews = Answer.fetch_contains('answer__as_value', 'Hey', qs=interviews) # 4 interviews meet this
self.assertEquals(fetched_interviews.count(), 4)
        fetched_interviews = Answer.fetch_starts_with('answer__as_value', 'Hey', qs=interviews) # 3 interviews meet this
self.assertEquals(fetched_interviews.count(), 3)
        fetched_interviews = Answer.fetch_ends_with('answer__as_value', 'boy', qs=interviews) # 2 interviews meet this
self.assertEquals(fetched_interviews.count(), 2)
        fetched_answers = Answer.fetch_contains('as_value', 'boy') # 2 interviews meet this
self.assertEquals(fetched_answers.count(), 2)
        fetched_answers = Answer.fetch_starts_with('as_value', 'This') # 1 interview meets this
self.assertEquals(fetched_answers.count(), 1)
        fetched_answers = Answer.fetch_ends_with('as_value', 'boy') # 2 interviews meet this
self.assertEquals(fetched_answers.count(), 2)
def test_odk_answer_methods(self):
# test odk contain
path = '/qset/qset1/surveyQuestions/q1'
value = 'me doing somthing'
self.assertEquals(Answer.odk_contains(path, value), "regex(%s, '.*(%s).*')" % (path, value))
self.assertEquals(Answer.odk_starts_with(path, value), "regex(%s, '^(%s).*')" % (path, value))
self.assertEquals(Answer.odk_ends_with(path, value), "regex(%s, '.*(%s)$')" % (path, value))
value = 4
upperlmt = 10
self.assertEquals(Answer.odk_greater_than(path, value), "%s > '%s'" % (path, value))
self.assertEquals(Answer.odk_less_than(path, value), "%s < '%s'" % (path, value))
self.assertEquals(Answer.odk_between(path, value, upperlmt),
"(%s > '%s') and (%s <= '%s')" % (path, value, path, upperlmt))
self.assertEquals(NumericalAnswer.odk_less_than(path, value), "%s < %s" % (path, value))
self.assertEquals(NumericalAnswer.odk_between(path, value, upperlmt),
"(%s > %s) and (%s <= %s)" % (path, value, path, upperlmt))
value = '20-07-2017'
self.assertEquals(DateAnswer.odk_greater_than(path, value),
"%s > %s" % (path, DateAnswer.to_odk_date(value)))
self.assertEquals(DateAnswer.odk_less_than(path, value),
"%s < %s" % (path, DateAnswer.to_odk_date(value)))
upperlmt = '25-08-2017'
self.assertEquals(DateAnswer.odk_between(path, value, upperlmt),
"(%s > %s) and (%s <= %s)" % (path,
DateAnswer.to_odk_date(value),
path, DateAnswer.to_odk_date(upperlmt)))
def test_answer_value_methods(self):
value = 'me doing somthing'
test_answer1 = 'nothing good'
self.assertFalse(Answer.equals(test_answer1, value))
self.assertTrue(Answer.equals(value, value))
self.assertTrue(Answer.starts_with(value, 'me d'))
self.assertFalse(Answer.ends_with(value, 'no thing'))
self.assertTrue(Answer.ends_with(value, 'somthing'))
self.assertFalse(Answer.greater_than(5, 9))
self.assertTrue(Answer.greater_than(9, 5))
self.assertTrue(Answer.less_than(5, 9))
self.assertFalse(Answer.less_than(9, 5))
self.assertFalse(Answer.between(9, 5, 7))
self.assertTrue(Answer.between(9, 5, 11))
self.assertTrue(Answer.passes_test('17 > 10'))
self.assertFalse(NumericalAnswer.greater_than(5, 9))
self.assertTrue(NumericalAnswer.greater_than(9, 5))
self.assertTrue(NumericalAnswer.less_than(5, 9))
self.assertFalse(NumericalAnswer.less_than(9, 5))
self.assertFalse(NumericalAnswer.between(9, 5, 7))
self.assertTrue(NumericalAnswer.between(9, 5, 11))
self.assertFalse(TextAnswer.equals(test_answer1, value))
self.assertTrue(TextAnswer.equals(value, value))
self.assertFalse(MultiChoiceAnswer.equals(test_answer1, value))
self.assertTrue(MultiChoiceAnswer.equals(value, value))
self.assertFalse(MultiSelectAnswer.equals(test_answer1, value))
self.assertTrue(MultiSelectAnswer.equals(value, value))
self.assertFalse(DateAnswer.greater_than('12-09-2017', '12-09-2017'))
self.assertTrue(DateAnswer.greater_than('13-09-2017', '12-09-2017'))
self.assertFalse(DateAnswer.less_than('18-09-2017', '12-09-2017'))
self.assertTrue(DateAnswer.less_than('13-09-2017', '17-09-2017'))
self.assertFalse(DateAnswer.between('18-09-2017', '12-09-2017', '16-09-2017'))
self.assertTrue(DateAnswer.between('14-09-2017', '12-09-2017', '16-09-2017'))
def test_other_answer_methods(self):
interviews = self._prep_answers()
m_answer = MultiChoiceAnswer.objects.last()
self.assertEqual(m_answer.pretty_print(as_label=False), m_answer.value.text)
self.assertEqual(m_answer.pretty_print(as_label=True), m_answer.value.order)
multiselect_question = Question.objects.filter(answer_type=MultiSelectAnswer.choice_name()).last()
MultiSelectAnswer.create(self.interview, multiselect_question, 'Y N')
self.assertEqual(MultiSelectAnswer.objects.count(), 1)
multiselect = MultiSelectAnswer.objects.last()
self.assertEqual(multiselect.to_text(), ' and '.join(['Y', 'N']))
self.assertEqual(multiselect.to_label(), ' and '.join(['1', '2']))
self.assertEqual(multiselect.pretty_print(as_label=False), multiselect.to_text())
self.assertEqual(multiselect.pretty_print(as_label=True), multiselect.to_label())
| bsd-3-clause | -1,660,644,044,736,273,700 | 55.824242 | 120 | 0.65753 | false |
jason-ni/eventlet-raft | eventlet_raft/server.py | 1 | 3811 | import sys
import eventlet
from eventlet import event
import logging
import msgpack
from .settings import BUF_LEN
LOG = logging.getLogger('Server')
class Server(object):
exit_event = event.Event()
def __init__(self, conf):
super(Server, self).__init__()
self._node_listen_ip = conf.get('server', 'node_listen_ip')
self._node_listen_port = int(conf.get('server', 'node_listen_port'))
self._node_listen_sock = None
self._client_listen_ip = conf.get('server', 'client_listen_ip')
self._client_listen_port = int(conf.get('server', 'client_listen_port'))
self._client_listen_sock = None
self._threads = []
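    # Illustrative config consumed by __init__ (INI-style, read via conf.get; the
    # section/option names come from the lookups above, the values are assumptions):
    #   [server]
    #   node_listen_ip = 0.0.0.0
    #   node_listen_port = 4000
    #   client_listen_ip = 0.0.0.0
    #   client_listen_port = 4001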
def _handle_node_sock(self, node_sock):
LOG.debug("Get a node socket")
unpacker = msgpack.Unpacker()
while True:
try:
chunk = node_sock.recv(BUF_LEN)
if not chunk:
break
unpacker.feed(chunk)
for unpacked_msg in unpacker:
self._on_handle_node_msg(unpacked_msg)
except Exception as e:
LOG.exception("node sock error: %s" % str(e))
break
def _on_handle_node_msg(self, msg):
pass
def _handle_client_sock(self, client_sock):
LOG.info("Get a client socket")
unpacker = msgpack.Unpacker()
while True:
try:
chunk = client_sock.recv(BUF_LEN)
if not chunk:
break
unpacker.feed(chunk)
for unpacked_msg in unpacker:
LOG.info(unpacked_msg)
self._on_handle_client_msg(client_sock, unpacked_msg)
except Exception as e:
LOG.exception("client sock error: %s" % str(e))
break
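    # Illustrative client for the framing used above: each message is a msgpack-encoded
    # object written to the TCP stream (host and port here are assumptions):
    #   import msgpack, socket
    #   sock = socket.create_connection(('127.0.0.1', 4001))
    #   sock.sendall(msgpack.packb({'op': 'ping'}))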
    def _on_handle_client_msg(self, sock, msg):
pass
def _on_node_connect(self, node_sock, address):
pass
def _handle_node_accept(self):
while True:
node_sock, address = self._node_listen_sock.accept()
self._on_node_connect(node_sock, address)
self._threads.append(
eventlet.spawn(self._handle_node_sock, node_sock)
)
def _on_client_connect(self, client_sock, address):
pass
def _handle_client_accept(self):
while True:
client_sock, address = self._client_listen_sock.accept()
self._on_client_connect(client_sock, address)
self._threads.append(
eventlet.spawn(self._handle_client_sock, client_sock)
)
def _on_start(self):
pass
def start(self):
self._node_listen_sock = eventlet.listen(
(self._node_listen_ip, self._node_listen_port)
)
self._threads.append(eventlet.spawn(self._handle_node_accept))
self._client_listen_sock = eventlet.listen(
(self._client_listen_ip, self._client_listen_port)
)
self._threads.append(eventlet.spawn(self._handle_client_accept))
self._on_start()
def _shutdown(self):
LOG.debug("Exiting...")
self._on_exit()
for thread in self._threads:
if thread:
thread.kill()
else:
LOG.debug("--- none thread")
sys.exit(0)
def _on_exit(self):
pass
def wait(self):
LOG.debug("Waiting for msg to exit")
self.exit_event.wait()
LOG.debug("Received exit event")
self._shutdown()
def main():
from util import config_log
from conf import set_conf
set_conf('test.conf')
from .conf import CONF
config_log()
server = Server(CONF)
server.start()
server.wait()
if __name__ == '__main__':
main()
| apache-2.0 | 5,002,739,255,207,698,000 | 28.091603 | 80 | 0.547625 | false |
jpadilla/django-extensions | django_extensions/templatetags/highlighting.py | 1 | 3357 | # coding=utf-8
"""
Similar to syntax_color.py but this is intended more for being able to
copy+paste actual code into your Django templates without needing to
escape or anything crazy.
http://lobstertech.com/2008/aug/30/django_syntax_highlight_template_tag/
Example:
{% load highlighting %}
<style>
@import url("http://lobstertech.com/media/css/highlight.css");
.highlight { background: #f8f8f8; }
.highlight { font-size: 11px; margin: 1em; border: 1px solid #ccc;
border-left: 3px solid #F90; padding: 0; }
.highlight pre { padding: 1em; overflow: auto; line-height: 120%; margin: 0; }
.predesc { margin: 1.5em 1.5em -2.5em 1em; text-align: right;
font: bold 12px Tahoma, Arial, sans-serif;
letter-spacing: 1px; color: #333; }
</style>
<h2>check out this code</h2>
{% highlight 'python' 'Excerpt: blah.py' %}
def need_food(self):
print("Love is <colder> than &death&")
{% endhighlight %}
"""
import django
from django import template
from django.template import (
Context, Node, Template, TemplateSyntaxError, Variable,
)
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
try:
from pygments import highlight as pyghighlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
HAS_PYGMENTS = True
except ImportError:
HAS_PYGMENTS = False
register = template.Library()
@stringfilter
def parse_template(value):
return mark_safe(Template(value).render(Context()))
parse_template.is_safe = True
if django.VERSION >= (1, 4):
register.filter(parse_template, is_safe=True)
else:
parse_template.is_safe = True
register.filter(parse_template)
class CodeNode(Node):
def __init__(self, language, nodelist, name=''):
self.language = Variable(language)
self.nodelist = nodelist
if name:
self.name = Variable(name)
else:
self.name = None
def render(self, context):
code = self.nodelist.render(context).strip()
lexer = get_lexer_by_name(self.language.resolve(context))
formatter = HtmlFormatter(linenos=False)
html = ""
if self.name:
name = self.name.resolve(context)
html = '<div class="predesc"><span>%s</span></div>' % name
return html + pyghighlight(code, lexer, formatter)
@register.tag
def highlight(parser, token):
"""
Allows you to put a highlighted source code <pre> block in your code.
    This takes two arguments, the language and a little explanation message
    that will be rendered before the code. The second argument is optional.
Your code will be fed through pygments so you can use any language it
supports.
Usage::
{% load highlighting %}
{% highlight 'python' 'Excerpt: blah.py' %}
def need_food(self):
print("Love is colder than death")
{% endhighlight %}
"""
if not HAS_PYGMENTS:
raise ImportError("Please install 'pygments' library to use highlighting.")
nodelist = parser.parse(('endhighlight',))
parser.delete_first_token()
bits = token.split_contents()[1:]
if len(bits) < 1:
raise TemplateSyntaxError("'highlight' statement requires an argument")
return CodeNode(bits[0], nodelist, *bits[1:])
| mit | 1,858,541,039,714,672,400 | 29.518182 | 83 | 0.670241 | false |
sjdv1982/seamless | seamless/core/status.py | 1 | 10043 | class SeamlessInvalidValueError(ValueError):
def __str__(self):
s = type(self).__name__
if len(self.args):
s += ":" + " ".join([str(a) for a in self.args])
return s
class SeamlessUndefinedError(ValueError):
def __str__(self):
s = type(self).__name__
if len(self.args):
s += ":" + " ".join([str(a) for a in self.args])
return s
import json
from enum import Enum
class MyEnum(Enum):
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __eq__(self, other):
if other is None:
return False
return self.value == other.value
StatusEnum = MyEnum("StatusEnum", (
"OK",
"PENDING",
"SUB",
"VOID",
))
StatusReasonEnum = MyEnum("StatusReasonEnum",(
"UNCONNECTED", # only for workers
# and cells connected from undefined macropaths
"UNDEFINED", # only for cells
"INVALID", # invalid value; worker or cell
"ERROR", # error in execution; only for workers
"UPSTREAM", # worker or cell
"EXECUTING" # only for workers, only for pending
))
class WorkerStatus:
def __init__(self,
status,
reason=None,
pins=None,
preliminary=False,
progress=0.0
):
self.status = status
self.reason = reason
self.pins = pins
self.preliminary = preliminary
self.progress = progress
def __getitem__(self, index):
if index == 0:
return self.status
if index == 1:
return self.reason
raise IndexError(index)
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def status_cell(cell):
if cell._checksum is not None:
return StatusEnum.OK, None, cell._prelim
if not cell._void:
return StatusEnum.PENDING, None, None
return StatusEnum.VOID, cell._status_reason, None
def status_accessor(accessor):
if accessor is None:
return StatusEnum.VOID, StatusReasonEnum.UNCONNECTED, None
if accessor._checksum is not None:
return StatusEnum.OK, None, accessor._prelim
if not accessor._void:
return StatusEnum.PENDING, None, None
return StatusEnum.VOID, accessor._status_reason, None
def status_transformer(transformer):
prelim = transformer.preliminary
checksum = transformer._checksum
if checksum is not None and not prelim:
return WorkerStatus(StatusEnum.OK)
manager = transformer._get_manager()
tcache = manager.cachemanager.transformation_cache
livegraph = manager.livegraph
pins = None
if not transformer._void:
status = StatusEnum.PENDING
reason = StatusReasonEnum.UPSTREAM
tf_checksum = tcache.transformer_to_transformations.get(transformer)
if tf_checksum is not None:
if tf_checksum in tcache.transformation_jobs:
reason = StatusReasonEnum.EXECUTING
if reason == StatusReasonEnum.UPSTREAM:
if checksum is not None:
assert prelim
return WorkerStatus(StatusEnum.OK, preliminary=True)
else:
status = StatusEnum.VOID
reason = transformer._status_reason
upstreams = livegraph.transformer_to_upstream.get(transformer)
downstreams = livegraph.transformer_to_downstream.get(transformer)
pins = []
if reason == StatusReasonEnum.UNCONNECTED:
pins = []
if upstreams is not None:
for pinname, accessor in upstreams.items():
if pinname == "META":
continue
if accessor is None:
pins.append(pinname)
if downstreams is not None:
if not len(downstreams):
outp = transformer._output_name
assert outp is not None
pins.append(outp)
elif reason == StatusReasonEnum.UPSTREAM:
pins = {}
if upstreams is not None:
for pinname, accessor in upstreams.items():
astatus = status_accessor(accessor)
if astatus[0] == StatusEnum.OK:
continue
pins[pinname] = astatus
return WorkerStatus(
status, reason, pins,
preliminary = transformer.preliminary,
progress = transformer._progress
)
def status_reactor(reactor):
manager = reactor._get_manager()
cachemanager = manager.cachemanager
livegraph = manager.livegraph
if reactor._pending:
return WorkerStatus(StatusEnum.PENDING)
elif not reactor._void:
return WorkerStatus(StatusEnum.OK)
rtreactor = livegraph.rtreactors[reactor]
status = StatusEnum.VOID
reason = reactor._status_reason
upstreams = livegraph.reactor_to_upstream[reactor]
pins = None
if reason == StatusReasonEnum.UNCONNECTED:
pins = []
for pinname, accessor in upstreams.items():
if accessor is None:
pins.append(pinname)
elif reason == StatusReasonEnum.UPSTREAM:
pins = {}
for pinname, accessor in upstreams.items():
astatus = status_accessor(accessor)
if astatus[0] == StatusEnum.OK:
continue
pins[pinname] = astatus
for pinname in rtreactor.editpins:
cell = livegraph.editpin_to_cell[reactor][pinname]
astatus = status_accessor(cell)
if astatus[0] == StatusEnum.OK:
continue
pins[pinname] = astatus
return WorkerStatus(
status, reason, pins
)
def status_macro(macro):
if macro._gen_context is not None:
assert not macro._void
gen_status = macro._gen_context._get_status()
if format_context_status(gen_status) != "OK":
return WorkerStatus(
StatusEnum.SUB, None, gen_status
)
return WorkerStatus(StatusEnum.OK)
manager = macro._get_manager()
livegraph = manager.livegraph
pins = None
if not macro._void:
status = StatusEnum.PENDING
reason = StatusReasonEnum.UPSTREAM
else:
status = StatusEnum.VOID
reason = macro._status_reason
upstreams = livegraph.macro_to_upstream[macro]
if reason == StatusReasonEnum.UNCONNECTED:
pins = []
for pinname, accessor in upstreams.items():
if accessor is None:
pins.append(pinname)
elif reason == StatusReasonEnum.UPSTREAM:
pins = {}
for pinname, accessor in upstreams.items():
astatus = status_accessor(accessor)
if astatus[0] == StatusEnum.OK:
continue
pins[pinname] = astatus
return WorkerStatus(status, reason, pins)
def format_status(stat):
status, reason, prelim = stat
if status == StatusEnum.OK:
if prelim:
return "preliminary"
else:
return "OK"
elif status == StatusEnum.PENDING:
return "pending"
else:
if reason is None:
return "void"
else:
return reason.name.lower()
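# Illustrative results of format_status for status tuples as produced by
# status_cell/status_accessor above (examples only):
#   (StatusEnum.OK, None, False)                        -> "OK"
#   (StatusEnum.OK, None, True)                         -> "preliminary"
#   (StatusEnum.VOID, StatusReasonEnum.UNDEFINED, None) -> "undefined"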
def format_worker_status(stat, as_child=False):
status, reason, pins = (
stat.status, stat.reason, stat.pins
)
if status == StatusEnum.OK:
if stat.preliminary:
return "preliminary"
return "OK"
elif status == StatusEnum.PENDING:
if reason == StatusReasonEnum.EXECUTING:
progress = stat.progress
if progress is not None and progress > 0:
return "executing, %.1f %%" % progress
else:
return "executing"
else:
return "pending"
elif status == StatusEnum.SUB:
sub = pins
ctx_status = format_context_status(sub)
ctx_statustxt = json.dumps(ctx_status, indent=2, sort_keys=True)
return ("macro ctx =>", ctx_status)
else:
if reason == StatusReasonEnum.UNCONNECTED:
result = "unconnected => "
result += ", ".join(pins)
elif reason == StatusReasonEnum.UPSTREAM:
result = reason.name.lower() + " => "
pinresult = []
for pinname, pstatus in pins.items():
if as_child:
pinresult.append(pinname)
else:
pinresult.append(pinname + " " + format_status(pstatus))
result += ", ".join(pinresult)
else:
result = reason.name.lower()
return result
def format_context_status(stat):
from .worker import Worker
from .cell import Cell
from .context import Context
result = {}
for childname, value in stat.items():
if isinstance(value, (str, dict)):
if value == "Status: OK":
continue
result[childname] = value
continue
child, childstat = value
if not isinstance(child, Context):
if childstat[0] == StatusEnum.VOID:
if childstat[1] == StatusReasonEnum.UPSTREAM:
continue
if childstat[0] == StatusEnum.PENDING:
if isinstance(child, Worker):
if childstat.reason != StatusReasonEnum.EXECUTING:
continue
else:
continue
if isinstance(child, Worker):
childresult = format_worker_status(childstat, as_child=True)
elif isinstance(child, Cell):
childresult = format_status(childstat)
elif isinstance(child, Context):
childresult = format_context_status(childstat)
else:
continue
if childresult == "OK":
continue
result[childname] = childresult
if not len(result):
result = "OK"
return result
| mit | 4,051,852,312,388,161,000 | 32.476667 | 76 | 0.572339 | false |
sjorek/geoanonymizer.py | geoanonymizer/spatial/projection.py | 1 | 2642 | # -*- coding: utf-8 -*-
u"""
Functions dealing with geodesic projection systems.
WGS84 (EPSG 4326) projection system
“OpenStreetMap uses the WGS84 spatial reference system used by the
Global Positioning System (GPS). It uses geographic coordinates
between -180° and 180° longitude and -90° and 90° latitude. So
this is the "native" OSM format.
This is the right choice for you if you need geographical coordinates
or want to transform the coordinates into some other spatial reference
system or projection.”
-- from `Projections/Spatial reference systems: WGS84 (EPSG 4326)
<http://openstreetmapdata.com/info/projections#wgs84>`_
Mercator (EPSG 3857) projection system
“Most tiled web maps (such as the standard OSM maps and Google Maps)
use this Mercator projection.
The map area of such maps is a square with x and y coordiates both
between -20,037,508.34 and 20,037,508.34 meters. As a result data
north of about 85.1° and south of about -85.1° latitude can not be
shown and has been cut off. …
This is the right choice for you if you are creating tiled web maps.”
-- from `Projections/Spatial reference systems: Mercator (EPSG 3857)
<http://openstreetmapdata.com/info/projections#mercator>`_
Hint: Apple™ iOS or Google™ Android tracked coordinates use WGS84 (EPSG 4326)
projection and nearly all geomap-services, like google-maps, return this too,
although they're utilizing Mercator (EPSG 3857) projection internally.
"""
import math
def _generate_epsg_4326_to_epsg_3857_converter():
factor1 = 20037508.34 / 180
factor2 = math.pi / 360
factor3 = math.pi / 180
def convert_epsg_4326_to_epsg_3857(latitude, longitude):
"""
Convert WGS84 (EPSG 4326) to Mercator (EPSG 3857) projection.
"""
x = longitude * factor1
y = (math.log(math.tan((90 + latitude) * factor2)) / factor3) * factor1
return x, y
return convert_epsg_4326_to_epsg_3857
convert_gps_to_map_coordinates = _generate_epsg_4326_to_epsg_3857_converter()
def _generate_epsg_3857_to_epsg_4326_converter():
factor1 = 180 / 20037508.34
factor2 = 360 / math.pi
factor3 = math.pi / 20037508.34
def convert_epsg_3857_to_epsg_4326(x, y):
"""
Convert Mercator (EPSG 3857) to WGS84 (EPSG 4326) projection.
"""
longitude = x * factor1
latitude = factor2 * math.atan(math.exp(y * factor3)) - 90
return latitude, longitude
return convert_epsg_3857_to_epsg_4326
convert_map_to_gps_coordinates = _generate_epsg_3857_to_epsg_4326_converter()
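# Illustrative round-trip check (added example; the coordinates are roughly Berlin):
if __name__ == '__main__':
    x, y = convert_gps_to_map_coordinates(52.52, 13.405)
    lat, lon = convert_map_to_gps_coordinates(x, y)
    print("Mercator: %.2f, %.2f -> WGS84: %.6f, %.6f" % (x, y, lat, lon))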
| mit | -186,810,838,438,010,940 | 33.051948 | 79 | 0.692601 | false |
alejandrorosas/ardupilot | Tools/autotest/pysim/multicopter.py | 1 | 6720 | #!/usr/bin/env python
from aircraft import Aircraft
import util, time, math
from math import degrees, radians
from rotmat import Vector3, Matrix3
class Motor(object):
def __init__(self, angle, clockwise, servo):
self.angle = angle # angle in degrees from front
self.clockwise = clockwise # clockwise == true, anti-clockwise == false
self.servo = servo # what servo output drives this motor
def build_motors(frame):
'''build a motors list given a frame type'''
frame = frame.lower()
if frame in [ 'quad', '+', 'x' ]:
motors = [
Motor(90, False, 1),
Motor(270, False, 2),
Motor(0, True, 3),
Motor(180, True, 4),
]
if frame in [ 'x', 'quadx' ]:
for i in range(4):
motors[i].angle -= 45.0
elif frame in ["y6"]:
motors = [
Motor(60, False, 1),
Motor(60, True, 7),
Motor(180, True, 4),
Motor(180, False, 8),
Motor(-60, True, 2),
Motor(-60, False, 3),
]
elif frame in ["hexa", "hexa+"]:
motors = [
Motor(0, True, 1),
Motor(60, False, 4),
Motor(120, True, 8),
Motor(180, False, 2),
Motor(240, True, 3),
Motor(300, False, 7),
]
elif frame in ["hexax"]:
motors = [
Motor(30, False, 7),
Motor(90, True, 1),
Motor(150, False, 4),
Motor(210, True, 8),
Motor(270, False, 2),
Motor(330, True, 3),
]
elif frame in ["octa", "octa+", "octax" ]:
motors = [
Motor(0, True, 1),
Motor(180, True, 2),
Motor(45, False, 3),
Motor(135, False, 4),
Motor(-45, False, 5),
Motor(-135, False, 6),
Motor(270, True, 7),
Motor(90, True, 8),
]
if frame == 'octax':
for i in range(8):
motors[i].angle += 22.5
elif frame in ["octa-quad"]:
motors = [
Motor( 45, False, 1),
Motor( -45, True, 2),
Motor(-135, False, 3),
Motor( 135, True, 4),
Motor( -45, False, 5),
Motor( 45, True, 6),
Motor( 135, False, 7),
Motor(-135, True, 8),
]
else:
raise RuntimeError("Unknown multicopter frame type '%s'" % frame)
return motors
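# Illustrative use of build_motors (frame names map to the layouts above):
#   for m in build_motors('quadx'):
#       print(m.servo, m.angle, m.clockwise)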
class MultiCopter(Aircraft):
'''a MultiCopter'''
def __init__(self, frame='+',
hover_throttle=0.45,
terminal_velocity=15.0,
frame_height=0.1,
mass=1.5):
Aircraft.__init__(self)
self.motors = build_motors(frame)
self.motor_speed = [ 0.0 ] * len(self.motors)
self.mass = mass # Kg
self.hover_throttle = hover_throttle
self.terminal_velocity = terminal_velocity
self.terminal_rotation_rate = 4*radians(360.0)
self.frame_height = frame_height
# scaling from total motor power to Newtons. Allows the copter
# to hover against gravity when each motor is at hover_throttle
self.thrust_scale = (self.mass * self.gravity) / (len(self.motors) * self.hover_throttle)
self.last_time = time.time()
def update(self, servos):
for i in range(0, len(self.motors)):
servo = servos[self.motors[i].servo-1]
if servo <= 0.0:
self.motor_speed[i] = 0
else:
self.motor_speed[i] = servo
m = self.motor_speed
# how much time has passed?
t = time.time()
delta_time = t - self.last_time
self.last_time = t
# rotational acceleration, in rad/s/s, in body frame
rot_accel = Vector3(0,0,0)
thrust = 0.0
for i in range(len(self.motors)):
rot_accel.x += -radians(5000.0) * math.sin(radians(self.motors[i].angle)) * m[i]
rot_accel.y += radians(5000.0) * math.cos(radians(self.motors[i].angle)) * m[i]
if self.motors[i].clockwise:
rot_accel.z -= m[i] * radians(400.0)
else:
rot_accel.z += m[i] * radians(400.0)
thrust += m[i] * self.thrust_scale # newtons
# rotational air resistance
rot_accel.x -= self.gyro.x * radians(5000.0) / self.terminal_rotation_rate
rot_accel.y -= self.gyro.y * radians(5000.0) / self.terminal_rotation_rate
rot_accel.z -= self.gyro.z * radians(400.0) / self.terminal_rotation_rate
# update rotational rates in body frame
self.gyro += rot_accel * delta_time
# update attitude
self.dcm.rotate(self.gyro * delta_time)
self.dcm.normalize()
# air resistance
air_resistance = - self.velocity * (self.gravity/self.terminal_velocity)
accel_body = Vector3(0, 0, -thrust / self.mass)
accel_earth = self.dcm * accel_body
accel_earth += Vector3(0, 0, self.gravity)
accel_earth += air_resistance
# add in some wind (turn force into accel by dividing by mass).
# NOTE: disable this drag correction until we work out
# why it is blowing up
# accel_earth += self.wind.drag(self.velocity) / self.mass
# if we're on the ground, then our vertical acceleration is limited
# to zero. This effectively adds the force of the ground on the aircraft
if self.on_ground() and accel_earth.z > 0:
accel_earth.z = 0
# work out acceleration as seen by the accelerometers. It sees the kinematic
# acceleration (ie. real movement), plus gravity
self.accel_body = self.dcm.transposed() * (accel_earth + Vector3(0, 0, -self.gravity))
# new velocity vector
self.velocity += accel_earth * delta_time
# new position vector
old_position = self.position.copy()
self.position += self.velocity * delta_time
# constrain height to the ground
if self.on_ground():
if not self.on_ground(old_position):
print("Hit ground at %f m/s" % (self.velocity.z))
self.velocity = Vector3(0, 0, 0)
# zero roll/pitch, but keep yaw
(r, p, y) = self.dcm.to_euler()
self.dcm.from_euler(0, 0, y)
self.position = Vector3(self.position.x, self.position.y,
-(self.ground_level + self.frame_height - self.home_altitude))
# update lat/lon/altitude
self.update_position(delta_time)
| gpl-3.0 | 795,064,505,372,224,100 | 34.183246 | 98 | 0.524554 | false |
Hanaasagi/Ushio | initdb.py | 1 | 1961 | #!/usr/bin/python
# -*-coding:UTF-8-*-
import sys
import time
import yaml
import pymongo
from hashlib import md5
zone_map = {
    'Anime': 'Things about anime',
    'Music': 'Things about music',
    'Light novels': 'Things about light novels'
}
def create_admin(db, setting):
    email = raw_input('Please enter a valid email address: ')
    username = raw_input('Please enter the administrator username: ')
    password = raw_input('Please enter the administrator password: ')
hash_object = md5(password + setting['salt'])
password = hash_object.hexdigest()
user = {
'username': username,
'password': password,
'money': setting['init_money'],
'register_time': time.time(),
'favorite': [],
'email': email,
'level': 0,
'qq': '',
'website': '',
'address': '',
        'signal': u'This user is too lazy to leave anything here',
'openemail': 1,
'openfavorite': 1,
'openqq': 1,
'following': [],
'follower': [],
'allowemail': 1,
'logintime': None,
'loginip': None
}
db.user.insert(user)
def create_zone():
for name, desc in zone_map.items():
data = {
'name': name,
'description': desc,
'nums': 0
}
db.zone.insert(data)
if __name__ == '__main__':
try:
with open('setting.yaml', 'r') as f:
setting = yaml.load(f)
except:
print 'can not load setting file'
sys.exit(0)
client = pymongo.MongoClient(setting['database']['address'])
db = client[setting['database']['db']]
    isdo = raw_input('Create the administrator account? (Y/n): ')
    if isdo in ('Y', 'y'):
        create_admin(db, setting['global'])
    else:
        print 'Nothing was done'
    isdo = raw_input('Initialize the forum zones? (Y/n): ')
    if isdo in ('Y', 'y'):
        create_zone()
    else:
        print 'Nothing was done'
| mit | 6,451,959,847,060,826,000 | 21.766234 | 64 | 0.509983 | false |
richbrowne/f5-openstack-agent | test/functional/neutronless/service_object_rename/test_service_object_rename.py | 1 | 8414 | from copy import deepcopy
import json
import logging
import mock
import os
import pytest
import requests
import time
from f5.bigip import ManagementRoot
from f5.utils.testutils.registrytools import register_device
from f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver import \
iControlDriver
from f5_openstack_agent.lbaasv2.drivers.bigip.service_adapter import \
ServiceModelAdapter
from f5_openstack_agent.lbaasv2.drivers.bigip.system_helper import SystemHelper
from conftest import ConfFake
from conftest import setup_neutronless_test
requests.packages.urllib3.disable_warnings()
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
# Get the Oslo Config File
curdir = os.path.dirname(os.path.realpath(__file__))
oslo_config_filename = os.path.join(curdir, 'oslo_confs.json')
OSLO_CONFIGS = json.load(open(oslo_config_filename))
TEST_CONFIG = OSLO_CONFIGS['default']
LBAAS_SERVICES = json.load(open(os.path.join(
curdir, 'lbaas_services.json')))
CREATELB = LBAAS_SERVICES['full_service']
@pytest.fixture
def service_name(request):
return request.config.getoption("--service-name")
@pytest.fixture
def bigip():
return ManagementRoot(pytest.symbols.bigip_mgmt_ip_public, 'admin', 'admin')
@pytest.fixture
def setup_test_wrapper(request, bigip, makelogdir):
loghandler = setup_neutronless_test(request, bigip, makelogdir, vlan=True)
return loghandler
@pytest.fixture
def service_adapter():
return ServiceModelAdapter(ConfFake(TEST_CONFIG))
@pytest.fixture
def system_helper():
return SystemHelper()
def prepare_service(service):
member = service['members'][0]
member_ip = pytest.symbols.server_ip
member['address'] = member_ip
member['port']['fixed_ips'][0]['ip_address'] = member_ip
lb_network_id = service['loadbalancer']['network_id']
lb_net_seg_id = pytest.symbols.vip_vxlan_segid
service['networks'][lb_network_id]['provider:segmentation_id'] = (
lb_net_seg_id
)
for member in service['members']:
member_network_id = member['network_id']
service['networks'][member_network_id]['provider:segmentation_id'] = (
pytest.symbols.server_vxlan_segid
)
def get_listener_status(bigip, listener_name, listener_folder):
vs_name = listener_name
vs_folder = listener_folder
v = bigip.tm.ltm.virtuals.virtual
if v.exists(name=vs_name, partition=vs_folder):
v_obj = v.load(name=vs_name, partition=vs_folder)
v_stats = v_obj.stats.load(name=vs_name, partition=vs_folder)
if v_stats:
v_stat_entries = v_stats.entries
for v_stat_key in v_stat_entries.keys():
v_stat_nested = (
v_stat_entries.get(v_stat_key, None)['nestedStats']
)
collected_status = {}
for entry in ['status.availabilityState', 'status.enabledState']:
if entry in v_stat_nested['entries']:
collected_status[entry] = (
v_stat_nested['entries'][entry]['description']
)
return collected_status
@pytest.fixture
def service(request, bigip, service_name, service_adapter, system_helper):
print("Creating service for %s" % service_name)
service = deepcopy(LBAAS_SERVICES[service_name])
prepare_service(service)
folder_name = service_adapter.get_folder_name(
service['loadbalancer']['tenant_id'])
def teardown_service():
if system_helper.folder_exists(bigip, folder_name):
system_helper.purge_folder_contents(bigip, folder_name)
system_helper.delete_folder(bigip, folder_name)
request.addfinalizer(teardown_service)
return service
def create_default_mock_rpc_plugin():
bigip_selfip = pytest.symbols.bigip_selfip
mock_rpc_plugin = mock.MagicMock(name='mock_rpc_plugin')
mock_rpc_plugin.get_port_by_name.return_value = [
{'fixed_ips': [{'ip_address': bigip_selfip}]}
]
return mock_rpc_plugin
def configure_icd(icd_config, create_mock_rpc):
class ConfFake(object):
'''minimal fake config object to replace oslo with controlled params'''
def __init__(self, params):
self.__dict__ = params
for k, v in self.__dict__.items():
if isinstance(v, unicode):
self.__dict__[k] = v.encode('utf-8')
def __repr__(self):
return repr(self.__dict__)
icontroldriver = iControlDriver(ConfFake(icd_config),
registerOpts=False)
icontroldriver.plugin_rpc = create_mock_rpc()
return icontroldriver
def logcall(lh, call, *cargs, **ckwargs):
call(*cargs, **ckwargs)
def handle_init_registry(bigip, icd_configuration,
create_mock_rpc=create_default_mock_rpc_plugin):
register_device(bigip)
icontroldriver = configure_icd(icd_configuration, create_mock_rpc)
start_registry = register_device(bigip)
return icontroldriver, start_registry
def deploy_service(bigip, service_name):
icontroldriver, start_registry = handle_init_registry(bigip, TEST_CONFIG)
service = deepcopy(LBAAS_SERVICES[service_name])
prepare_service(service)
logcall(setup_test_wrapper,
icontroldriver._common_service_handler,
service)
def test_create_config(track_bigip_cfg, setup_test_wrapper, bigip,
service_name):
"""Tests creation of a config"""
print("Creating service for %s" % service_name)
deploy_service(bigip, service_name)
def test_cleanup_config(track_bigip_cfg, bigip, service_name, service_adapter,
system_helper):
"""Tests the cleanup of a config"""
print("Teardown service for %s" % service_name)
icontroldriver, start_registry = handle_init_registry(bigip, TEST_CONFIG)
service = deepcopy(LBAAS_SERVICES[service_name])
folder_name = service_adapter.get_folder_name(
service['loadbalancer']['tenant_id'])
if system_helper.folder_exists(bigip, folder_name):
system_helper.purge_folder_contents(bigip, folder_name)
system_helper.delete_folder(bigip, folder_name)
def get_listener_name(service, listener, service_adapter, unique_name=False):
vs_name = listener.get("name", "")
if not vs_name or unique_name:
svc = {"loadbalancer": service['loadbalancer'],
"listener": listener}
virtual = service_adapter.get_virtual_name(svc)
vs_name = virtual['name']
return vs_name
def test_rename_service_objects(track_bigip_cfg, bigip, service,
service_adapter):
"""Tests the renaming of service objects"""
icontroldriver, start_registry = handle_init_registry(bigip, TEST_CONFIG)
folder_name = service_adapter.get_folder_name(
service['loadbalancer']['tenant_id'])
pre_vs_status = {}
for listener in service['listeners']:
vs_name = get_listener_name(service,
listener,
service_adapter)
pre_vs_status['vs_name'] = get_listener_status(
bigip,
vs_name,
folder_name)
assert(not icontroldriver.service_exists(service))
assert(icontroldriver.service_rename_required(service))
icontroldriver.service_object_teardown(service)
logcall(setup_test_wrapper,
icontroldriver._common_service_handler,
service)
time.sleep(10)
assert(icontroldriver.service_exists(service))
post_vs_status = {}
for listener in service['listeners']:
vs_name = get_listener_name(service,
listener,
service_adapter,
unique_name=True)
post_vs_status['vs_name'] = get_listener_status(
bigip,
vs_name,
folder_name)
for k, v in pre_vs_status.iteritems():
assert(v == post_vs_status[k])
def test_no_rename_service_objects(track_bigip_cfg, bigip, service,
service_name):
"""Tests the no renaming of service objects"""
deploy_service(bigip, service_name)
icontroldriver, start_registry = handle_init_registry(bigip, TEST_CONFIG)
assert(icontroldriver.service_exists(service))
assert(not icontroldriver.service_rename_required(service))
| apache-2.0 | -8,537,590,265,664,096,000 | 30.162963 | 80 | 0.650226 | false |
brumar/WPsolving | lib/postEvaluation.py | 1 | 4325 | '''
Created on 25 Jul. 2014
@author: Nevrose
'''
import csv
class weightEvaluator():
def __init__(self):
self.datas={}
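        # Filled by prepareStructure() as a nested dict:
        # self.datas[problemName][indexInfo][indexRepresentation] =
        #     {"representation": ..., "occurences": 0, "verbalDescription": "", "weight": 0}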
def prepareStructure(self,problemBank):
for problemName in problemBank.dicPbm.iterkeys():
problem=problemBank.dicPbm[problemName]
for indexInfo in range(len(problem.text.textInformations)):
textInfo=problem.text.textInformations[indexInfo]
for indexRepresentation in range(len(textInfo.representations)):
rep=textInfo.representations[indexRepresentation]
if(problemName not in self.datas.keys()):
self.datas[problemName]={}
if(indexInfo not in self.datas[problemName].keys()):
self.datas[problemName][indexInfo]={}
if(indexRepresentation not in self.datas[problemName][indexInfo].keys()):
self.datas[problemName][indexInfo][indexRepresentation]={}
self.datas[problemName][indexInfo][indexRepresentation]={"representation":rep,"occurences":0,"verbalDescription":"","weight":0}
def bindConfrontationToPathsDatas(self,confrontationDic, dDic):
#self.dicPbmSetFormulaPlannedObserved[pbm][setName][formula]=[planned,observationsCount]
dic=confrontationDic.dicPbmSetFormulaPlannedObserved
for problem in dic.iterkeys():
for set in dic[problem].iterkeys():
for formula in dic[problem][set]:
if dic[problem][set][formula][0]==True:
numberOfObservations=dic[problem][set][formula][1]
#logging.info("found")
congruentLines=(len(dDic[problem][formula]))
for pathLine in dDic[problem][formula]:
path=pathLine["path"]
numberOfRepresentationUsed=len(path.interpretationsList)
for interpIndex in range(numberOfRepresentationUsed):
verbalDescription=path.interpretationsList[interpIndex]
textIndex=path.richInterpretationsList[interpIndex].indexTextInformation
repIndex=path.richInterpretationsList[interpIndex].indexSelectedRepresentation
#logging.info(verbalDescription,textIndex,repIndex)
self.datas[problem][textIndex][repIndex]["occurences"]+=float(numberOfObservations)/congruentLines
#logging.info(self.datas[problem][textIndex][repIndex]["occurences"])
if( self.datas[problem][textIndex][repIndex]["verbalDescription"]==""):
self.datas[problem][textIndex][repIndex]["verbalDescription"]=verbalDescription
def normaliseWeightByPbm(self):
for pbm in self.datas:
suma=0
for info in self.datas[pbm]:
for rep in self.datas[pbm][info]:
suma+=self.datas[pbm][info][rep]["occurences"]
for info in self.datas[pbm]:
for rep in self.datas[pbm][info]:
if(self.datas[pbm][info][rep]["verbalDescription"]!=""):
self.datas[pbm][info][rep]["weight"]=float(self.datas[pbm][info][rep]["occurences"])/suma
#=======================================================
# logging.info(pbm)
# logging.info(self.datas[pbm][info][rep]["verbalDescription"])
# logging.info(self.datas[pbm][info][rep]["weight"])
#=======================================================
def printCSV(self,csvFile="datasWeight.csv"):
with open(csvFile, 'wb') as csvfile:
for pbm in self.datas:
for info in self.datas[pbm]:
for rep in self.datas[pbm][info]:
writer = csv.writer(csvfile, delimiter=';',quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow([pbm,self.datas[pbm][info][rep]["verbalDescription"],self.datas[pbm][info][rep]["weight"],self.datas[pbm][info][rep]["occurences"]])
| mit | -129,583,702,923,696,180 | 56.666667 | 176 | 0.551676 | false |
cedrick-f/pyVot | src/PyVot.py | 1 | 5947 | #!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
##This file is part of PyVot
#############################################################################
#############################################################################
## ##
## PyVot ##
## ##
#############################################################################
#############################################################################
## Copyright (C) 2006-2009 Cédrick FAURY
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import wx
import Icones
import sys, os, getpass
##import psyco
##psyco.log()
##psyco.full()
import globdef
from globdef import *
#import sys, os, time, traceback, types
import FenPrincipale
#import wx.aui
#import wx.html
#import images
# For debugging
##wx.Trap();
##print "wx.VERSION_STRING = %s (%s)" % (wx.VERSION_STRING, wx.USE_UNICODE and 'unicode' or 'ansi')
##print "pid:", os.getpid()
##raw_input("Press Enter...")
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
class MySplashScreen(wx.SplashScreen):
def __init__(self):
bmp = Icones.getLogoSplashBitmap()
wx.SplashScreen.__init__(self, bmp,
wx.SPLASH_CENTRE_ON_SCREEN | wx.SPLASH_TIMEOUT,
5000, None, -1,
style = wx.BORDER_NONE|wx.FRAME_NO_TASKBAR)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.fc = wx.FutureCall(2000, self.ShowMain)
def OnClose(self, evt):
# Make sure the default handler runs too so this window gets
# destroyed
evt.Skip()
self.Hide()
# if the timer is still running then go ahead and show the
# main frame now
if self.fc.IsRunning():
self.fc.Stop()
self.ShowMain()
def ShowMain(self):
NomFichier = None
        if len(sys.argv)>1: # a parameter was passed
            parametre=sys.argv[1]
            # check that the file passed as a parameter exists
if os.path.isfile(parametre):
NomFichier = parametre
frame = FenPrincipale.wxPyVot(None, "PyVot", NomFichier)
frame.Show()
if self.fc.IsRunning():
self.Raise()
# wx.CallAfter(frame.ShowTip)
#---------------------------------------------------------------------------
class PyVotApp(wx.App):
def OnInit(self):
"""
Create and show the splash screen. It will then create and show
the main frame when it is time to do so.
"""
self.version = VERSION
# try:
self.auteur = unicode(getpass.getuser(),'cp1252')
# except:
# self.auteur = ""
wx.SystemOptions.SetOptionInt("mac.window-plain-transition", 1)
self.SetAppName("PyVot")
# For debugging
#self.SetAssertMode(wx.PYAPP_ASSERT_DIALOG)
# Normally when using a SplashScreen you would create it, show
        # it and then continue on with the application's
# initialization, finally creating and showing the main
# application window(s). In this case we have nothing else to
# do so we'll delay showing the main frame until later (see
# ShowMain above) so the users can see the SplashScreen effect.
splash = MySplashScreen()
splash.Show()
return True
#---------------------------------------------------------------------------
def main():
## try:
# demoPath = os.path.dirname(__file__)
# os.chdir(demoPath)
# print demoPath
# except:
# pass
app = PyVotApp(False)
# wx.Log.SetActiveTarget( LogPrintStackStderr() )
app.MainLoop()
# def PyVotRunning():
# #
# # This function tests whether PyVot.exe is already running, in which case everything is stopped.
# #
# if not HAVE_WMI:
# return False
# else:
# nb_instances=0
# try:
# controler=wmi.WMI()
# for elem in controler.Win32_Process():
# if "PyVot.exe"==elem.Caption:
# nb_instances=nb_instances+1
# if nb_instances>=2:
# sys.exit(0)
# except:
# pass
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# from customLogTarget import *
if __name__ == '__main__':
# __name__ = 'Main'
#
# On teste si PyVot est déjà lancé
#
# PyVotRunning()
#
# Improve processing speed by using psyco
#
# if USE_PSYCO:
# try:
# import psyco
# HAVE_PSYCO=True
# except ImportError:
# HAVE_PSYCO=False
# if HAVE_PSYCO:
# print "Psyco !!!!!"
# psyco.full()
main()
#----------------------------------------------------------------------------
| gpl-3.0 | -7,407,038,307,259,723,000 | 30.802139 | 99 | 0.478056 | false |
holocronweaver/wanikani2anki | app/widgets.py | 1 | 2595 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from kivy.base import EventLoop
from kivy.graphics import Color, Rectangle
from kivy.uix.button import Button
from kivy.uix.behaviors.togglebutton import ToggleButtonBehavior
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
class WKToggleButton(ToggleButton):
def on_state(self, widget, value):
"""Change solid color based on button state.
Unfortunately not implemented in default Kivy."""
if value == 'down':
self.background_color = self.background_color_down
self.color = self.color_down
else:
self.background_color = self.background_color_normal
self.color = self.color_normal
class ErrorLabel(Label):
"""Label widget which only shows itself when an error label is set."""
_error = False
@property
def error(self):
return self._error
@error.setter
def error(self, value):
self._error = value
if self._error:
self.text = self._error
with self.canvas.before:
# Border.
Color(rgba=self.border_color)
Rectangle(pos=self.pos, size=self.size)
# Background.
Color(rgba=self.background_color)
Rectangle(
pos=[int(self.pos[i] + self.border_margin)
for i in range(2)],
size=[self.size[i] - 2 * self.border_margin
for i in range(2)])
else:
self.text = ''
self.canvas.before.clear()
class TextInputPlus(TextInput):
"""Supports right-click context menus and max characters."""
use_bubble = True
max_char = None
def on_text(self, instance, value):
if self.max_char and len(value) > self.max_char:
self.text = value[:self.max_char]
def on_touch_down(self, touch):
super().on_touch_down(touch)
if touch.button == 'right':
pos = touch.pos
if self.collide_point(pos[0], pos[1]):
self._show_cut_copy_paste(
pos, EventLoop.window, mode='paste')
def paste(self):
super().paste()
if not self.multiline:
# Remove extraneous newlines.
self.text = self.text.rstrip()
| mpl-2.0 | 5,325,115,064,332,014,000 | 33.144737 | 74 | 0.597303 | false |
EricssonResearch/calvin-base | calvin/actorstore/systemactors/net/UDPSender.py | 1 | 3461 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinlib, calvinsys
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class UDPSender(Actor):
"""
Send all incoming tokens to given address/port over UDP
Control port takes control commands of the form (uri only applicable for connect.)
{
"command" : "connect"/"disconnect",
"uri": "udp://<address>:<port>"
}
Input:
data_in : Each received token will be sent to address set via control port
control_in : Control port
"""
@manage(['address', 'port'])
def init(self):
self.address = None
self.port = None
self.sender = None
self.setup()
def connect(self):
if self.sender:
calvinsys.close(self.sender)
self.sender = calvinsys.open(self, "network.socketclient", address=self.address, port=self.port, connection_type="UDP")
def will_migrate(self):
if self.sender:
calvinsys.close(self.sender)
def did_migrate(self):
self.setup()
if self.address is not None:
self.connect()
def setup(self):
self.regexp = calvinlib.use('regexp')
@stateguard(lambda self: self.sender and calvinsys.can_write(self.sender))
@condition(action_input=['data_in'])
def send(self, token):
calvinsys.write(self.sender, token)
# URI parsing - 0: protocol, 1: host, 2: port
URI_REGEXP = r'([^:]+)://([^/:]*):([0-9]+)'
def parse_uri(self, uri):
status = False
try:
parsed_uri = self.regexp.findall(self.URI_REGEXP, uri)[0]
protocol = parsed_uri[0]
if protocol != 'udp':
_log.warn("Protocol '%s' not supported, assuming udp" % (protocol,))
self.address = parsed_uri[1]
self.port = int(parsed_uri[2])
status = True
except:
_log.warn("malformed or erroneous control uri '%s'" % (uri,))
self.address = None
self.port = None
return status
@condition(action_input=['control_in'])
def control(self, control):
cmd = control.get('command', '')
if cmd == 'connect' and self.sender is None:
self._new_connection(control)
elif cmd == 'disconnect' and self.sender is not None:
self._close_connection()
def _new_connection(self, control):
if self.parse_uri(control.get('uri', '')):
self.connect()
def _close_connection(self):
calvinsys.close(self.sender)
self.sender = None
action_priority = (control, send)
requires = ['network.socketclient', 'regexp']
test_set = [
{
'input': {'data_in': [],
'control_in': []}
}
]
| apache-2.0 | 5,605,926,367,011,575,000 | 29.359649 | 127 | 0.59896 | false |
juliangarcia/pyevodyn | PyEvoDyn/pyevodyn/tests/weak_selection_theory_test.py | 1 | 1830 | '''
Created on Sep 28, 2012
@author: garcia
'''
import unittest
from pyevodyn import symbolic, numerical
from sympy.matrices import Matrix
import numpy as np
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_stationary_distribution(self):
for _ in range(0, 10):
game_matrix_numeric = np.array([[np.random.randint(0, 10), np.random.randint(0, 10)],
[np.random.randint(0, 10), np.random.randint(0, 10)]])
game_matrix_symbolic = Matrix(game_matrix_numeric.tolist())
intensity_of_selection = 0.001
mutation_probability = 0.001
population_size = 100
symbolic_result = symbolic.symbolic_matrix_to_array(
symbolic.stationary_distribution_weak_selection(game_matrix_symbolic, intensity_of_selection,
population_size, mutation_probability).T)
numerical_result = np.array([numerical.stationary_distribution_weak_selection(game_matrix_numeric,
population_size,
intensity_of_selection,
mutation_probability)])
# print symbolic_result
# print numerical_result
np.testing.assert_allclose(symbolic_result, numerical_result, rtol=0.05,
err_msg="Numerical does not match symbolic", verbose=False)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.test_stationary_distribution']
unittest.main()
| bsd-2-clause | 304,276,415,320,919,300 | 42.571429 | 113 | 0.515301 | false |
BrunoCaimar/ArcREST | src/arcresthelper/publishingtools.py | 1 | 133645 |
from __future__ import print_function
from __future__ import absolute_import
from .securityhandlerhelper import securityhandlerhelper
import re as re
dateTimeFormat = '%Y-%m-%d %H:%M'
import arcrest
from . import featureservicetools as featureservicetools
from arcrest.hostedservice import AdminFeatureService
import datetime, time
import json
import os
import arcresthelper.common as common
import gc
import sys
from .packages.six.moves import urllib_parse as urlparse
try:
import pyparsing
pyparsingInstall = True
from arcresthelper import select_parser
except:
pyparsingInstall = False
import inspect
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
#----------------------------------------------------------------------
def trace():
"""Determines information about where an error was thrown.
Returns:
tuple: line number, filename, error message
Examples:
>>> try:
... 1/0
... except:
... print("Error on '{}'\\nin file '{}'\\nwith error '{}'".format(*trace()))
...
Error on 'line 1234'
in file 'C:\\foo\\baz.py'
with error 'ZeroDivisionError: integer division or modulo by zero'
"""
import traceback, inspect, sys
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
class publishingtools(securityhandlerhelper):
#----------------------------------------------------------------------
def getItemID(self, userContent, title=None, name=None, itemType=None):
"""Gets the ID of an item by a combination of title, name, and type.
Args:
userContent (list): A list of user content.
title (str): The title of the item. Defaults to ``None``.
name (str): The name of the item. Defaults to ``None``.
itemType (str): The type of the item. Defaults to ``None``.
Returns:
str: The item's ID. If the item does not exist, ``None``.
Raises:
AttributeError: If both ``title`` and ``name`` are not specified (``None``).
See Also:
:py:func:`getItem`
"""
itemID = None
if name is None and title is None:
raise AttributeError('Name or Title needs to be specified')
for item in userContent:
if title is None and name is not None:
if item.name == name and (itemType is None or item.type == itemType):
return item.id
elif title is not None and name is None:
if item.title == title and (itemType is None or item.type == itemType):
return item.id
else:
if item.name == name and item.title == title and (itemType is None or item.type == itemType):
return item.id
return None
#----------------------------------------------------------------------
def getItem(self, userContent, title=None, name=None, itemType=None):
"""Gets an item by a combination of title, name, and type.
Args:
userContent (list): A list of user content.
title (str): The title of the item. Defaults to ``None``.
name (str): The name of the item. Defaults to ``None``.
itemType (str): The type of the item. Defaults to ``None``.
Returns:
            The matching item object. If the item does not exist, ``None``.
Raises:
AttributeError: If both ``title`` and ``name`` are not specified (``None``).
See Also:
:py:func:`getItemID`
"""
itemID = None
if name is None and title is None:
raise AttributeError('Name or Title needs to be specified')
for item in userContent:
if title is None and name is not None:
if item.name == name and (itemType is None or item.type == itemType):
return item
elif title is not None and name is None:
if item.title == title and (itemType is None or item.type == itemType):
return item
else:
if item.name == name and item.title == title and (itemType is None or item.type == itemType):
return item
return None
#----------------------------------------------------------------------
def folderExist(self, name, folders):
"""Determines if a folder exists, case insensitively.
Args:
name (str): The name of the folder to check.
folders (list): A list of folder dicts to check against. The dicts must contain
the key:value pair ``title``.
Returns:
            ``True`` if the folder exists in the list, ``None`` if it does not,
                and ``False`` when ``name`` is empty or ``None``.
"""
if name is not None and name != '':
folderID = None
for folder in folders:
if folder['title'].lower() == name.lower():
return True
del folders
return folderID
else:
return False
#----------------------------------------------------------------------
def publishItems(self, items_info):
"""Publishes a list of items.
Args:
items_info (list): A list of JSON configuration items to publish.
Returns:
list: A list of results from :py:meth:`arcrest.manageorg._content.User.addItem`.
"""
if self.securityhandler is None:
print ("Security handler required")
return
itemInfo = None
item_results = None
item_info = None
admin = None
try:
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
item_results = []
for item_info in items_info:
if 'ReplaceTag' in item_info:
itemInfo = {"ReplaceTag":item_info['ReplaceTag'] }
else:
itemInfo = {"ReplaceTag":"{FeatureService}" }
itemInfo['ItemInfo'] = self._publishItems(config=item_info)
if itemInfo['ItemInfo'] is not None and 'name' in itemInfo['ItemInfo']:
print ("%s created" % itemInfo['ItemInfo']['name'])
item_results.append(itemInfo)
else:
print (str(itemInfo['ItemInfo']))
return item_results
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishItems",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
itemInfo = None
item_results = None
item_info = None
admin = None
del itemInfo
del item_results
del item_info
del admin
gc.collect()
#----------------------------------------------------------------------
def _publishItems(self, config):
name = None
tags = None
description = None
extent = None
admin = None
adminusercontent = None
itemData = None
itemId = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
itemParams = None
content = None
userInfo = None
userCommunity = None
results = None
folderName = None
folderId = None
res = None
sea = None
group_ids = None
shareResults = None
updateParams = None
url = None
        resultItem = {}
        item = None
try:
name = ''
tags = ''
description = ''
extent = ''
webmap_data = ''
if 'Data' in config:
itemData = config['Data']
if 'Url' in config:
url = config['Url']
name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
datestring = datetime.datetime.now().strftime(loc_df)
name = name.replace('{DATE}',datestring)
name = name.replace('{Date}',datestring)
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
folderName = config['Folder']
thumbnail = config['Thumbnail']
itemType = config['Type']
typeKeywords = config['typeKeywords']
skipIfExist = False
if 'SkipIfExist' in config:
skipIfExist = config['SkipIfExist']
if str(skipIfExist).lower() == 'true':
skipIfExist = True
itemParams = arcrest.manageorg.ItemParameter()
itemParams.title = name
itemParams.thumbnail = thumbnail
itemParams.type = itemType
itemParams.overwrite = True
itemParams.snippet = snippet
itemParams.description = description
itemParams.extent = extent
itemParams.tags = tags
itemParams.typeKeywords = ",".join(typeKeywords)
admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=name, itemType=itemType,searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == itemType:
if 'name' in res and res['name'] == name:
itemId = res['id']
break
if 'title' in res and res['title'] == name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
if skipIfExist == True:
resultItem['itemId'] = item.id
resultItem['url'] = item.item._curl + "/data"
resultItem['folderId'] = folderId
resultItem['name'] = name
return resultItem
results = item.updateItem(itemParameters=itemParams,
data=itemData,serviceUrl=url)
if 'error' in results:
return results
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
try:
item = userInfo.addItem(itemParameters=itemParams,
overwrite=True,
url=url,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
filePath=itemData)
#updateParams = arcrest.manageorg.ItemParameter()
#updateParams.title = name
#updateResults = item.updateItem(itemParameters=updateParams)
except Exception as e:
print (e)
if item is None:
return "Item could not be added"
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=item.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
resultItem['itemId'] = item.id
resultItem['url'] = item.item._curl + "/data"
resultItem['folderId'] = folderId
resultItem['name'] = name
return resultItem
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishItems",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
name = None
tags = None
description = None
extent = None
admin = None
adminusercontent = None
itemData = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
itemId = None
thumbnail = None
itemType = None
itemParams = None
content = None
userInfo = None
userCommunity = None
results = None
folderName = None
folderId = None
res = None
sea = None
group_ids = None
shareResults = None
updateParams = None
del name
del tags
del description
del extent
del admin
del adminusercontent
del itemData
del datestring
del snippet
del everyone
del org
del groupNames
del itemId
del thumbnail
del itemType
del itemParams
del content
del userInfo
del userCommunity
del results
del folderName
del folderId
del res
del sea
del group_ids
del shareResults
del updateParams
gc.collect()
#----------------------------------------------------------------------
def publishMap(self, maps_info, fsInfo=None, itInfo=None):
"""Publishes a list of maps.
Args:
            maps_info (list): A list of JSON configuration maps to publish.
            fsInfo (list): Optional list of feature service publishing results
                (e.g. from :py:func:`publishFsFromMXD`) used to resolve Layer
                replace tags. Defaults to ``None``.
            itInfo (list): Optional list of item publishing results
                (e.g. from :py:func:`publishItems`) used to resolve Global
                replace tags. Defaults to ``None``.
Returns:
list: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.
"""
if self.securityhandler is None:
print ("Security handler required")
return
itemInfo = None
itemId = None
map_results = None
replaceInfo = None
replaceItem = None
map_info = None
admin = None
try:
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
map_results = []
for map_info in maps_info:
itemInfo = {}
if 'ReplaceInfo' in map_info:
replaceInfo = map_info['ReplaceInfo']
else:
replaceInfo = None
if replaceInfo != None:
for replaceItem in replaceInfo:
if replaceItem['ReplaceType'] == 'Layer':
if fsInfo is not None:
for fs in fsInfo:
if fs is not None and replaceItem['ReplaceString'] == fs['ReplaceTag']:
replaceItem['ReplaceString'] = fs['FSInfo']['url']
replaceItem['ItemID'] = fs['FSInfo']['itemId']
replaceItem['ItemFolder'] = fs['FSInfo']['folderId']
if 'convertCase' in fs['FSInfo']:
replaceItem['convertCase'] = fs['FSInfo']['convertCase']
elif 'ItemID' in replaceItem:
                                if 'ItemFolder' not in replaceItem:
itemId = replaceItem['ItemID']
itemInfo = admin.content.getItem(itemId=itemId)
if itemInfo.owner:
if itemInfo.owner == self._securityHandler.username and itemInfo.ownerFolder:
replaceItem['ItemFolder'] = itemInfo.ownerFolder
else:
replaceItem['ItemFolder'] = None
elif replaceItem['ReplaceType'] == 'Global':
if itInfo is not None:
for itm in itInfo:
if itm is not None:
if replaceItem['ReplaceString'] == itm['ReplaceTag']:
if 'ItemInfo' in itm:
if 'url' in itm['ItemInfo']:
replaceItem['ReplaceString'] = itm['ItemInfo']['url']
if 'ReplaceTag' in map_info:
itemInfo = {"ReplaceTag":map_info['ReplaceTag'] }
else:
itemInfo = {"ReplaceTag":"{WebMap}" }
itemInfo['MapInfo'] = self._publishMap(config=map_info,
replaceInfo=replaceInfo)
map_results.append(itemInfo)
print ("%s webmap created" % itemInfo['MapInfo']['Name'])
return map_results
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishMap",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
itemInfo = None
itemId = None
replaceInfo = None
replaceItem = None
map_info = None
admin = None
del itemInfo
del itemId
del replaceInfo
del replaceItem
del map_info
del admin
gc.collect()
#----------------------------------------------------------------------
def _publishMap(self, config, replaceInfo=None, operationalLayers=None, tableLayers=None):
name = None
tags = None
description = None
extent = None
webmap_data = None
itemJson = None
update_service = None
admin = None
adminusercontent = None
resultMap = None
json_data = None
replaceItem = None
opLayers = None
opLayer = None
layers = None
item = None
response = None
layerIdx = None
updatedLayer = None
updated = None
text = None
itemParams = None
updateResults = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
typeKeywords = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
try:
name = ''
tags = ''
description = ''
extent = ''
webmap_data = None
mapJson = config['ItemJSON']
if isinstance(mapJson,list):
webmap_data = []
for jsonItem in mapJson:
#if os.path.exists(jsonItem) == False:
#return {"Results":{"error": "%s does not exist" % jsonItem}}
#if webmap_data is None:
#try:
#with open(jsonItem) as webMapInfo:
#webmap_data = json.load(webMapInfo)
#except:
#raise ValueError("%s is not a valid JSON File" % jsonItem)
#else:
try:
with open(jsonItem) as webMapInfo:
webmap_data.append(json.load(webMapInfo))
except:
raise ValueError("%s is not a valid JSON File" % jsonItem)
webmap_data = common.merge_dicts(webmap_data)
else:
if os.path.exists(mapJson) == False:
return {"Results":{"error": "%s does not exist" % mapJson}}
try:
with open(mapJson) as webMapInfo:
webmap_data = json.load(webMapInfo)
except:
raise ValueError("%s is not a valid JSON File" % mapJson)
update_service = 'FALSE'
resultMap = {'Layers':[],'Tables':[],'Results':{}}
if webmap_data is not None:
layersInfo= {}
if operationalLayers:
webmap_data['operationalLayers'] = operationalLayers
if tableLayers:
webmap_data['tables'] = tableLayers
if replaceInfo:
for replaceItem in replaceInfo:
if replaceItem['ReplaceType'] == 'Global':
webmap_data = common.find_replace(webmap_data,replaceItem['SearchString'],replaceItem['ReplaceString'])
elif replaceItem['ReplaceType'] == 'Layer':
if 'tables' in webmap_data:
opLayers = webmap_data['tables']
for opLayer in opLayers:
layerInfo= {}
if replaceItem['SearchString'] in opLayer['url']:
opLayer['url'] = opLayer['url'].replace(replaceItem['SearchString'],replaceItem['ReplaceString'])
if 'ItemID' in replaceItem:
opLayer['itemId'] = replaceItem['ItemID']
else:
opLayer['itemId'] = None
#opLayer['itemId'] = get_guid()
if 'convertCase' in replaceItem:
if replaceItem['convertCase'] == 'lower':
layerInfo = {}
layerInfo['convertCase'] = replaceItem['convertCase']
layerInfo['fields'] = []
if 'layerDefinition' in opLayer:
if 'drawingInfo' in opLayer["layerDefinition"]:
if 'renderer' in opLayer["layerDefinition"]['drawingInfo']:
if 'field1' in opLayer["layerDefinition"]['drawingInfo']['renderer']:
opLayer["layerDefinition"]['drawingInfo']['renderer']['field1'] = opLayer["layerDefinition"]['drawingInfo']['renderer']['field1'].lower()
if 'labelingInfo' in opLayer["layerDefinition"]['drawingInfo']:
lblInfos = opLayer["layerDefinition"]['drawingInfo']['labelingInfo']
if len(lblInfos) > 0:
for lblInfo in lblInfos:
if 'labelExpression' in lblInfo:
result = re.findall(r"\[.*\]", lblInfo['labelExpression'])
if len(result)>0:
for res in result:
lblInfo['labelExpression'] = str(lblInfo['labelExpression']).replace(res,str(res).lower())
if 'labelExpressionInfo' in lblInfo:
if 'value' in lblInfo['labelExpressionInfo']:
result = re.findall(r"{.*}", lblInfo['labelExpressionInfo']['value'])
if len(result)>0:
for res in result:
lblInfo['labelExpressionInfo']['value'] = str(lblInfo['labelExpressionInfo']['value']).replace(res,str(res).lower())
if 'popupInfo' in opLayer:
if 'mediaInfos' in opLayer['popupInfo'] and not opLayer['popupInfo']['mediaInfos'] is None:
for chart in opLayer['popupInfo']['mediaInfos']:
if 'value' in chart:
if 'normalizeField' in chart and not chart['normalizeField'] is None:
chart['normalizeField'] = chart['normalizeField'].lower()
if 'fields' in chart['value']:
for i in range(len(chart['value']['fields'])):
chart['value']['fields'][i] = str(chart['value']['fields'][i]).lower()
if 'fieldInfos' in opLayer['popupInfo']:
for field in opLayer['popupInfo']['fieldInfos']:
newFld = str(field['fieldName']).lower()
if 'description' in opLayer['popupInfo']:
opLayer['popupInfo']['description'] = common.find_replace(obj = opLayer['popupInfo']['description'],
find = "{" + field['fieldName'] + "}",
replace = "{" + newFld + "}")
layerInfo['fields'].append({"PublishName":field['fieldName'],
'ConvertName':newFld})
field['fieldName'] = newFld
layersInfo[opLayer['id']] = layerInfo
opLayers = webmap_data['operationalLayers']
for opLayer in opLayers:
layerInfo= {}
if replaceItem['SearchString'] in opLayer['url']:
opLayer['url'] = opLayer['url'].replace(replaceItem['SearchString'],replaceItem['ReplaceString'])
if 'ItemID' in replaceItem:
opLayer['itemId'] = replaceItem['ItemID']
else:
opLayer['itemId'] = None
#opLayer['itemId'] = get_guid()
if 'convertCase' in replaceItem:
if replaceItem['convertCase'] == 'lower':
layerInfo = {}
layerInfo['convertCase'] = replaceItem['convertCase']
layerInfo['fields'] = []
if 'layerDefinition' in opLayer:
if 'drawingInfo' in opLayer["layerDefinition"]:
if 'renderer' in opLayer["layerDefinition"]['drawingInfo']:
if 'field1' in opLayer["layerDefinition"]['drawingInfo']['renderer']:
opLayer["layerDefinition"]['drawingInfo']['renderer']['field1'] = opLayer["layerDefinition"]['drawingInfo']['renderer']['field1'].lower()
if 'labelingInfo' in opLayer["layerDefinition"]['drawingInfo']:
lblInfos = opLayer["layerDefinition"]['drawingInfo']['labelingInfo']
if len(lblInfos) > 0:
for lblInfo in lblInfos:
if 'labelExpression' in lblInfo:
result = re.findall(r"\[.*\]", lblInfo['labelExpression'])
if len(result)>0:
for res in result:
lblInfo['labelExpression'] = str(lblInfo['labelExpression']).replace(res,str(res).lower())
if 'labelExpressionInfo' in lblInfo:
if 'value' in lblInfo['labelExpressionInfo']:
result = re.findall(r"{.*}", lblInfo['labelExpressionInfo']['value'])
if len(result)>0:
for res in result:
lblInfo['labelExpressionInfo']['value'] = str(lblInfo['labelExpressionInfo']['value']).replace(res,str(res).lower())
if 'popupInfo' in opLayer:
if 'mediaInfos' in opLayer['popupInfo'] and not opLayer['popupInfo']['mediaInfos'] is None:
for k in range(len(opLayer['popupInfo']['mediaInfos'])):
chart = opLayer['popupInfo']['mediaInfos'][k]
if 'value' in chart:
if 'normalizeField' in chart and not chart['normalizeField'] is None:
chart['normalizeField'] = chart['normalizeField'].lower()
if 'fields' in chart['value']:
for i in range(len(chart['value']['fields'])):
chart['value']['fields'][i] = str(chart['value']['fields'][i]).lower()
opLayer['popupInfo']['mediaInfos'][k] = chart
if 'fieldInfos' in opLayer['popupInfo']:
for field in opLayer['popupInfo']['fieldInfos']:
newFld = str(field['fieldName']).lower()
if 'description' in opLayer['popupInfo']:
opLayer['popupInfo']['description'] = common.find_replace(obj = opLayer['popupInfo']['description'],
find = "{" + field['fieldName'] + "}",
replace = "{" + newFld + "}")
layerInfo['fields'].append({"PublishName":field['fieldName'],
'ConvertName':newFld})
field['fieldName'] = newFld
layersInfo[opLayer['id']] = layerInfo
opLayers = webmap_data['operationalLayers']
resultMap['Layers'] = {}
for opLayer in opLayers:
currentID = opLayer['id']
#if 'url' in opLayer:
#opLayer['id'] = common.getLayerName(url=opLayer['url']) + "_" + str(common.random_int_generator(maxrange = 9999))
if 'applicationProperties' in webmap_data:
if 'editing' in webmap_data['applicationProperties'] and \
not webmap_data['applicationProperties']['editing'] is None:
if 'locationTracking' in webmap_data['applicationProperties']['editing'] and \
not webmap_data['applicationProperties']['editing']['locationTracking'] is None:
if 'info' in webmap_data['applicationProperties']['editing']['locationTracking'] and \
not webmap_data['applicationProperties']['editing']['locationTracking']['info'] is None:
if 'layerId' in webmap_data['applicationProperties']['editing']['locationTracking']['info']:
if webmap_data['applicationProperties']['editing']['locationTracking']['info']['layerId'] == currentID:
webmap_data['applicationProperties']['editing']['locationTracking']['info']['layerId'] = opLayer['id']
if 'viewing' in webmap_data['applicationProperties'] and \
not webmap_data['applicationProperties']['viewing'] is None:
if 'search' in webmap_data['applicationProperties']['viewing'] and \
not webmap_data['applicationProperties']['viewing']['search'] is None:
if 'layers' in webmap_data['applicationProperties']['viewing']['search'] and \
not webmap_data['applicationProperties']['viewing']['search']['layers'] is None:
for k in range(len(webmap_data['applicationProperties']['viewing']['search']['layers'])):
searchlayer = webmap_data['applicationProperties']['viewing']['search']['layers'][k]
if searchlayer['id'] == currentID:
searchlayer['id'] = opLayer['id']
if 'fields' in searchlayer and \
not searchlayer['fields'] is None:
for i in range(len(searchlayer['fields'])):
searchlayer['fields'][i]['Name'] = str(searchlayer['fields'][i]['Name']).lower()
if 'field' in searchlayer and \
not searchlayer['field'] is None:
searchlayer['field']['name'] = searchlayer['field']['name'].lower()
webmap_data['applicationProperties']['viewing']['search']['layers'][k] = searchlayer
if 'applicationProperties' in webmap_data:
webmap_data['applicationProperties'] = common.find_replace(webmap_data['applicationProperties'], currentID, opLayer['id'])
resultLayer = {"Name":opLayer['title'],
"ID":opLayer['id']
}
if currentID in layersInfo:
resultLayer['FieldInfo'] = layersInfo[currentID]
resultMap['Layers'][currentID] = resultLayer
if 'tables' in webmap_data:
opLayers = webmap_data['tables']
for opLayer in opLayers:
currentID = opLayer['id']
#opLayer['id'] = common.getLayerName(url=opLayer['url']) + "_" + str(common.random_int_generator(maxrange = 9999))
if 'applicationProperties' in webmap_data:
if 'editing' in webmap_data['applicationProperties'] and \
not webmap_data['applicationProperties']['editing'] is None:
if 'locationTracking' in webmap_data['applicationProperties']['editing'] and \
not webmap_data['applicationProperties']['editing']['locationTracking'] is None:
if 'info' in webmap_data['applicationProperties']['editing']['locationTracking'] and \
not webmap_data['applicationProperties']['editing']['locationTracking']['info'] is None:
if 'layerId' in webmap_data['applicationProperties']['editing']['locationTracking']['info']:
if webmap_data['applicationProperties']['editing']['locationTracking']['info']['layerId'] == currentID:
webmap_data['applicationProperties']['editing']['locationTracking']['info']['layerId'] = opLayer['id']
if 'viewing' in webmap_data['applicationProperties'] and \
not webmap_data['applicationProperties']['viewing'] is None:
if 'search' in webmap_data['applicationProperties']['viewing'] and \
not webmap_data['applicationProperties']['viewing']['search'] is None:
if 'layers' in webmap_data['applicationProperties']['viewing']['search'] and \
not webmap_data['applicationProperties']['viewing']['search']['layers'] is None:
for k in range(len(webmap_data['applicationProperties']['viewing']['search']['layers'])):
searchlayer = webmap_data['applicationProperties']['viewing']['search']['layers'][k]
if searchlayer['id'] == currentID:
searchlayer['id'] = opLayer['id']
if 'fields' in searchlayer and \
not searchlayer['fields'] is None:
for i in range(len(searchlayer['fields'])):
searchlayer['fields'][i]['Name'] = str(searchlayer['fields'][i]['Name']).lower()
if 'field' in searchlayer and \
not searchlayer['field'] is None:
searchlayer['field']['name'] = searchlayer['field']['name'].lower()
webmap_data['applicationProperties']['viewing']['search']['layers'][k] = searchlayer
if 'applicationProperties' in webmap_data:
webmap_data['applicationProperties'] = common.find_replace(webmap_data['applicationProperties'], currentID, opLayer['id'])
resultMap['Tables'].append({"Name":opLayer['title'],"ID":opLayer['id']})
name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
datestring = datetime.datetime.now().strftime(loc_df)
name = name.replace('{DATE}',datestring)
name = name.replace('{Date}',datestring)
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
extent = config['Extent']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
folderName = config['Folder']
thumbnail = config['Thumbnail']
itemType = config['Type']
typeKeywords = config['typeKeywords']
if webmap_data is None:
return None
itemParams = arcrest.manageorg.ItemParameter()
itemParams.title = name
itemParams.thumbnail = thumbnail
itemParams.type = "Web Map"
itemParams.overwrite = True
itemParams.snippet = snippet
itemParams.description = description
itemParams.extent = extent
itemParams.tags = tags
itemParams.typeKeywords = ",".join(typeKeywords)
admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=name, itemType=itemType,searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == itemType:
if 'name' in res and res['name'] == name:
itemId = res['id']
break
if 'title' in res and res['title'] == name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
results = item.updateItem(itemParameters=itemParams,
text=json.dumps(webmap_data))
if 'error' in results:
return results
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
try:
item = userInfo.addItem(itemParameters=itemParams,
overwrite=True,
url=None,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
text=json.dumps(webmap_data))
except Exception as e:
print (e)
if item is None:
return "Item could not be added"
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=item.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = name
updateResults = item.updateItem(itemParameters=updateParams)
resultMap['Results']['itemId'] = item.id
resultMap['folderId'] = folderId
resultMap['Name'] = name
return resultMap
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishMap",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
name = None
tags = None
description = None
extent = None
webmap_data = None
itemJson = None
update_service = None
admin = None
adminusercontent = None
resultMap = None
json_data = None
replaceItem = None
opLayers = None
opLayer = None
layers = None
item = None
response = None
layerIdx = None
updatedLayer = None
updated = None
text = None
itemParams = None
updateResults = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
typeKeywords = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
del name
del tags
del description
del extent
del webmap_data
del itemJson
del update_service
del admin
del adminusercontent
del resultMap
del json_data
del replaceItem
del opLayers
del opLayer
del layers
del item
del response
del layerIdx
del updatedLayer
del updated
del text
del itemParams
del updateResults
del loc_df
del datestring
del snippet
del everyone
del org
del groupNames
del folderName
del thumbnail
del itemType
del typeKeywords
del userCommunity
del userContent
del folderId
del res
del folderContent
del itemId
del group_ids
del shareResults
del updateParams
gc.collect()
#----------------------------------------------------------------------
def publishCombinedWebMap(self, maps_info, webmaps):
"""Publishes a combination of web maps.
Args:
            maps_info (list): A list of JSON configuration combined web maps to publish.
            webmaps (list): A list of web map item IDs whose operational layers and
                tables are merged into each published map.
Returns:
list: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.
"""
if self.securityhandler is None:
print ("Security handler required")
return
admin = None
map_results = None
map_info = None
operationalLayers = None
tableLayers = None
item = None
response = None
opLays = None
operationalLayers = None
tblLays = None
tblLayer = None
itemInfo = None
try:
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
map_results = []
for map_info in maps_info:
operationalLayers = []
tableLayers = []
for webmap in webmaps:
item = admin.content.getItem(itemId=webmap)
response = item.itemData()
if 'operationalLayers' in response:
opLays = []
for opLayer in response['operationalLayers']:
opLays.append(opLayer)
opLays.extend(operationalLayers)
operationalLayers = opLays
if 'tables' in response:
tblLays = []
for tblLayer in response['tables']:
tblLays.append(tblLayer)
tblLays.extend(tableLayers)
tableLayers = tblLays
if 'ReplaceTag' in map_info:
itemInfo = {"ReplaceTag":map_info['ReplaceTag'] }
else:
itemInfo = {"ReplaceTag":"{WebMap}" }
itemInfo['MapInfo'] = self._publishMap(config=map_info,
replaceInfo=None,
operationalLayers=operationalLayers,
tableLayers=tableLayers)
map_results.append(itemInfo)
if not itemInfo is None:
if not 'error' in itemInfo['MapInfo']['Results']:
print ("%s webmap created" % itemInfo['MapInfo']['Name'])
else:
print (str(itemInfo['MapInfo']['Results']))
else:
print ("Map not created")
return map_results
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishedCombinedWebMap",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
admin = None
map_info = None
tableLayers = None
item = None
response = None
opLays = None
operationalLayers = None
tblLays = None
tblLayer = None
itemInfo = None
del admin
del map_info
del tableLayers
del item
del response
del opLays
del operationalLayers
del tblLays
del tblLayer
del itemInfo
gc.collect()
#----------------------------------------------------------------------
def publishFsFromMXD(self, fs_config):
"""Publishes the layers in a MXD to a feauture service.
Args:
fs_config (list): A list of JSON configuration feature service details to publish.
Returns:
dict: A dictionary of results objects.
"""
fs = None
res = None
resItm = None
if self.securityhandler is None:
print ("Security handler required")
return
if self.securityhandler.is_portal:
url = self.securityhandler.org_url
else:
url = 'http://www.arcgis.com'
try:
res = []
if isinstance(fs_config, list):
for fs in fs_config:
if 'ReplaceTag' in fs:
resItm = {"ReplaceTag":fs['ReplaceTag'] }
else:
resItm = {"ReplaceTag":"{FeatureService}" }
resItm['FSInfo'] = self._publishFSFromMXD(config=fs, url=url)
if not resItm['FSInfo'] is None and 'url' in resItm['FSInfo']:
print ("%s created" % resItm['FSInfo']['url'])
res.append(resItm)
else:
print (str(resItm['FSInfo']))
else:
if 'ReplaceTag' in fs_config:
resItm = {"ReplaceTag":fs_config['ReplaceTag'] }
else:
resItm = {"ReplaceTag":"{FeatureService}" }
resItm['FSInfo'] = self._publishFSFromMXD(config=fs_config, url=url)
if 'url' in resItm['FSInfo']:
print ("%s created" % resItm['FSInfo']['url'])
res.append(resItm)
else:
print (str(resItm['FSInfo']))
return res
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishFsFromMXD",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
resItm = None
fs = None
del resItm
del fs
gc.collect()
#----------------------------------------------------------------------
def publishFeatureCollections(self, configs):
"""Publishes feature collections to a feature service.
Args:
configs (list): A list of JSON configuration feature service details to publish.
Returns:
dict: A dictionary of results objects.
"""
if self.securityhandler is None:
print ("Security handler required")
return
config = None
res = None
resItm = None
try:
res = []
if isinstance(configs, list):
for config in configs:
if 'ReplaceTag' in config:
resItm = {"ReplaceTag":config['ReplaceTag'] }
else:
resItm = {"ReplaceTag":"{FeatureService}" }
if 'Zip' in config:
resItm['FCInfo'] = self._publishFeatureCollection(config=config)
if not resItm['FCInfo'] is None and 'id' in resItm['FCInfo']:
print ("%s feature collection created" % resItm['FCInfo']['id'])
res.append(resItm)
else:
print (str(resItm['FCInfo']))
return res
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishFeatureCollections",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
resItm = None
config = None
del resItm
del config
gc.collect()
#----------------------------------------------------------------------
def _publishFSFromMXD(self, config, url='http://www.arcgis.com'):
mxd = None
q = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
capabilities = None
maxRecordCount = None
loc_df = None
datestring = None
service_name = None
service_name_safe = None
sd_Info = None
admin = None
itemParams = None
adminusercontent = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
resultSD = None
publishParameters = None
resultFS = None
delres = None
status = None
group_ids = None
shareResults = None
updateParams = None
enableEditTracking = None
adminFS = None
json_dict = None
enableResults = None
layer = None
layers = None
layUpdateResult = None
definition = None
try:
# Report settings
dataFile = None
if 'Mxd' in config:
dataFile = config['Mxd']
elif 'Zip' in config:
dataFile = config['Zip']
# Service settings
service_name = config['Title']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
if 'EnableEditTracking' in config:
print ("enableEditTracking parameter has been deprecated, please add a definition section to the config")
enableEditTracking = config['EnableEditTracking']
else:
#print ("Please add an EnableEditTracking parameter to your feature service section")
enableEditTracking = False
folderName = config['Folder']
thumbnail = config['Thumbnail']
if 'Capabilities' in config:
print ("Capabilities parameter has been deprecated, please add a definition section to the config")
capabilities = config['Capabilities']
if 'Definition' in config:
definition = config['Definition']
if 'capabilities' in definition:
capabilities = definition['capabilities']
if 'maxRecordCount' in config:
maxRecordCount = config["maxRecordCount"]
else:
maxRecordCount = '1000' # If not cast as a string, the MXDtoFeatureServiceDef method called below returns an error stating 'cannot serialize 1000 (type int)'
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
skipIfExist = False
if 'SkipIfExist' in config:
skipIfExist = config['SkipIfExist']
if str(skipIfExist).lower() == 'true':
skipIfExist = True
datestring = datetime.datetime.now().strftime(loc_df)
service_name = service_name.replace('{DATE}',datestring)
service_name = service_name.replace('{Date}',datestring)
service_name_safe = service_name.replace(' ','_')
service_name_safe = service_name_safe.replace(':','_')
service_name_safe = service_name_safe.replace('-','_')
if os.path.exists(path=dataFile) == False:
raise ValueError("data file does not exit")
extension = os.path.splitext(dataFile)[1]
admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
hostingServers = admin.hostingServers()
if len(hostingServers) == 0:
return "No hosting servers can be found, if this is portal, update the settings to include a hosting server."
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
if skipIfExist == True:
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=service_name, itemType='Feature Service',searchorg=False)
if 'total' in items:
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Service':
if 'name' in res and res['name'] == service_name:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name:
itemId = res['id']
break
if itemId is not None:
defItem = content.getItem(itemId)
results = {
"url": defItem.url,
"folderId": folderId,
"itemId": defItem.id,
"convertCase": self._featureServiceFieldCase,
"messages":"Exist"
}
return results
else:
print ("Error searching organzation, {0}".format(items))
if (extension == ".mxd"):
dataFileType = "serviceDefinition"
searchType = "Service Definition"
sd_Info = arcrest.common.servicedef.MXDtoFeatureServiceDef(mxd_path=dataFile,
service_name=service_name_safe,
tags=None,
description=None,
folder_name=None,
capabilities=capabilities,
maxRecordCount=maxRecordCount,
server_type='MY_HOSTED_SERVICES',
url=url)
if sd_Info is not None:
publishParameters = arcrest.manageorg.PublishSDParameters(tags=sd_Info['tags'],
overwrite='true')
elif (extension == ".zip"):
dataFileType = "Shapefile"
searchType = "Shapefile"
sd_Info = {'servicedef':dataFile,'tags':config['Tags']}
description = ""
if 'Description' in config:
description = config['Description']
publishParameters = arcrest.manageorg.PublishShapefileParameter(name=service_name,
layerInfo={'capabilities':capabilities},
description=description)
if 'hasStaticData' in definition:
publishParameters.hasStaticData = definition['hasStaticData']
if sd_Info is None:
print ("Publishing SD or Zip not valid")
raise common.ArcRestHelperError({
"function": "_publishFsFromMXD",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": "Publishing SD or Zip not valid"
})
itemParams = arcrest.manageorg.ItemParameter()
#if isinstance(hostingServers[0],arcrest.manageags.administration.AGSAdministration):
#itemParams.title = service_name_safe
#else:
#itemParams.title = service_name
itemParams.title = service_name
itemParams.thumbnail = thumbnail
itemParams.type = searchType
itemParams.overwrite = True
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=service_name, itemType=searchType,searchorg=False)
defItem = None
defItemID = None
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == searchType:
if 'name' in res and res['name'] == service_name:
defItemID = res['id']
break
if 'title' in res and res['title'] == service_name:
defItemID = res['id']
break
#itemId = items['results'][0]['id']
if not defItemID is None:
defItem = content.getItem(defItemID).userItem
resultSD = defItem.updateItem(itemParameters=itemParams,
data=sd_Info['servicedef'])
if 'error' in resultSD:
return resultSD
if defItem.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=defItem.id,folder=folderId)
else:
try:
defItem = userInfo.addItem(itemParameters=itemParams,
filePath=sd_Info['servicedef'],
overwrite=True,
url=None,
text=None,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None)
except Exception as e:
print (e)
if defItem is None:
return "Item could not be added "
try:
serviceItem = userInfo.publishItem(
fileType=dataFileType,
itemId=defItem.id,
publishParameters=publishParameters,
overwrite = True,
wait=True)
except Exception as e:
print ("Error publishing item: Error Details: {0}".format(str(e)))
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title =service_name, itemType='Feature Service',searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Service':
if 'name' in res and res['name'] == service_name:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name:
itemId = res['id']
break
if not itemId is None:
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title =service_name_safe, itemType='Feature Service',searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Service':
if 'name' in res and res['name'] == service_name_safe:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name_safe:
itemId = res['id']
break
if not itemId is None:
existingItem = admin.content.getItem(itemId = itemId).userItem
if existingItem.url is not None:
adminFS = AdminFeatureService(url=existingItem.url, securityHandler=self._securityHandler)
cap = str(adminFS.capabilities)
existingDef = {}
if 'Sync' in cap:
print ("Disabling Sync")
capItems = cap.split(',')
if 'Sync' in capItems:
capItems.remove('Sync')
existingDef['capabilities'] = ','.join(capItems)
enableResults = adminFS.updateDefinition(json_dict=existingDef)
if 'error' in enableResults:
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
else:
print ("Sync Disabled")
else:
print ("Attempting to delete")
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
adminFS = None
del adminFS
else:
print ("Attempting to delete")
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
else:
print ("Item exist and cannot be found, probably owned by another user.")
raise common.ArcRestHelperError({
"function": "_publishFsFromMXD",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": "Item exist and cannot be found, probably owned by another user."
}
)
try:
serviceItem = userInfo.publishItem(
fileType=dataFileType,
itemId=defItem.id,
overwrite = True,
publishParameters=publishParameters,
wait=True)
except Exception as e:
print ("Overwrite failed, deleting")
delres = userInfo.deleteItems(items=existingItem.id)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
try:
serviceItem = userInfo.publishItem(
fileType=dataFileType,
itemId=defItem.id,
overwrite = True,
publishParameters=publishParameters,
wait=True)
except Exception as e:
return e
results = {
"url": serviceItem.url,
"folderId": folderId,
"itemId": serviceItem.id,
"convertCase": self._featureServiceFieldCase,
"messages":""
}
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=serviceItem.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = service_name
updateResults = serviceItem.updateItem(itemParameters=updateParams)
adminFS = AdminFeatureService(url=serviceItem.url, securityHandler=self._securityHandler)
if enableEditTracking == True or str(enableEditTracking).upper() == 'TRUE':
json_dict = {'editorTrackingInfo':{}}
json_dict['editorTrackingInfo']['allowOthersToDelete'] = True
json_dict['editorTrackingInfo']['allowOthersToUpdate'] = True
json_dict['editorTrackingInfo']['enableEditorTracking'] = True
json_dict['editorTrackingInfo']['enableOwnershipAccessControl'] = False
enableResults = adminFS.updateDefinition(json_dict=json_dict)
if 'error' in enableResults:
results['messages'] += enableResults
json_dict = {'editFieldsInfo':{}}
json_dict['editFieldsInfo']['creationDateField'] = ""
json_dict['editFieldsInfo']['creatorField'] = ""
json_dict['editFieldsInfo']['editDateField'] = ""
json_dict['editFieldsInfo']['editorField'] = ""
layers = adminFS.layers
tables = adminFS.tables
for layer in layers:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] += layUpdateResult['error']
if not tables is None:
for layer in tables:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] += layUpdateResult['error']
if definition is not None:
enableResults = adminFS.updateDefinition(json_dict=definition)
if enableResults is not None and 'error' in enableResults:
results['messages'] = enableResults
else:
if 'editorTrackingInfo' in definition:
if 'enableEditorTracking' in definition['editorTrackingInfo']:
if definition['editorTrackingInfo']['enableEditorTracking'] == True:
json_dict = {'editFieldsInfo':{}}
json_dict['editFieldsInfo']['creationDateField'] = ""
json_dict['editFieldsInfo']['creatorField'] = ""
json_dict['editFieldsInfo']['editDateField'] = ""
json_dict['editFieldsInfo']['editorField'] = ""
layers = adminFS.layers
tables = adminFS.tables
for layer in layers:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] = layUpdateResult['error']
if not tables is None:
for layer in tables:
if layer.canModifyLayer is None or layer.canModifyLayer == True:
if layer.editFieldsInfo is None:
layUpdateResult = layer.addToDefinition(json_dict=json_dict)
if 'error' in layUpdateResult:
layUpdateResult['error']['layerid'] = layer.id
results['messages'] = layUpdateResult['error']
return results
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishFsFromMXD",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
definition = None
mxd = None
q = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
capabilities = None
maxRecordCount = None
loc_df = None
datestring = None
service_name = None
service_name_safe = None
sd_Info = None
admin = None
itemParams = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
resultSD = None
publishParameters = None
resultFS = None
delres = None
status = None
group_ids = None
shareResults = None
updateParams = None
enableEditTracking = None
adminFS = None
json_dict = None
enableResults = None
layer = None
layers = None
layUpdateResult = None
del definition
del layer
del layers
del layUpdateResult
del mxd
del q
del everyone
del org
del groupNames
del folderName
del thumbnail
del capabilities
del maxRecordCount
del loc_df
del datestring
del service_name
del service_name_safe
del sd_Info
del admin
del itemParams
del userCommunity
del userContent
del folderId
del res
del folderContent
del itemId
del resultSD
del publishParameters
del resultFS
del delres
del status
del group_ids
del shareResults
del updateParams
del enableEditTracking
del adminFS
del json_dict
del enableResults
gc.collect()
#----------------------------------------------------------------------
def _publishAppLogic(self, appDet, map_info=None, fsInfo=None):
itemInfo = None
replaceInfo = None
replaceItem = None
mapDet = None
lay = None
itemId = None
admin = None
try:
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
itemInfo = {}
if 'ReplaceInfo' in appDet:
replaceInfo = appDet['ReplaceInfo']
else:
replaceInfo = None
if replaceInfo != None:
for replaceItem in replaceInfo:
if fsInfo is not None:
for fsDet in fsInfo:
if 'ReplaceTag' in fsDet:
if 'ReplaceString' in replaceItem:
if fsDet is not None and replaceItem['ReplaceString'] == fsDet['ReplaceTag'] and \
(replaceItem['ReplaceType'] == 'Service' or replaceItem['ReplaceType'] == 'Layer'):
replaceItem['ReplaceString'] = fsDet['FSInfo']['url']
replaceItem['ItemID'] = fsDet['FSInfo']['itemId']
replaceItem['ItemFolder'] = fsDet['FSInfo']['folderId']
if 'convertCase' in fsDet['FSInfo']:
replaceItem['convertCase'] = fsDet['FSInfo']['convertCase']
replaceItem['ReplaceType'] = "Global"
if map_info is not None:
for mapDet in map_info:
if 'ReplaceTag' in mapDet:
if 'ReplaceString' in replaceItem:
if mapDet is not None and replaceItem['ReplaceString'] == mapDet['ReplaceTag'] and \
replaceItem['ReplaceType'] == 'Map':
replaceItem['ItemID'] = mapDet['MapInfo']['Results']['itemId']
replaceItem['ItemFolder'] = mapDet['MapInfo']['folderId']
replaceItem['LayerInfo'] = mapDet['MapInfo']['Layers']
elif mapDet is not None and replaceItem['ReplaceType'] == 'Layer':
repInfo = replaceItem['ReplaceString'].split("|")
if len(repInfo) == 2:
if repInfo[0] == mapDet['ReplaceTag']:
for key,value in mapDet['MapInfo']['Layers'].items():
if value["Name"] == repInfo[1]:
replaceItem['ReplaceString'] = value["ID"]
if 'ItemID' in replaceItem:
                        if 'ItemFolder' not in replaceItem:
itemId = replaceItem['ItemID']
itemInfo = admin.content.getItem(itemId=itemId)
if itemInfo.owner == self._securityHandler.username and itemInfo.ownerFolder:
replaceItem['ItemFolder'] = itemInfo['ownerFolder']
else:
replaceItem['ItemFolder'] = None
if 'ReplaceTag' in appDet:
itemInfo = {"ReplaceTag":appDet['ReplaceTag'] }
else:
itemInfo = {"ReplaceTag":"{App}" }
if appDet['Type'] == 'Web Mapping Application':
itemInfo['AppInfo'] = self._publishApp(config=appDet,
replaceInfo=replaceInfo)
elif appDet['Type'] == 'Operation View':
itemInfo['AppInfo'] = self._publishDashboard(config=appDet,
replaceInfo=replaceInfo)
else:
itemInfo['AppInfo'] = self._publishApp(config=appDet,
replaceInfo=replaceInfo)
if not itemInfo['AppInfo'] is None:
if not 'error' in itemInfo['AppInfo']['Results'] :
print ("%s app created" % itemInfo['AppInfo']['Name'])
else:
print (str(itemInfo['AppInfo']['Results']))
else:
print ("App was not created")
return itemInfo
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishAppLogic",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
replaceInfo = None
replaceItem = None
mapDet = None
lay = None
itemId = None
admin = None
del admin
del replaceInfo
del replaceItem
del mapDet
del lay
del itemId
gc.collect()
#----------------------------------------------------------------------
def publishApp(self, app_info, map_info=None, fsInfo=None):
"""Publishes apps to AGOL/Portal
Args:
app_info (list): A list of JSON configuration apps to publish.
map_info (list): Defaults to ``None``.
fsInfo (list): Defaults to ``None``.
Returns:
dict: A dictionary of results objects.
"""
if self.securityhandler is None:
print ("Security handler required")
return
appDet = None
try:
app_results = []
if isinstance(app_info, list):
for appDet in app_info:
app_results.append(self._publishAppLogic(appDet=appDet,map_info=map_info,fsInfo=fsInfo))
else:
app_results.append(self._publishAppLogic(appDet=app_info,map_info=map_info,fsInfo=fsInfo))
return app_results
except (common.ArcRestHelperError) as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishApp",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
appDet = None
del appDet
gc.collect()
#----------------------------------------------------------------------
def _publishApp(self, config, replaceInfo):
resultApp = None
name = None
tags = None
description = None
extent = None
itemJson = None
admin = None
json_data = None
itemData = None
replaceItem = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
url = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
userCommunity = None
userContent = None
res = None
folderId = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
url = None
updateResults = None
portal = None
try:
resultApp = {'Results':{}}
name = ''
tags = ''
description = ''
extent = ''
itemJson = config['ItemJSON']
if os.path.exists(itemJson) == False:
return {"Results":{"error": "%s does not exist" % itemJson} }
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
portalself = admin.portals.portalSelf
if portalself.urlKey is None or portalself.customBaseUrl is None:
parsedURL = urlparse.urlparse(url=self._securityHandler.org_url, scheme='', allow_fragments=True)
orgURL = parsedURL.netloc + parsedURL.path
else:
orgURL = portalself.urlKey + '.' + portalself.customBaseUrl
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
folderName = config['Folder']
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
if os.path.exists(itemJson):
with open(itemJson) as json_data:
try:
itemData = json.load(json_data)
except:
raise ValueError("%s is not a valid JSON File" % itemJson)
for replaceItem in replaceInfo:
if replaceItem['ReplaceType'] == 'Map' and 'ItemID' in replaceItem:
if 'values' in itemData:
if 'webmap' in itemData['values']:
if itemData['values']['webmap'] == replaceItem['SearchString']:
itemData['values']['webmap'] = replaceItem['ItemID']
if 'folderId' in itemData:
itemData['folderId'] = replaceItem['ItemFolder']
if 'map' in itemData:
if 'itemId' in itemData['map']:
if itemData['map']['itemId'] == replaceItem['SearchString']:
itemData['map']['itemId'] = replaceItem['ItemID']
elif replaceItem['ReplaceType'] == 'Layer' and 'ReplaceString' in replaceItem:
itemData = common.find_replace(itemData,replaceItem['SearchString'],replaceItem['ReplaceString'])
elif replaceItem['ReplaceType'] == 'Folder':
if 'id' in userInfo.currentFolder:
folderID = userInfo.currentFolder['id']
else:
folderID = None
itemData = common.find_replace(itemData,replaceItem['SearchString'],folderID)
elif replaceItem['ReplaceType'] == 'Org':
itemData = common.find_replace(itemData,replaceItem['SearchString'],orgURL)
elif replaceItem['ReplaceType'] == 'GeoService':
if 'geometry' in portalself.helperServices:
if 'url' in portalself.helperServices["geometry"]:
itemData = common.find_replace(itemData,replaceItem['SearchString'],portalself.helperServices["geometry"]['url'])
elif replaceItem['ReplaceType'] == 'Global':
itemData = common.find_replace(itemData,replaceItem['SearchString'],replaceItem['ReplaceString'])
else:
print ("%s does not exist." % itemJson)
itemData = None
name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
datestring = datetime.datetime.now().strftime(loc_df)
name = name.replace('{DATE}',datestring)
name = name.replace('{Date}',datestring)
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
url = config['Url']
thumbnail = config['Thumbnail']
itemType = config['Type']
typeKeywords = config['typeKeywords']
itemParams = arcrest.manageorg.ItemParameter()
itemParams.title = name
itemParams.thumbnail = thumbnail
itemParams.type = itemType
itemParams.overwrite = True
itemParams.description = description
itemParams.tags = tags
itemParams.snippet = snippet
itemParams.description = description
itemParams.typeKeywords = ",".join(typeKeywords)
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=name,
itemType=
["Web Mapping Application",
"Application"],
searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == itemType:
if 'name' in res and res['name'] == name:
itemId = res['id']
break
if 'title' in res and res['title'] == name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
results = item.updateItem(itemParameters=itemParams,
text=json.dumps(itemData))
if 'error' in results:
return results
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
try:
item = userInfo.addItem(
itemParameters=itemParams,
overwrite=True,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
text=json.dumps(itemData))
except Exception as e:
print (e)
if item is None:
return "App could not be added"
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=item.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = name
url = url.replace("{AppID}",item.id)
url = url.replace("{OrgURL}",orgURL)
#if portalself.urlKey is None or portalself.customBaseUrl is None:
#parsedURL = urlparse.urlparse(url=self._securityHandler.org_url, scheme='', allow_fragments=True)
#else:
#url = url.replace("{OrgURL}", portalself.urlKey + '.' + portalself.customBaseUrl)
updateParams.url = url
updateResults = item.updateItem(itemParameters=updateParams)
resultApp['Results']['itemId'] = item.id
resultApp['folderId'] = folderId
resultApp['Name'] = name
return resultApp
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishApp",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
name = None
tags = None
description = None
extent = None
itemJson = None
admin = None
adminusercontent = None
json_data = None
itemData = None
replaceItem = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
url = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
userCommunity = None
userContent = None
res = None
folderId = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
url = None
updateResults = None
portal = None
del name
del portal
del tags
del description
del extent
del itemJson
del admin
del adminusercontent
del json_data
del itemData
del replaceItem
del loc_df
del datestring
del snippet
del everyone
del org
del groupNames
del folderName
del url
del thumbnail
del itemType
del typeKeywords
del itemParams
del userCommunity
del userContent
del res
del folderId
del folderContent
del itemId
del group_ids
del shareResults
del updateParams
del updateResults
gc.collect()
#----------------------------------------------------------------------
def _publishDashboard(self, config, replaceInfo):
resultApp = None
tags = None
description = None
extent = None
itemJson = None
layerIDSwitch = None
admin = None
adminusercontent = None
json_data = None
itemData = None
replaceItem = None
item = None
response = None
layerNamesID = None
layerIDs = None
tableNamesID = None
tableIDs = None
opLayer = None
widget = None
widgets = None
mapTool = None
dataSource = None
configFileAsString = None
repl = None
name = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
adminusercontent = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
resultApp = None
updateResults = None
try:
resultApp = {'Results':{}}
tags = ''
description = ''
extent = ''
itemJson = config['ItemJSON']
if os.path.exists(itemJson) == False:
return {"Results":{"error": "%s does not exist" % itemJson} }
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
content = admin.content
userInfo = content.users.user()
userCommunity = admin.community
folderName = config['Folder']
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.refresh()
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
layerIDSwitch = []
if os.path.exists(itemJson):
with open(itemJson) as json_data:
try:
itemData = json.load(json_data)
except:
raise ValueError("%s is not a valid JSON File" % itemJson)
for replaceItem in replaceInfo:
if replaceItem['ReplaceType'] == 'Global':
itemData = common.find_replace(itemData,replaceItem['SearchString'],replaceItem['ReplaceString'])
elif replaceItem['ReplaceType'] == 'Map' and 'ItemID' in replaceItem:
item = admin.content.getItem(itemId=replaceItem['ItemID'])
response = item.itemData()
layerNamesID = {}
layerIDs =[]
tableNamesID = {}
tableIDs =[]
if 'operationalLayers' in response:
for opLayer in response['operationalLayers']:
#if 'LayerInfo' in replaceItem:
#for layers in replaceItem['LayerInfo']:
layerNamesID[opLayer['title']] = opLayer['id']
layerIDs.append(opLayer['id'])
if 'tables' in response:
for opLayer in response['tables']:
tableNamesID[opLayer['title']] = opLayer['id']
tableIDs.append(opLayer['id'])
widgets = itemData['widgets']
dataSourceIDToFields = {}
for widget in widgets:
if 'mapId' in widget:
if replaceItem['SearchString'] == widget['mapId']:
widget['mapId'] = replaceItem['ItemID']
if 'mapTools' in widget:
for mapTool in widget['mapTools']:
if 'layerIds' in mapTool:
mapTool['layerIds'] = layerIDs
if 'dataSources' in widget:
for dataSource in widget['dataSources']:
if 'layerId' in dataSource:
if 'LayerInfo' in replaceItem:
if dataSource['layerId'] in replaceItem['LayerInfo']:
layerIDSwitch.append({"OrigID":dataSource['layerId'],
"NewID":replaceItem['LayerInfo'][dataSource['layerId']]['ID']})
#'FieldInfo':replaceItem['LayerInfo'][dataSource['layerId']]['FieldInfo']})
#dataSourceIDToFields[dataSource['id']] = {'NewID': replaceItem['LayerInfo'][dataSource['layerId']]['ID'],
#'FieldInfo': replaceItem['LayerInfo'][dataSource['layerId']]['FieldInfo']}
dataSource['layerId'] = replaceItem['LayerInfo'][dataSource['layerId']]['ID']
elif dataSource['name'] in layerNamesID:
layerIDSwitch.append({"OrigID":dataSource['layerId'],"NewID":layerNamesID[dataSource['name']] })
dataSource['layerId'] = layerNamesID[dataSource['name']]
for dataSource in widget['dataSources']:
if 'filter' in dataSource:
if dataSource['parentDataSourceId'] in dataSourceIDToFields:
if 'whereClause' in dataSource['filter']:
whercla = str(dataSource['filter']['whereClause'])
if pyparsingInstall:
try:
selectResults = select_parser.select_stmt.parseString("select * from xyzzy where " + whercla)
whereElements = list(selectResults['where_expr'])
for h in range(len(whereElements)):
for field in dataSourceIDToFields[dataSource['parentDataSourceId']]['FieldInfo']['fields']:
if whereElements[h] == field['PublishName']:
whereElements[h] = field['ConvertName']
#whercla = whercla.replace(
#old=field['PublishName'],
#new=field['ConvertName'])
dataSource['filter']['whereClause'] = " ".join(whereElements)
except select_parser.ParseException as pe:
for field in dataSourceIDToFields[dataSource['parentDataSourceId']]['FieldInfo']['fields']:
                                                            if field['PublishName'] in whercla:
                                                                whercla = whercla.replace(
                                                                    field['PublishName'],
                                                                    field['ConvertName'])
                                                        # write the converted clause back, mirroring the parsed branch above
                                                        dataSource['filter']['whereClause'] = whercla
else:
for field in dataSourceIDToFields[dataSource['parentDataSourceId']]['FieldInfo']['fields']:
                                                        if field['PublishName'] in whercla:
                                                            whercla = whercla.replace(
                                                                field['PublishName'],
                                                                field['ConvertName'])
                                                    # write the converted clause back, mirroring the parsed branch above
                                                    dataSource['filter']['whereClause'] = whercla
configFileAsString = json.dumps(itemData)
for repl in layerIDSwitch:
configFileAsString.replace(repl['OrigID'],repl['NewID'])
itemData = json.loads(configFileAsString)
name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
datestring = datetime.datetime.now().strftime(loc_df)
name = name.replace('{DATE}',datestring)
name = name.replace('{Date}',datestring)
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
folderName = config['Folder']
thumbnail = config['Thumbnail']
itemType = config['Type']
typeKeywords = config['typeKeywords']
itemParams = arcrest.manageorg.ItemParameter()
itemParams.title = name
itemParams.thumbnail = thumbnail
itemParams.type = itemType
itemParams.overwrite = True
itemParams.description = description
itemParams.snippet = snippet
itemParams.typeKeywords = ",".join(typeKeywords)
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=name, itemType=
["Web Mapping Application",
"Application",
"Operation View"],
searchorg=False)
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == itemType:
if 'name' in res and res['name'] == name:
itemId = res['id']
break
if 'title' in res and res['title'] == name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
results = item.updateItem(itemParameters=itemParams,
text=json.dumps(itemData))
if 'error' in results:
return results
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
try:
item = userInfo.addItem(
itemParameters=itemParams,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
text=json.dumps(itemData))
except Exception as e:
print (e)
if item is None:
return "Dashboard could not be added"
group_ids = userCommunity.getGroupIDs(groupNames=groupNames)
shareResults = userInfo.shareItems(items=item.id,
groups=','.join(group_ids),
everyone=everyone,
org=org)
updateParams = arcrest.manageorg.ItemParameter()
updateParams.title = name
updateResults = item.updateItem(itemParameters=updateParams)
resultApp['Results']['itemId'] = item.id
resultApp['folderId'] = folderId
resultApp['Name'] = name
return resultApp
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishDashboard",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
tags = None
description = None
extent = None
itemJson = None
layerIDSwitch = None
admin = None
adminusercontent = None
json_data = None
itemData = None
replaceItem = None
item = None
response = None
layerNamesID = None
layerIDs = None
tableNamesID = None
tableIDs = None
opLayer = None
widget = None
widgets = None
mapTool = None
dataSource = None
configFileAsString = None
repl = None
name = None
loc_df = None
datestring = None
snippet = None
everyone = None
org = None
groupNames = None
folderName = None
thumbnail = None
itemType = None
typeKeywords = None
itemParams = None
adminusercontent = None
userCommunity = None
userContent = None
folderId = None
res = None
folderContent = None
itemId = None
group_ids = None
shareResults = None
updateParams = None
updateResults = None
del tags
del description
del extent
del itemJson
del layerIDSwitch
del admin
del json_data
del itemData
del replaceItem
del item
del response
del layerNamesID
del layerIDs
del tableNamesID
del tableIDs
del opLayer
del widget
del widgets
del mapTool
del dataSource
del configFileAsString
del repl
del name
del loc_df
del datestring
del snippet
del everyone
del org
del groupNames
del folderName
del thumbnail
del itemType
del typeKeywords
del itemParams
del adminusercontent
del userCommunity
del userContent
del folderId
del res
del folderContent
del itemId
del group_ids
del shareResults
del updateParams
del updateResults
gc.collect()
#----------------------------------------------------------------------
def updateFeatureService(self, efs_config):
"""Updates a feature service.
Args:
efs_config (list): A list of JSON configuration feature service details to update.
Returns:
dict: A dictionary of results objects.
"""
if self.securityhandler is None:
print ("Security handler required")
return
fsRes = None
fst = None
fURL = None
resItm= None
try:
fsRes = []
fst = featureservicetools.featureservicetools(securityinfo=self)
if isinstance(efs_config, list):
for ext_service in efs_config:
fURL = None
cs = 0
try:
if 'ChunkSize' in ext_service:
if common.is_number(ext_service['ChunkSize']):
cs = ext_service['ChunkSize']
except Exception as e:
pass
resItm={"DeleteDetails": None,"AddDetails":None}
if 'ItemId' in ext_service and 'LayerName' in ext_service:
fs = fst.GetFeatureService(itemId=ext_service['ItemId'],returnURLOnly=False)
if not fs is None:
fURL = fst.GetLayerFromFeatureService(fs=fs,layerName=ext_service['LayerName'],returnURLOnly=True)
if fURL is None and 'URL' in ext_service:
fURL = ext_service['URL']
if fURL is None:
print("Item and layer not found or URL not in config")
continue
if 'DeleteInfo' in ext_service:
if str(ext_service['DeleteInfo']['Delete']).upper() == "TRUE":
resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(url=fURL, sql=ext_service['DeleteInfo']['DeleteSQL'],chunksize=cs)
if not 'error' in resItm['DeleteDetails'] :
print ("Delete Successful: %s" % fURL)
else:
print (str(resItm['DeleteDetails']))
resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(url=fURL, pathToFeatureClass = ext_service['FeatureClass'],chunksize=cs)
fsRes.append(resItm)
if not 'error' in resItm['AddDetails']:
print ("Add Successful: %s " % fURL)
else:
print (str(resItm['AddDetails']))
else:
resItm={"DeleteDetails": None,"AddDetails":None}
fURL = efs_config['URL']
cs = 0
try:
if 'ChunkSize' in efs_config:
if common.is_number(efs_config['ChunkSize']):
cs = efs_config['ChunkSize']
except Exception as e:
pass
if 'ItemId' in efs_config and 'LayerName' in efs_config:
fs = fst.GetFeatureService(itemId=efs_config['ItemId'],returnURLOnly=False)
if not fs is None:
fURL = fst.GetLayerFromFeatureService(fs=fs,layerName=efs_config['LayerName'],returnURLOnly=True)
if fURL is None and 'URL' in efs_config:
fURL = efs_config['URL']
if fURL is None:
print("Item and layer not found or URL not in config")
return None
if 'DeleteInfo' in efs_config:
if str(efs_config['DeleteInfo']['Delete']).upper() == "TRUE":
resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(url=fURL, sql=efs_config['DeleteInfo']['DeleteSQL'],chunksize=cs)
if not 'error' in resItm['DeleteDetails'] :
print (" Delete Successful: %s" % fURL)
else:
print (" " + str(resItm['DeleteDetails']))
resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(url=fURL, pathToFeatureClass = efs_config['FeatureClass'],chunksize=cs)
fsRes.append(resItm)
if not 'error' in resItm['AddDetails']:
print (" Add Successful: %s " % fURL)
else:
print (" " + str(resItm['AddDetails']))
return fsRes
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "updateFeatureService",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
fst = None
fURL = None
resItm= None
del fst
del fURL
del resItm
gc.collect()
#----------------------------------------------------------------------
def _publishFeatureCollection(self, config):
try:
# Service settings
zipfile = config['Zip']
service_name = config['Title']
if 'DateTimeFormat' in config:
loc_df = config['DateTimeFormat']
else:
loc_df = dateTimeFormat
description = ""
if 'Description' in config:
description = config['Description']
tags = config['Tags']
snippet = config['Summary']
extent = config['Extent']
everyone = config['ShareEveryone']
org = config['ShareOrg']
groupNames = config['Groups'] #Groups are by ID. Multiple groups comma separated
folderName = config['Folder']
thumbnail = config['Thumbnail']
typeKeywords = config['typeKeywords']
datestring = datetime.datetime.now().strftime(loc_df)
service_name = service_name.replace('{DATE}',datestring)
service_name = service_name.replace('{Date}',datestring)
service_name_safe = service_name.replace(' ','_')
service_name_safe = service_name_safe.replace(':','_')
service_name_safe = service_name_safe.replace('-','_')
if os.path.exists(path=zipfile) == False:
raise ValueError("Zip does not exit")
admin = arcrest.manageorg.Administration(securityHandler=self.securityhandler)
content = admin.content
feature_content = content.FeatureContent
publishParameters = arcrest.manageorg.GenerateParameter(
name=service_name,maxRecordCount=4000
)
fcResults = feature_content.generate(publishParameters=publishParameters,
itemId=None,
filePath=zipfile,
fileType='shapefile')
if not 'featureCollection' in fcResults:
raise common.ArcRestHelperError({
"function": "_publishFeatureCollection",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": fcResults
})
if not 'layers' in fcResults['featureCollection']:
raise common.ArcRestHelperError({
"function": "_publishFeatureCollection",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": fcResults
})
fcJson = {'visibility':True,
'showLegend':True,
'opacity':1}
for layer in fcResults['featureCollection']['layers']:
oidFldName = ''
highOID = -1
popInfo = {'title':'',
'description':None,
'showAttachments': False,
'mediaInfo': [],
'fieldInfos': []
}
if 'layerDefinition' in layer:
extVal = extent.split(',')
layer['layerDefinition']['extent'] = {'type':'extent',
'xmin':extVal[0],
'ymin':extVal[1],
'xmax':extVal[2],
'ymax':extVal[3]
}
layer['layerDefinition']['spatialReference'] = {'wkid':102100}
if 'fields' in layer['layerDefinition']:
for field in layer['layerDefinition']['fields']:
fieldInfos = None
if field['type'] == 'esriFieldTypeOID':
oidFldName = field['name']
fieldInfos = {
'fieldName':field['name'],
'label':field['alias'],
'isEditable':False,
'tooltip':'',
'visible':False,
'format':None,
'stringFieldOption':'textbox'
}
elif field['type'] == 'esriFieldTypeInteger':
fieldInfos = {
'fieldName':field['name'],
'label':field['alias'],
'isEditable':True,
'tooltip':'',
'visible':True,
'format':{
'places':0,
'digitSeparator':True
},
'stringFieldOption':'textbox'
}
elif field['type'] == 'esriFieldTypeDouble':
fieldInfos = {
'fieldName':field['name'],
'label':field['alias'],
'isEditable':True,
'tooltip':'',
'visible':True,
'format':{
'places':2,
'digitSeparator':True
},
'stringFieldOption':'textbox'
}
elif field['type'] == 'esriFieldTypeString':
fieldInfos = {
'fieldName':field['name'],
'label':field['alias'],
'isEditable':True,
'tooltip':'',
'visible':True,
'format':None,
'stringFieldOption':'textbox'
}
else:
fieldInfos = {
'fieldName':field['name'],
'label':field['alias'],
'isEditable':True,
'tooltip':'',
'visible':True,
'format':None,
'stringFieldOption':'textbox'
}
if fieldInfos is not None:
popInfo['fieldInfos'].append(fieldInfos)
if 'featureSet' in layer:
if 'features' in layer['featureSet']:
for feature in layer['featureSet']['features']:
if 'attributes' in feature:
if feature['attributes'][oidFldName] > highOID:
                                    highOID = feature['attributes'][oidFldName]
layer['nextObjectId'] = highOID + 1
fcJson['layers'] = fcResults['featureCollection']['layers']
itemParams = arcrest.manageorg.ItemParameter()
itemParams.type = "Feature Collection"
itemParams.title = service_name
itemParams.thumbnail = thumbnail
itemParams.overwrite = True
itemParams.snippet = snippet
itemParams.description = description
itemParams.extent = extent
itemParams.tags = tags
itemParams.typeKeywords = ",".join(typeKeywords)
userInfo = content.users.user()
userCommunity = admin.community
if folderName is not None and folderName != "":
if self.folderExist(name=folderName,folders=userInfo.folders) is None:
res = userInfo.createFolder(name=folderName)
userInfo.currentFolder = folderName
if 'id' in userInfo.currentFolder:
folderId = userInfo.currentFolder['id']
sea = arcrest.find.search(securityHandler=self._securityHandler)
items = sea.findItem(title=service_name, itemType='Feature Collection',searchorg=False)
itemId = None
if items['total'] >= 1:
for res in items['results']:
if 'type' in res and res['type'] == 'Feature Collection':
if 'name' in res and res['name'] == service_name:
itemId = res['id']
break
if 'title' in res and res['title'] == service_name:
itemId = res['id']
break
if not itemId is None:
item = content.getItem(itemId).userItem
resultSD = item.updateItem(itemParameters=itemParams,
text=fcJson)
if item.ownerFolder != folderId:
if folderId is None:
folderId = "/"
moveRes = userInfo.moveItems(items=item.id,folder=folderId)
else:
resultSD = userInfo.addItem(itemParameters=itemParams,
overwrite=True,
url=None,
text= fcJson,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None)
if 'error' in resultSD:
if not itemId is None:
print ("Attempting to delete")
delres=userInfo.deleteItems(items=itemId)
if 'error' in delres:
print (delres)
return delres
print ("Delete successful")
else:
print ("Item exist and cannot be found, probably owned by another user.")
raise common.ArcRestHelperError({
"function": "_publishFeatureCollection",
"line": lineno(),
"filename": 'publishingtools.py',
"synerror": "Item exist and cannot be found, probably owned by another user."
})
resultSD = userInfo.addItem(itemParameters=itemParams,
overwrite=True,
url=None,
text=fcResults['featureCollection'],
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None)
return resultSD
else:
return resultSD
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "_publishFeatureCollection",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
gc.collect()
| apache-2.0 | -5,585,332,328,714,547,000 | 41.834936 | 201 | 0.440428 | false |
AnhellO/DAS_Sistemas | Ene-Jun-2021/perez-sanchez-jose-jahir/Examen/Ejercicio4/chain_of_test.py | 1 | 1251 | import unittest
from chain_of import *
# Tests for the chain of responsibility. Because each handler ends in a print rather than a return,
# every call evaluates to None, so the assertions compare against None to make the tests pass.
# This is not ideal: capturing stdout (for example with unittest.mock) would allow asserting on the
# printed output directly, since running the tests prints the values the handlers should produce.
class CajeroTest(unittest.TestCase):
def test_cadena_correcta(self):
b50 = Cajero50ConcreteHandler()
b20 = Cajero20ConcreteHandler()
b10 = Cajero10ConcreteHandler()
b50.next_succesor(b20).next_succesor(b10)
self.assertEqual(b50.handle(80), None)
def test_cadena_incorrecta(self):
b50 = Cajero50ConcreteHandler()
b20 = Cajero20ConcreteHandler()
b10 = Cajero10ConcreteHandler()
b10.next_succesor(b20).next_succesor(b50)
self.assertEqual(b10.handle(135), None)
def test_cadena_20_primero(self):
b50 = Cajero50ConcreteHandler()
b20 = Cajero20ConcreteHandler()
b10 = Cajero10ConcreteHandler()
b20.next_succesor(b50).next_succesor(b10)
self.assertEqual(b20.handle(90), None)
if __name__ == "__main__":
unittest.main() | mit | 4,310,769,567,808,078,300 | 42.172414 | 182 | 0.68745 | false |
ktnyt/autosearch | autosearch/searcher.py | 1 | 1549 | from autosearch.form import Form
from autosearch.path import Path, PathFinder
from autosearch.utils import *
class Searcher(object):
def __init__(self, url, query):
self.url = url
top = addScore(parse(fetch(url)), query)
forms = []
paths = []
scores = []
# Find forms with text inputs
for form in top.find_all('form'):
if 'action' not in form.attrs:
continue
form['action'] = absolutify(url, form['action'])
for input in form.find_all('input'):
attrs = input.attrs
if 'type' not in attrs or attrs['type'] in ['text', 'search']:
forms.append(Form(form))
if not len(forms):
return
# Try each form
for form in forms:
result = form(query)
finder = PathFinder().fromDom(result, tag='a', attr='href')
path, score = finder.bestPath()
paths.append(path)
scores.append(score)
# Find best form
i = argmax(scores)
form, path = forms[i], paths[i]
self.form = form
self.path = path.stringify()
def __call__(self, query):
form = self.form
path = self.path
result = form(query)
finder = PathFinder().fromDom(result, tag='a', attr='href')
matches, scores = finder.matchPath(path, string=True)
return [absolutify(form.action, match.elements[-1]['href']) for match in matches], scores
| mit | -2,114,310,529,578,682,600 | 29.98 | 97 | 0.53583 | false |
PaulEcoffet/stonewallsgate | dunwallsgate/test/test_battle.py | 1 | 5888 | import unittest
from collections import Counter
import battle
import inventory
from character import Character
class TestBattle(unittest.TestCase):
def test_init_battle(self):
char1 = Character(None, maxhealth=100, initiative=1000)
char2 = Character(None, maxhealth=100, initiative=1)
battle1 = battle.Battle([char1], [char2])
self.assertListEqual(battle1.team1, [char1])
self.assertListEqual(battle1.team2, [char2])
self.assertIs(battle1.playing_char, char1)
def test_possible_target(self):
char1 = Character(None, maxhealth=100, initiative=1000)
char2 = Character(None, maxhealth=100, initiative=10)
char3 = Character(None, maxhealth=100, initiative=1)
battle1 = battle.Battle([char1], [char2, char3])
self.assertEqual(Counter(battle1.possible_targets_attack()),
Counter([char2, char3]))
battle1.end_turn()
self.assertEqual(Counter(battle1.possible_targets_attack()),
Counter([char1]))
def test_do_attack(self):
char1 = Character(None, maxhealth=100, initiative=1000, attack=2)
char2 = Character(None, maxhealth=100, initiative=1, defense=0)
battle1 = battle.Battle([char1], [char2])
battle1.do_attack(char2)
self.assertLessEqual(char2.health, char2.maxhealth)
battle1.end_turn()
self.assertRaises(battle.CantAttackException, battle1.do_attack,
char2)
def test_end_turn(self):
char1 = Character(None, maxhealth=100, initiative=1000, attack=2)
char2 = Character(None, maxhealth=100, initiative=1, defense=0)
battle1 = battle.Battle([char1], [char2])
battle1.end_turn()
self.assertIs(battle1.playing_char, char2)
def test_get_all_character(self):
char1 = Character(None, maxhealth=100, initiative=1000, attack=2)
char2 = Character(None, maxhealth=100, initiative=1, defense=0)
battle1 = battle.Battle([char1], [char2])
self.assertEqual(Counter((char1, char2)),
Counter(battle1.all_characters))
    def test_get_current_player_team(self):
char1 = Character(None, maxhealth=100, initiative=1000, attack=2)
char2 = Character(None, maxhealth=100, initiative=1, defense=0)
battle1 = battle.Battle([char1], [char2])
self.assertEqual(battle1.cur_player_team, 1)
battle1.end_turn()
self.assertEqual(battle1.cur_player_team, 2)
def test_win(self):
char1 = Character(None, maxhealth=10, initiative=1000)
char2 = Character(None, maxhealth=10, initiative=10)
char3 = Character(None, maxhealth=10, initiative=1)
battle1 = battle.Battle([char1], [char2, char3])
self.assertIsNone(battle1.winner)
battle1.do_run()
self.assertEqual(battle1.winner, 2)
battle2 = battle.Battle([char1], [char2, char3])
while char1.is_alive:
if battle2.cur_player_team == 2:
battle2.do_attack(char1)
battle2.end_turn()
self.assertEqual(battle2.winner, 2)
char1 = Character(None, maxhealth=10, initiative=1)
char2 = Character(None, maxhealth=10, initiative=1000)
char3 = Character(None, maxhealth=10, initiative=10)
battle3 = battle.Battle([char1], [char2, char3])
battle3.do_run()
self.assertEqual(battle3.winner, 1)
battle4 = battle.Battle([char1], [char2, char3])
while char2.is_alive or char3.is_alive:
if battle4.cur_player_team == 1:
battle4.do_attack(battle4.possible_targets_attack()[0])
battle4.end_turn()
self.assertEqual(battle4.winner, 1)
def test_can_attack(self):
char1 = Character(None, maxhealth=100, initiative=1000, attack=2)
char2 = Character(None, maxhealth=0, initiative=1, defense=0)
char3 = Character(None, maxhealth=10, initiative=1, defense=0)
battle1 = battle.Battle([char1], [char2, char3])
self.assertFalse(battle1.can_attack(char1))
self.assertFalse(battle1.can_attack(char2))
self.assertTrue(battle1.can_attack(char3))
def test_already_played(self):
char1 = Character(None, maxhealth=100, initiative=1000)
char2 = Character(None, maxhealth=100, initiative=1)
gun = inventory.create_item("gun")
ammo = inventory.create_item("gun_ammo", 20)
char1.inventory.add(gun)
char1.inventory.add(ammo)
battle1 = battle.Battle([char1], [char2])
battle1.do_attack(battle1.possible_targets_attack()[0])
self.assertRaises(battle.AlreadyPlayedException,
battle1.change_weapon, gun, ammo)
self.assertRaises(battle.AlreadyPlayedException,
battle1.do_attack,
battle1.possible_targets_attack()[0])
def test_change_weapon(self):
char1 = Character(None, maxhealth=100, initiative=1000)
gun = inventory.create_item("gun")
ammo = inventory.create_item("gun_ammo", 20)
char1.inventory.add(gun)
char1.inventory.add(ammo)
battle1 = battle.Battle([char1], [])
battle1.change_weapon(gun, ammo)
battle1.end_turn()
self.assertRaises(battle.CantChangeWeaponException,
battle1.change_weapon,
inventory.create_item("gun"), ammo)
self.assertRaises(battle.CantChangeWeaponException,
battle1.change_weapon,
gun, inventory.create_item("gun_ammo"))
self.assertRaises(inventory.IncompatibleAmmoException,
battle1.change_weapon,
gun, None)
battle1.change_weapon(char1.inventory.get_first("bare_hands"), None)
| gpl-2.0 | -6,642,242,283,896,249,000 | 43.606061 | 76 | 0.625849 | false |
ndaniel/fusioncatcher | bin/build_report_fusions_psl.py | 1 | 28831 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
It produces a report with the summary of the fusion genes found. Also
FASTQ and FASTA files containing the supporting reads corresponding to each
fusion gene is generated.
Author: Daniel Nicorici, [email protected]
Copyright (c) 2009-2021 Daniel Nicorici
This file is part of FusionCatcher.
FusionCatcher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FusionCatcher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FusionCatcher (see file 'COPYING.txt'). If not, see
<http://www.gnu.org/licenses/>.
By default, FusionCatcher runs the BLAT aligner
<http://users.soe.ucsc.edu/~kent/src/>, but it also offers the option to disable
all of its scripts that make use of the BLAT aligner if you explicitly choose to do so.
BLAT's license does not allow it to be used for commercial activities. If BLAT's
license does not allow it to be used in your case, then you may still use
FusionCatcher by forcing it not to use the BLAT aligner, that is by specifying
the option '--skip-blat'. For more information regarding BLAT please see its license.
Please note that FusionCatcher does not require BLAT in order to find
candidate fusion genes!
This file is running/executing/using BLAT.
"""
import sys
import os
import optparse
import gc
import string
import zipfile
import Bio.SeqIO
import datetime
import tempfile
import shutil
import gzip
empty_zip_data = 'PK\x05\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
ttable = string.maketrans("ACGTN","TGCAA") # global
#ttable = string.maketrans("ACGTYRSWKMBDHV-","TGCARYSWMKVHDB-")
mapping_solexa2sanger = "".join([chr(0) for ascii in range(0, 59)]
+ [chr(33 + int(round(Bio.SeqIO.QualityIO.phred_quality_from_solexa(q)))) for q in range(-5, 62 + 1)]
+ [chr(0) for ascii in range(127, 256)])
mapping_illumina2sanger = "".join([chr(0) for ascii in range(0, 64)]
+ [chr(33 + q) for q in range(0, 62 + 1)]
+ [chr(0) for ascii in range(127, 256)])
def solexa2sanger(qual):
return qual.translate(mapping_solexa2sanger)
def illumina2sanger(qual):
return qual.translate(mapping_illumina2sanger)
def give_me_temp_filename(tmp_dir = None):
if tmp_dir and (not os.path.isdir(tmp_dir)) and (not os.path.islink(tmp_dir)):
os.makedirs(tmp_dir)
(ft,ft_name) = tempfile.mkstemp(dir = tmp_dir)
os.close(ft)
return ft_name
def myorder(a,b):
return (a,b) if a <= b else (b,a)
def dnaReverseComplement(seq):
seq = seq.upper()
seq = seq.translate(ttable)
return seq[::-1]
def reads_from_fastq_file(file_name, size_read_buffer = 10**8):
fid = None
if file_name == '-':
fid = sys.stdin
elif file_name.lower().endswith('.gz'):
fid = gzip.open(file_name,'r')
else:
fid = open(file_name,'r')
piece = [None,None,None,None]
ij = 0
while True:
gc.disable()
lines = fid.readlines(size_read_buffer)
gc.enable()
if not lines:
break
for line in lines:
ij = ij + 1
piece[ij-1] = line
if ij == 4:
bucket = (piece[0].rstrip('\r\n')[1:],
piece[1].rstrip('\r\n'),
piece[3].rstrip('\r\n'))
yield bucket
piece = [None,None,None,None]
ij = 0
fid.close()
def delete_file(some_file):
if os.path.isfile(some_file) or os.path.islink(some_file):
os.remove(some_file)
elif os.path.isdir(some_file):
shutil.rmtree(some_file)
def give_me_psl(fasta, twobit, blat_dir = None, tmp_dir = None, align_type = 'web'):
# give as input a file as a list of strings it runs BLAT and it returns
# the PSL output as a list of strings
fasta_file = give_me_temp_filename(tmp_dir = tmp_dir)
psl_file = give_me_temp_filename(tmp_dir = tmp_dir)
file(fasta_file,'w').writelines(fasta)
# web version of blat
# blat -stepSize=5 -repMatch=2253 -minScore=0 -minIdentity=0 database.2bit query.fa output.psl
    # from: http://genome.ucsc.edu/FAQ/FAQblat.html
    #
    # other idea: ./blat -minIdentity=95 -fine -stepSize=1 -tileSize=6 -repMatch=1000000
    # from http://www.gene2drug.com/product/?p=671 by Sucheta Tripathy
    # BLAT stands for Blast-like Alignment Tool and was designed by Jim Kent.
    # It is relatively easy to install the software and is an excellent way of
    # mapping assembly files generated from abyss into a reference genome for
    # finding new transcripts and new intron-exon junctions. BLAT has recently
    # added a few fine-tuning options for short read mapping. Setting the stepSize
    # and tileSize parameters allows mapping reads of length n, where
    # n = 2 * stepSize + tileSize - 1. While tileSize can range from 6 to 15,
    # stepSize can be from 1 to tileSize. So, in other words, reads as short
    # as 7 bases can be mapped into the reference [2 * 1 + 6 - 1 = 7]. Also, other
    # command-line options can be used to make the mapping more sensitive, such
    # as -fine and -repMatch=1000000. The -fastmap and -ooc options should
    # be avoided for mapping short reads. In addition, -minIdentity may be set
# to 95%.
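    # Worked example of the formula above (tileSize is not set explicitly in
    # this function, so BLAT's default of 11 is assumed): the short-read
    # settings quoted above (stepSize=1, tileSize=6) give a minimum mappable
    # read length of n = 2 * 1 + 6 - 1 = 7 bases, while the 'web' settings
    # used here (stepSize=5, tileSize=11) require n = 2 * 5 + 11 - 1 = 20 bases.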
_BT_ = ""
if blat_dir and blat_dir.strip():
_BT_ = blat_dir.rstrip("/")+"/"
cmd = None
if align_type == 'web':
cmd = [_BT_+'blat',
'-stepSize=5', # 5
'-repMatch=2253', # 2253
'-minScore=0', # 0
'-minIdentity=0', # 0
twobit,
fasta_file,
psl_file]
elif align_type == 'sensitive':
cmd = [_BT_+'blat',
'-stepSize=5', # 5
'-repMatch=2253', # 2253
'-minScore=0', # 0
'-minIdentity=95', # 0
'-fine',
twobit,
fasta_file,
psl_file]
else:
print "ERROR: Not known type of BLAT search!"
sys.exit(1)
cmd = ' '.join(cmd)
proc = os.system(cmd)
if proc:
print >>sys.stderr, "ERROR while executing '%s'" % (cmd,)
sys.exit(1)
psl = file(psl_file,'r').readlines()
    # add chr to column number 14 (index 13) so that it can be loaded into the UCSC
# genome browser
chr_psl = []
for line in psl:
li = line.split('\t')
if li and len(li) > 14:
l = li[13]
li[13] = 'chr' + l if not l.startswith('chr') else l
if li[13] == 'chrMT':
li[13] = 'chrM'
chr_psl.append('\t'.join(li))
else:
chr_psl.append(line)
delete_file(fasta_file)
delete_file(psl_file)
return chr_psl
def give_me_sam(fastq, anchor, bowtie2index, bowtie2_dir = None, tmp_dir = None, cpus = 1):
# give as input a file as a list of strings it runs BOWTIE2 and it returns
# the SAM output as a list of strings
fastq_file = give_me_temp_filename(tmp_dir = tmp_dir)
sam_file = give_me_temp_filename(tmp_dir = tmp_dir)
file(fastq_file,'w').writelines(fastq)
_B2_ = ""
if bowtie2_dir and bowtie2_dir.strip():
_B2_ = bowtie2_dir.rstrip("/")+"/"
cmd = [_B2_+'bowtie2',
'-p',str(cpus),
'--local',
'-k','10',
'-L',str(anchor),
'-x',bowtie2index,
'-U',fastq_file,
'-S',sam_file]
sam = []
cmd = ' '.join(cmd)
proc = os.system(cmd)
if proc:
print >>sys.stderr, "WARNING: unable to execute: '%s'" % (cmd,)
else:
sam = file(sam_file,'r').readlines()
delete_file(fastq_file)
delete_file(sam_file)
return sam
def mycols(t):
return t[0:4] + t[5:11]
def strmycols(t):
return '\t'.join(mycols(t))
def mygenes(t):
    return (t[0], t[6])
def ordmygenes(t):
    return myorder(*mygenes(t))
def give_me_assembly(fasta, kmer = 31, velvet_dir = None, tmp_dir = None):
    # use Velvet to assemble the supporting reads
#
# velveth /tmp/velvet-unmapped-reads/ 17 -fasta -short myfasta.fa
# velvetg /tmp/velvet-unmapped-reads/
if fasta:
fasta_file = give_me_temp_filename(tmp_dir = tmp_dir)
ase_dir = give_me_temp_filename(tmp_dir = tmp_dir)
if os.path.isfile(ase_dir) or os.path.islink(ase_dir):
os.remove(ase_dir)
elif os.path.isdir(ase_dir):
            shutil.rmtree(ase_dir)
os.makedirs(ase_dir)
file(fasta_file,'w').writelines(fasta)
_VT_ = ""
if velvet_dir and velvet_dir.strip():
_VT_ = velvet_dir.rstrip("/")+"/"
cmd = [_VT_+'velveth',
ase_dir,
str(kmer),
'-fasta',
'-short',
fasta_file,
';',
_VT_+'velvetg',
ase_dir,
'>',
'/dev/null',
'2>&1'
]
cmd = ' '.join(cmd)
proc = os.system(cmd)
if proc:
print >>sys.stderr, "ERROR while executing '%s'" % (cmd,)
sys.exit(1)
else:
return []
ase = file(os.path.join(ase_dir,'contigs.fa'),'r').readlines()
delete_file(fasta_file)
shutil.rmtree(ase_dir)
return ase
################################################################################
################################################################################
################################################################################
if __name__ == '__main__':
#command line parsing
usage = "%prog [options]"
description = """It analyzes the mappings of reads on exon-exon junctions."""
version = "%prog 0.12 beta"
parser = optparse.OptionParser(usage = usage,
description = description,
version = version)
parser.add_option("--input_fastq",
action = "store",
type = "string",
dest = "input_fastq_filename",
help = """The input FASTQ file containing all the reads.""")
parser.add_option("--input_fusion_psl",
action = "store",
type = "string",
dest = "input_fusion_psl_filename",
help = """The input PSL file containing the candidate fusion genes.""")
parser.add_option("--input_candidate_fusion_genes_reads",
action = "store",
type = "string",
dest = "input_candidate_fusion_genes_reads_filename",
help = """The input list of candidate fusion genes and ids of the supporting reads, for example 'candidate_fusion-genes_not-filtered_supporting_paired-reads.txt'. This is processed even further.""")
parser.add_option("--input_unmapped_reads",
action = "store",
type = "string",
dest = "input_unmapped_reads_filename",
help = """The input list of ids of reads that are unmapped (that are mapping over the fusion junction).""")
parser.add_option("--output_super_summary",
action = "store",
type = "string",
dest = "output_super_summary_filename",
help = """The output super summary report for candidate fusion genes.""")
parser.add_option("--output_zip_fasta",
action = "store",
type = "string",
dest = "output_zip_fasta_filename",
                      help = """The output FASTQ file containing the reads which support each candidate fusion gene.""")
parser.add_option("--suporting_unique_reads",
action = "store",
type = "int",
dest = "supporting_unique_reads",
default = 1,
help = """The minimum number of unique reads which overlap over an exon-exon junction. Default is %default.""")
parser.add_option("--anchor2",
action = "store",
type = "int",
dest = "anchor2",
default = 40,
                      help = """For anchors longer than (or equal to) this value it is enough to have only one supporting read. Default is '%default'.""")
parser.add_option("--input_genome_2bit",
action = "store",
type = "string",
dest = "input_genome_2bit",
help = """Path to the genome in 2bit format (generated with faToTwoBit) which will be used for aligning using BLAT the supporting reads and their alignment in PSL format is added to file specified with '--output_zip_fasta'.""")
parser.add_option("--input_genome_bowtie2",
action = "store",
type = "string",
dest = "input_genome_bowtie2",
help = """Path to the genome in BOWTIE2 index format which will be used for aligning using BOWTIE2 the supporting reads and their alignment in PSL format is added to file specified with '--output_zip_fasta'.""")
choices = ('web','sensitive')
parser.add_option("--psl_alignment_type",
action = "store",
type = "choice",
choices = choices,
dest = "psl_search_type",
default = "web",
help = "The type of BLAT alignment to be used for aligning "+
"the supporting reads when BLAT is chosen. The choices "+
"are ['"+"','".join(choices)+"']. "+
"Default is '%default'.")
parser.add_option("--blat-dir",
action = "store",
type = "string",
dest = "blat_directory",
help = """Path to Blat's executable.""")
parser.add_option("--sam_alignment",
action = "store",
type = "int",
dest = "sam_alignment",
default = 10,
help = """If set then a SAM file will be generated using BOWTIE2. Default is '%default'.""")
parser.add_option("--bowtie2-dir",
action = "store",
type = "string",
dest = "bowtie2_directory",
help = """Path to Bowtie2's executable.""")
parser.add_option("--mismatches",
action = "store",
type = "int",
dest = "mismatches",
default = 3,
help = """The minimum number of mismatches accepted in the alignment. Default is '%default'.""")
parser.add_option("--mismatches-gap",
action = "store",
type = "int",
dest = "mismatches_gap",
default = 7,
help = """The minimum number of mismatches accepted in the gap alignment. Default is '%default'.""")
parser.add_option("--junction",
action = "store_true",
dest = "junction",
default = False,
help = """If used then the junction sequence is added to the FASTA file with the supporting reads. Default is '%default'.""")
parser.add_option("--threads","-p",
action = "store",
type = "int",
dest = "processes",
default = 1,
help = "Number or processes to be used for running Bowtie2. "+
"Default is '%default'. ")
parser.add_option("--tmp_dir",'-t',
action = "store",
type = "string",
dest = "tmp_directory",
default = None,
help = "The directory which should be used as temporary directory. By default is the OS temporary directory.")
parser.add_option("--velvet",
action = "store_true",
dest = "velvet",
default = False,
help = """If used then the supporting reads from the FASTA file are assembled using VELVET. Default is '%default'.""")
parser.add_option("--velvet-dir",
action = "store",
type = "string",
dest = "velvet_directory",
help = """Path to Velvet's executable.""")
(options, args) = parser.parse_args()
# validate options
if not (options.input_fusion_psl_filename and
options.output_super_summary_filename
):
parser.print_help()
sys.exit(1)
#
# HEADER PSL file
#
#header = ['gene-5end', # 0
# 'gene-5end_symbol', # 1
# 'chromosome_gene-5end', # 2
# 'strand_gene-5end', # 3
# 'start_chromosome_part-1-of-read-mapped-gene-5end', # 4
# 'end_chromosome_part-1-read-mapped-gene-5end', # 5
# 'gene-3end', # 6
# 'gene-3end_symbol', # 7
# 'chromosome_gene-3end', # 8
# 'strand_gene-3end', # 9
# 'start_chromosome_part-2-of-read-mapped-gene-3end', # 10
# 'end_chromosome_part-2-read-mapped-gene-3end', # 11
# 'short_read', # 12
# 'mismatches', # 13
# 'length_short_read', # 14
# 'start_part-1-read_on_gene-5end', # 15
# 'end_part-1-read_on_gene-5end', # 16
# 'start_part-2-read_on_gene-3end', # 17
# 'end_part-2-read_on_gene-3end', # 18
# 'anchor_length', # 19
# 'fusion_sequence' # 20
# ]
unmapped_reads = set()
if options.input_unmapped_reads_filename:
print "Reading...",options.input_unmapped_reads_filename
unmapped_reads = set([line.rstrip('\r\n') for line in file(options.input_unmapped_reads_filename,'r').readlines()])
print "Reading...",options.input_fusion_psl_filename
data = [line.rstrip('\r\n').split('\t') for line in file(options.input_fusion_psl_filename,'r') if line.rstrip('\r\n')]
header = data.pop(0)
# filter for mismatches
#data = [line for line in data if int(line[13])<=options.mismatches]
dudu = []
for line in data:
if line[20].lower().find('*n') == -1:
if int(line[13])<=options.mismatches:
dudu.append(line)
elif int(line[13])<=options.mismatches_gap:
dudu.append(line)
# here I have gaps in alignment (i.e. IGH fusions) because there is "*NNNN"
#data = [line for line in data if int(line[13])<=options.mismatches] # ORIGINAL
data = dudu
# find unique reads
data_uniq = list(set(['\t'.join(line[:12]) for line in data]))
data_uniq = [line.split('\t') for line in data_uniq]
# find same splicing sites = remove cols 4 and 11
data_uniq = [strmycols(line) for line in data_uniq]
# counts the unique reads for unique splicing sites
data_dict = dict()
for line in data_uniq:
data_dict[line] = data_dict.get(line,0) + 1
# sort the counts
dd = sorted(data_dict.items(),key = lambda x: -x[1])
# filter those fusion with too few counts
#dd = [(k,v) for (k,v) in dd if v >= options.supporting_unique_reads]
dd = [(k,v) for (k,v) in dd if v >= 1] # in order to allow the use of options.anchor2
# find those reads and the fusion sequence for the unique fusion points
summary = []
summary_reads = []
summary.append("%s\tcounts\tlongest_anchor\tfusion_sequence\n"%(strmycols(header),))
summary_reads.append('header')
singles = set()
ggenes_e = list()
ggenes_s = list()
ggenes_p = list()
ggenes_e.append('header')
ggenes_s.append('header')
ggenes_p.append('header')
fast_gg = dict()
for (k,v) in dd:
if v >= options.supporting_unique_reads:
r = []
fs = None
gg = None
anchor_max = 0
for li in data:
if strmycols(li) == k:
r.append(li[12])
fs = li[20]
anchor = int(li[19])
if anchor > anchor_max:
anchor_max = anchor
gg_e = (li[0],li[6])
gg_s = (li[1],li[7])
gg_p = (li[5],li[10])
summary.append("%s\t%d\t%d\t%s\n"%(k,v,anchor_max,fs))
r = set(r)
rr = set(el[:-1] + '1' if el.endswith('/2') else el[:-1]+'2' for el in r)
summary_reads.append(list(r)+list(rr))
singles.update(r)
singles.update(rr)
ggenes_e.append(gg_e)
ggenes_s.append(gg_s)
ggenes_p.append(gg_p)
fast_gg[myorder(*gg_e)] = None
elif options.supporting_unique_reads > 1:
r = []
fs = None
gg = None
anchor_max = 0
for li in data:
if strmycols(li) == k:
r.append(li[12])
fs = li[20]
anchor = int(li[19])
if anchor > anchor_max:
anchor_max = anchor
gg_e = (li[0],li[6])
gg_s = (li[1],li[7])
gg_p = (li[5],li[10])
if anchor_max >= options.anchor2:
summary.append("%s\t%d\t%d\t%s\n"%(k,v,anchor_max,fs))
r = set(r)
rr = set(el[:-1] + '1' if el.endswith('/2') else el[:-1]+'2' for el in r)
summary_reads.append(list(r)+list(rr))
singles.update(r)
singles.update(rr)
ggenes_e.append(gg_e)
ggenes_s.append(gg_s)
ggenes_p.append(gg_p)
fast_gg[myorder(*gg_e)] = None
print "Writing the summary file...", options.output_super_summary_filename
file(options.output_super_summary_filename,'w').writelines(summary)
print "Reading...",options.input_candidate_fusion_genes_reads_filename
# 0 - Fusion_gene_symbol_1
# 1 - Fusion_gene_symbol_2
# 2 - Fusion_gene_1
# 3 - Fusion_gene_2
# 4 - Count_paired-end_reads
# 5 - Supporting_paired-read_ids ==> separated by commas, e.g. F1000018349733,F1000033997513,F1000046358541,F1000034322437,...
candidate_fusions_reads = [line.rstrip('\r\n').split('\t') for line in file(options.input_candidate_fusion_genes_reads_filename,'r').readlines()]
candidate_fusions_reads.pop(0) # remove the header
candidate_fusions_reads = dict([(myorder(el[2],el[3]),el[5].split(',')) for el in candidate_fusions_reads if fast_gg.has_key(myorder(el[2],el[3]))])
#
# for each candidate fusion genes build a FASTA file containing the reads which support it
#
print "Processing the supporting reads..."
fasta = dict()
fastq = dict()
pairs = dict()
for (k,v) in candidate_fusions_reads.items():
pairs[k] = []
for vv in v:
s1 = '%s/1' % (vv,)
s2 = '%s/2' % (vv,)
fasta[s1] = None
fasta[s2] = None
pairs[k].append(s1)
pairs[k].append(s2)
for k in singles:
fasta[k] = None
print "Scanning the FASTQ file...",options.input_fastq_filename
for a_read in reads_from_fastq_file(options.input_fastq_filename):
if fasta.has_key(a_read[0]):
ev = a_read[0]
w = a_read[1]
q = a_read[2]
if ev.endswith("/1"):
fasta[ev] = w
elif ev.endswith("/2"):
fasta[ev] = dnaReverseComplement(w)
fastq[ev] = (w,q)
# create a ZIP FASTA file where is a file for each candidate fusion gene
print "Writing the FASTA/FASTQ files containing the supporting reads...",options.output_zip_fasta_filename
archive = zipfile.ZipFile(options.output_zip_fasta_filename, 'w', zipfile.ZIP_STORED, allowZip64 = True)
for i in xrange(len(summary)):
if i == 0: # skip header
continue
# for each candidate fusion
#gg = "%s:%s_%s:%s_%s:%s" % (ggenes_s[i][0],ggenes_s[i][1],ggenes_e[i][0],ggenes_e[i][1],ggenes_p[i][0],ggenes_p[i][1])
gg = "%s--%s__%s--%s" % (ggenes_s[i][0],ggenes_s[i][1],ggenes_p[i][0],ggenes_p[i][1])
gg_e = myorder(ggenes_e[i][0],ggenes_e[i][1])
da = []
if options.junction:
u = summary[i].rstrip('\n').split('\t')
da = ['>JUNCTION__%s\n%s\n' % ('_'.join(u[:-1]),u[-1])]
# write the junction sequence
#archive.writestr("%s_junction.fa" % (gg,), da)
# PSL
#if options.input_genome_2bit:
# psl = give_me_psl(da,
# options.input_genome_2bit,
# tmp_dir = options.tmp_directory,
# align_type = options.blat_search_type)
# archive.writestr("%s_junction.psl" % (gg,), ''.join(psl))
# write the reads in FASTA file
#da = []
for v in sorted(summary_reads[i]):
da.append(">%s_supports_fusion_junction\n"%(v,))
da.append("%s\n"%(fasta[v],))
for v in sorted(pairs[gg_e]):
da.append(">%s_supports_fusion_pair\n"%(v,))
da.append("%s\n"%(fasta[v],))
archive.writestr("%s_reads.fa" % (gg,), ''.join(da))
# PSL
if options.input_genome_2bit:
psl = give_me_psl(da,
options.input_genome_2bit,
blat_dir = options.blat_directory,
tmp_dir = options.tmp_directory,
align_type = options.psl_search_type)
archive.writestr("%s_reads.psl" % (gg,), ''.join(psl))
# VELVET
if options.velvet:
ase = give_me_assembly(da,
17,
velvet_dir = options.velvet_directory,
tmp_dir = options.tmp_directory)
archive.writestr("%s_assembly.fa" % (gg,), ''.join(ase))
# write the reads in FASTQ file
da = []
for v in sorted(summary_reads[i]):
da.append("@%s_supports_fusion_junction%s\n"%(v[:-2],v[-2:]))
sq = fastq[v]
da.append("%s\n+\n%s\n"%(sq[0],sq[1]))
for v in sorted(pairs[gg_e]):
da.append("@%s_supports_fusion_pair%s\n"%(v[:-2],v[-2:]))
sq = fastq[v]
#da.append("%s\n+\n%s\n"%(sq[0],illumina2sanger(sq[1])))
da.append("%s\n+\n%s\n"%(sq[0],sq[1]))
archive.writestr("%s_reads.fq" % (gg,), ''.join(da))
if options.input_genome_bowtie2:
sam = give_me_sam(da,
options.sam_alignment,
options.input_genome_bowtie2,
bowtie2_dir = options.bowtie2_directory,
tmp_dir = options.tmp_directory,
cpus = options.processes)
archive.writestr("%s_reads.sam" % (gg,), ''.join(sam))
# Ensembl ids of genes
archive.writestr("%s_ensembl_ids.txt" % (gg,), '%s\n%s\n' % (ggenes_e[i][0],ggenes_e[i][1]))
archive.close()
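    # Example invocation (file names below are placeholders; in the real
    # pipeline this script is normally driven by the 'fusioncatcher' wrapper):
    #
    #   build_report_fusions_psl.py \
    #     --input_fastq reads.fq.gz \
    #     --input_fusion_psl candidate_fusion-genes_exon-exon-junctions.psl \
    #     --input_candidate_fusion_genes_reads candidate_fusion-genes_not-filtered_supporting_paired-reads.txt \
    #     --output_super_summary candidate_fusion_genes_summary.txt \
    #     --output_zip_fasta supporting-reads_gene-fusions.zip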
| gpl-3.0 | -3,517,396,408,604,192,000 | 38.635488 | 249 | 0.512997 | false |
keithasaurus/django_fun_views | fun_views/views/generic/form/render.py | 1 | 1144 | from fun_views.patterns.form.render import form_render_pattern
from fun_views.views.utils import (get_context_base, make_base_view,
not_set_get_form_class,
not_set_get_template_name, prefer_func,
prefer_literal, render_response_base)
form_render_base = make_base_view(form_render_pattern)
def _init_form(req_data, form_class):
return form_class()
def form_render(template_name=None,
get_template_name=not_set_get_template_name,
form_class=None,
get_form_class=not_set_get_form_class,
init_form=_init_form,
form_context_name='form',
get_form_context_name=None,
get_context=get_context_base,
render_response=render_response_base):
return form_render_base(
prefer_literal(form_class, get_form_class),
init_form,
prefer_func(form_context_name, get_form_context_name),
get_context,
prefer_literal(template_name, get_template_name),
render_response
)
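# Illustrative wiring (hypothetical names -- ContactForm, the template and the
# URL pattern are assumptions, not part of this module):
#
#   from django.conf.urls import url
#   from myapp.forms import ContactForm
#
#   urlpatterns = [
#       url(r'^contact/$',
#           form_render(template_name='contact.html', form_class=ContactForm)),
#   ]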
| mit | -5,728,591,379,269,749,000 | 35.903226 | 74 | 0.578671 | false |
mumax/2 | examples/stdproblem5-am01.py | 1 | 1828 | # Micromagnetics standard proplem no. 5
# As proposed by M. Najafi et al., JAP 105, 113914 (2009).
# @author Mykola Dvornik
from mumax2 import *
from mumax2_geom import *
Nx = 32
Ny = 32
Nz = 1
setgridsize(Nx, Ny, Nz)
# physical size in meters
sizeX = 100e-9
sizeY = 100e-9
sizeZ = 10e-9
csX = (sizeX/Nx)
csY = (sizeY/Ny)
csZ = (sizeZ/Nz)
setcellsize(csX, csY, csZ)
# load modules
load('exchange6')
load('demag')
load('zeeman')
load('llg')
load('maxtorque')
load('solver/am12')
setv('m_maxabserror', 1e-4)
setv('m_maxrelerror', 1e-3)
setv('maxdt', 1e-10)
setv('mindt', 1e-17)
savegraph("graph.png")
setv('Msat', 800e3)
setv('Aex', 1.3e-11)
setv('alpha', 1.0)
setv('gamma', 2.211e5)
setv('dt', 1e-15)
setv('maxdt', 1e-12)
# Set an initial magnetisation which will relax into a vortex
mv = makearray(3, Nx, Ny, Nz)
for m in range(Nx):
for n in range(Ny):
for o in range(Nz):
xx = float(m) * csX - 50.0e-9
yy = 50.0e-9 - float(n) * csY
mv[0][m][n][o] = yy
mv[1][m][n][o] = xx
mv[2][m][n][o] = 40.0e-9
setarray('m', mv)
run_until_smaller('maxtorque', 1e-3 * gets('gamma') * 800e3)
load('zhang-li')
setv('alpha', 0.1)
setv('dt', 1e-15)
setv('t', 0)
tabulate(["t", "<m>"], "m.txt")
setv('xi',0.05)
setv('polarisation',1.0)
save("m","png",[])
save("m","vtk",[])
j = makearray(3, Nx, Ny, Nz)
for m in range(Nx):
for n in range(Ny):
for o in range(Nz):
j[0][m][n][o] = 1.0
j[1][m][n][o] = 0.0
j[2][m][n][o] = 0.0
setv('j', [1e12, 0, 0])
setmask('j', j)
#autosave("m", "png", [], 10e-12)
autosave("m", "gplot", [], 10e-12)
autotabulate(["t", "<m>"], "m.txt", 50e-12)
autotabulate(["t", "m_error"], "error.dat", 1e-13)
autotabulate(["t", "dt"], "dt.dat", 1e-13)
run(15.0e-9)
printstats()
| gpl-3.0 | -2,265,707,924,753,422,800 | 17.464646 | 60 | 0.561269 | false |
dseuss/pycsalgs | csalgs/lowrank/gradient.py | 1 | 2641 | # encoding: utf-8
import itertools as it
import numpy as np
import numpy.linalg as la
from scipy.sparse.linalg import svds
__all__ = ['adaptive_stepsize', 'iht_estimator', 'cgm_estimator']
def _vec(A):
newshape = A.shape[:-2]
newshape = newshape + (A.shape[-2] * A.shape[-1],)
return A.reshape(newshape)
def hard_threshold(mat, rank, retproj=False):
"""PU, PV ... projectors on left/right eigenspaces"""
U_full, s, Vstar_full = la.svd(mat)
U = U_full[:, :rank]
V = Vstar_full.T.conj()[:, :rank]
PU = U @ U.T.conj()
PV = V @ V.T.conj()
mat_projected = U @ np.diag(s[:rank]) @ V.conj().T
return (mat_projected, (PU, PV)) if retproj else mat_projected
def adaptive_stepsize(projection='row'):
    """Build an adaptive step-size function for iterative hard thresholding.
    :param projection: Possible values: 'row', 'col', 'rowcol', None
    :returns: a function mapping (A, g, projectors) to a step size; the
        gradient g is first projected as requested before the size is computed
    """
assert projection in {'col', 'row', 'rowcol', None}
def stepsize(A, g, projectors):
PU, PV = projectors
if projection == 'col':
g = PU @ g
elif projection == 'row':
g = g @ PV
elif projection == 'rowcol':
g = PU @ g @ PV
return la.norm(g)**2 / la.norm(_vec(A) @ _vec(g))**2
return stepsize
def iht_estimator(A, y, rank, stepsize=adaptive_stepsize(), x_init=None):
x_hat = np.zeros(A.shape[1:]) if x_init is None else x_init
_, projectors = hard_threshold(np.tensordot(y, A, axes=(-1, 0)), rank,
retproj=True)
while True:
g = np.tensordot(y - (_vec(A) @ _vec(x_hat)), A, axes=(-1, 0))
mu = stepsize(A, g, projectors)
x_hat, projectors = hard_threshold(x_hat + mu * g, rank, retproj=True)
yield x_hat
def _expval(A, x):
return np.dot(A.reshape((len(A), -1)), x.ravel())
def _cgm_iterator(A, y, alpha, svds=svds, ret_gap=False):
x = np.zeros(A.shape[1:3], dtype=A.dtype)
for iteration in it.count():
z = _expval(A, x)
u, _, v = svds(np.tensordot(z - y, A, axes=(0, 0)), 1)
h = - alpha * u * v
eta = 2 / (iteration + 2)
x = (1 - eta) * x + eta * h
duality_gap = np.dot(z - _expval(A, h), z - y)
yield x, duality_gap
def cgm_estimator(A, y, alpha, relerr=1e-1, maxiter=int(1e6)):
    """Conditional-gradient (Frank-Wolfe) estimator for low-rank recovery.
    Iterates until the duality gap drops below `relerr`; raises ValueError if
    that does not happen within `maxiter` iterations.
    """
solution = _cgm_iterator(A, y, alpha, ret_gap=True)
for x, gap in it.islice(solution, maxiter):
if gap < relerr:
return x
raise ValueError("Did not find solution with error < {} in {} iterations"
.format(relerr, maxiter))
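if __name__ == '__main__':
    # Minimal usage sketch (illustrative only: the problem sizes and random
    # data below are assumptions, not part of this module). Recover a rank-2,
    # 20x20 matrix from 400 Gaussian measurement matrices via IHT.
    n, rank, m = 20, 2, 400
    x_true = np.random.randn(n, rank) @ np.random.randn(rank, n)
    A = np.random.randn(m, n, n) / np.sqrt(m)  # stack of measurement matrices
    y = A.reshape(m, -1) @ x_true.ravel()      # linear measurements of x_true
    # take the estimate after 100 IHT iterations
    x_hat = next(it.islice(iht_estimator(A, y, rank), 100, None))
    print(la.norm(x_hat - x_true) / la.norm(x_true))  # relative error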
| gpl-3.0 | 84,450,061,899,274,080 | 27.706522 | 78 | 0.558122 | false |
Vaidyanath/tempest | tempest/common/cred_provider.py | 1 | 4110 | # Copyright (c) 2014 Deutsche Telekom AG
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
from tempest import auth
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Type of credentials available from configuration
CREDENTIAL_TYPES = {
'identity_admin': ('identity', 'admin'),
'user': ('identity', None),
'alt_user': ('identity', 'alt')
}
# Reads credentials from configuration and builds a Credentials object
# based on the specified or configured identity version
def get_configured_credentials(credential_type, fill_in=True,
identity_version=None):
identity_version = identity_version or CONF.identity.auth_version
if identity_version not in ('v2', 'v3'):
raise exceptions.InvalidConfiguration(
'Unsupported auth version: %s' % identity_version)
if credential_type not in CREDENTIAL_TYPES:
raise exceptions.InvalidCredentials()
conf_attributes = ['username', 'password', 'tenant_name']
if identity_version == 'v3':
conf_attributes.append('domain_name')
# Read the parts of credentials from config
params = {}
section, prefix = CREDENTIAL_TYPES[credential_type]
for attr in conf_attributes:
_section = getattr(CONF, section)
if prefix is None:
params[attr] = getattr(_section, attr)
else:
params[attr] = getattr(_section, prefix + "_" + attr)
# Build and validate credentials. We are reading configured credentials,
# so validate them even if fill_in is False
credentials = get_credentials(fill_in=fill_in, **params)
if not fill_in:
if not credentials.is_valid():
msg = ("The %s credentials are incorrectly set in the config file."
" Double check that all required values are assigned" %
credential_type)
raise exceptions.InvalidConfiguration(msg)
return credentials
# Wrapper around auth.get_credentials to use the configured identity version
# if none is specified
def get_credentials(fill_in=True, identity_version=None, **kwargs):
identity_version = identity_version or CONF.identity.auth_version
# In case of "v3" add the domain from config if not specified
if identity_version == 'v3':
domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
if 'domain' in x)
if not domain_fields.intersection(kwargs.keys()):
kwargs['user_domain_name'] = CONF.identity.admin_domain_name
auth_url = CONF.identity.uri_v3
else:
auth_url = CONF.identity.uri
return auth.get_credentials(auth_url,
fill_in=fill_in,
identity_version=identity_version,
**kwargs)
@six.add_metaclass(abc.ABCMeta)
class CredentialProvider(object):
def __init__(self, name, password='pass', network_resources=None):
self.name = name
@abc.abstractmethod
def get_primary_creds(self):
return
@abc.abstractmethod
def get_admin_creds(self):
return
@abc.abstractmethod
def get_alt_creds(self):
return
@abc.abstractmethod
def clear_isolated_creds(self):
return
@abc.abstractmethod
def is_multi_user(self):
return
@abc.abstractmethod
def is_multi_tenant(self):
return
| apache-2.0 | 1,885,659,017,075,570,700 | 34.73913 | 79 | 0.659854 | false |
EwanC/MrMangler | test/itanium_runner.py | 1 | 4683 | #!/usr/bin/env python
''' Test harness for validating the MrMangler binary.
Checks that the Itanium manglings generated by MrMangler match
the counterpart demanglings from c++filt.
'''
import argparse
import os
import subprocess
import sys
def run_mangler(func_signature, exe):
''' Runs MrMangler executable
Args:
func_signature signature to mangle
exe MrMangler executable
Returns:
(return code, output) - return code and stdout from MrMangler execution
'''
child_echo = subprocess.Popen(
['echo', '-n', func_signature], stdout=subprocess.PIPE)
child_mangler = subprocess.Popen(
exe, stdin=child_echo.stdout, stdout=subprocess.PIPE)
child_echo.stdout.close()
output = child_mangler.communicate()[0].rstrip(b'\n')
return (child_mangler.returncode, output)
def run_filt(mangled):
''' Runs c++filt executable
Args:
mangled mangled symbol to demangle
Returns:
(return code, output) - return code and stdout from c++filt execution
'''
child_echo = subprocess.Popen(
['echo', '-n', mangled], stdout=subprocess.PIPE)
child_filt = subprocess.Popen(('c++filt'), stdin=child_echo.stdout,
stdout=subprocess.PIPE)
child_echo.stdout.close()
output = child_filt.communicate()[0].rstrip(b'\n')
return (child_filt.returncode, output)
def validate_environment(filename, exe):
'''Checks script arguments and platform, exiting if not suitable
Args:
filename - Input file containing func decls to test
exe - MrMangler executable file to test
'''
def is_exe(path):
''' Returns True if @path exists and has executable permissions '''
return os.path.isfile(path) and os.access(path, os.X_OK)
if os.name != 'posix':
print('Test script only supports *nix systems')
sys.exit()
if not os.path.isfile(filename):
print('Could not find input file ' + filename)
sys.exit()
if not is_exe(exe):
print('Could not find test executable ' + exe)
sys.exit()
# check c++filt exist
found = False
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, 'c++filt')
if is_exe(exe_file):
found = True
if not found:
print('Could not locate c++filt in PATH')
sys.exit()
def main():
'''Script entry point
Returns(int): number of fails
'''
parser = argparse.ArgumentParser(
description='Test runner for MrMangler using Linux c++filt '
'to verify manglings.')
parser.add_argument(
'filename', help='Input file containing function signatures to test. '
'One signature per line.')
parser.add_argument('binary', help='MrMangler binary executable to test.')
args = parser.parse_args()
# Exit script if input files don't exist or not running on supported OS
validate_environment(args.filename, args.binary)
with open(args.filename) as test_script_fd:
passes = [] # list containing passing inputs
fails = [] # list containing tuple of fails '(input, error)'
for line in test_script_fd:
line = line.rstrip('\n')
# Mangle function decl
(return_code, mangled) = run_mangler(line, args.binary)
if return_code != 0:
fails.append((line, mangled))
continue
# Demangle our mangling
(return_code, demangled) = run_filt(mangled)
if return_code != 0:
fails.append((line, mangled))
continue
# Check if demangling matches original decl
if demangled == line:
passes.append(line)
continue
# When demangling and original differ then mangle the demangling,
# if this matches the original mangling then our mangling was
# correct.
(return_code, fallback) = run_mangler(demangled, args.binary)
if (mangled == fallback) and (return_code == 0):
passes.append(line)
else:
fails.append((line, mangled))
# Print test results
print("Total tests run: {0}".format((len(passes) + len(fails))))
print("Passes: {0}".format(len(passes)))
print("Fails: {0}".format(len(fails)))
for (expected, actual) in fails:
print('\tExpected "{0}", was "{1}"'.format(expected, actual))
return len(fails)
if __name__ == '__main__':
ret_code = main()
sys.exit(ret_code)
| mit | -4,054,789,201,465,943,000 | 31.075342 | 79 | 0.599402 | false |
cathyyul/sumo-0.18 | tests/complex/traci/busySocket/runner.py | 1 | 1168 | #!/usr/bin/env python
import os, subprocess, sys, time, shutil
sumoHome = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
sys.path.append(os.path.join(sumoHome, "tools"))
import traci
if sys.argv[1]=="sumo":
sumoBinary = os.environ.get("SUMO_BINARY", os.path.join(sumoHome, 'bin', 'sumo'))
addOption = ""
secondConfig = "sumo.sumocfg"
else:
sumoBinary = os.environ.get("GUISIM_BINARY", os.path.join(sumoHome, 'bin', 'sumo-gui'))
addOption = "-S -Q"
secondConfig = "sumo_log.sumocfg"
PORT = 8813
subprocess.Popen("%s -c sumo.sumocfg %s" % (sumoBinary, addOption), shell=True, stdout=sys.stdout, stderr=sys.stderr)
traci.init(PORT)
subprocess.Popen("%s -c %s %s" % (sumoBinary, secondConfig, addOption), shell=True, stdout=sys.stdout, stderr=sys.stderr)
time.sleep(10)
step = 0
while not step>100:
traci.simulationStep()
vehs = traci.vehicle.getIDList()
    if "horiz" not in vehs or len(vehs)>1:
        print "Something is wrong"
step += 1
traci.close()
sys.stdout.flush()
if os.path.exists("lastrun.stderr"):
f = open("lastrun.stderr")
shutil.copyfileobj(f, sys.stderr)
f.close()
| gpl-3.0 | -7,274,768,031,797,202,000 | 32.371429 | 121 | 0.662671 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_service_tags_operations.py | 1 | 4633 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceTagsOperations(object):
"""ServiceTagsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceTagsListResult"
"""Gets a list of service tag information resources.
:param location: The location that will be used as a reference for version (not as a filter
based on location, you will get the list of service tags with prefix details across all regions
but limited to the cloud that your subscription belongs to).
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceTagsListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.ServiceTagsListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceTagsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceTagsListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/serviceTags'} # type: ignore
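# Illustrative call site (an assumption, not part of this generated file):
# with an authenticated management client from azure-mgmt-network, e.g.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   network_client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   tags = network_client.service_tags.list("westus2")
#
# 'tags' is then the ServiceTagsListResult deserialized by the method above.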
| mit | -2,006,565,035,034,045,000 | 44.421569 | 139 | 0.667386 | false |
javimosch/cdnsviewer | savecss.py | 1 | 1626 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# server.py: receive CSS and JS files from Chrome extension
# and save files locally
#
# Author: [email protected]
# 30.10.2011 - Created
try:
# python 2.x
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except:
# python 3.x
from http.server import HTTPServer, BaseHTTPRequestHandler
class MyServer(BaseHTTPRequestHandler):
def do_POST(self):
hd = self.headers
# chrome sent data:
url = hd.get("X-origurl")
fpath = hd.get("X-filepath")
bodylen = int(hd['content-length'])
body = self.rfile.read(bodylen)
print (url, " ->", fpath, len(body))
reply = "OK"
# optional security: check that path is under given folder
ROOT = ""
if ROOT and not fpath.startswith(ROOT):
reply = "access denied: " + fpath
else:
# save file
try:
f = open(fpath, "wb")
f.write(body)
f.close()
except Exception as e:
print (e)
reply = "Server couldn't save "+fpath
# return reply
self.send_response(200)
self.end_headers()
self.wfile.write(reply.encode('utf-8'))
# optional security: chroot this script to a folder, run with
# "sudo python server.py"
# (remember to adjust your url mappings in the extension too)
# import os
# os.chroot("/Users/myusername/")
# start http server
server = HTTPServer(('localhost', 8080), MyServer)
print ("Server running in port 8080...")
server.serve_forever()
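# Quick manual test (illustrative; the URL and file path are placeholders):
#
#   curl -X POST http://localhost:8080/ \
#        -H "X-origurl: http://example.com/style.css" \
#        -H "X-filepath: /tmp/style.css" \
#        --data-binary "body { color: red; }"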
| mit | 2,339,234,633,292,802,600 | 25.655738 | 66 | 0.591021 | false |
qmarlats/pyquizz | env-3/bin/rst2odt_prepstyles.py | 1 | 1745 | #!/home/qmarlats/Documents/Projets/utbm/pyquizz/env-3/bin/python3
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <[email protected]>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print >> sys.stderr, __doc__
print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
| gpl-3.0 | -4,412,693,940,476,137,500 | 25.044776 | 75 | 0.633238 | false |
BotDevGroup/marvin | marvinbot/views.py | 1 | 1385 | import logging
from flask import (
request, session, g, redirect, url_for, abort, render_template,
flash, current_app, Blueprint
)
from flask_login import login_user, logout_user, current_user, login_required
from marvinbot.models import User
from marvinbot.forms import LoginForm
from marvinbot.utils.net import is_safe_url
log = logging.getLogger(__name__)
marvinbot = Blueprint('marvinbot', __name__, template_folder='templates')
@marvinbot.route('/')
@login_required
def home():
return render_template('index.html')
@marvinbot.route('/login', methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.by_username(form.username.data)
if user and user.check_password(form.passwd.data):
login_user(user, remember=True)
flash('Successful login')
next_url = request.args.get('next')
# is_safe_url should check if the url is safe for redirects.
# See http://flask.pocoo.org/snippets/62/ for an example.
if not is_safe_url(request, next_url):
return abort(400)
return redirect(next_url or url_for(".home"))
return render_template("login.html", form=form)
@marvinbot.route('/logout', methods=["GET", "POST"])
@login_required
def logout():
logout_user()
return redirect(url_for('.login'))
| mit | -2,103,130,300,611,279,000 | 28.468085 | 77 | 0.658484 | false |
rszki/Protein | PDBList.py | 1 | 4456 | # -*- coding: utf-8 -*-
'''
Created on 2012/12/14
A module collecting operations on my own PDB-related list files (e.g. linker lists).
The objects handled are basically Python lists.
The default separator is ",".
ReadList
ToInt
ToStr
WriteList
DivideList
ToDetailNum
ToPDBNum
MakeLinkerTupple
@author: ryosuke
'''
def ReadList(filename, shold_ToInt=True, sep=","):
'''
Read my definition PDB list file (ex. linker list) and make list.
@param filename: is PDB list file name.
@param shold_ToInt: is whether you use ToInt function.
@param sep: is separator in PDB list.
@return: PDB list converted to list object
'''
with open(filename) as fp:
ls = fp.readlines()
ls = [x.splitlines() for x in ls]
ls = [x[0].split(sep) for x in ls]
if shold_ToInt:
return ToInt(ls)
return ls
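# Illustrative example (the file contents are hypothetical): if a list file
# contains the line "1abc,10,25", then with the default arguments
#   ReadList("linker_list")  ->  [['1abc', 10, 25]]
# because ToInt() converts every field after the PDB id to int where possible.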
def ToInt(PDB_ls):
'''
Convert str type number into int type number
@param PDB_ls: is made by ReadList()
@return: converted PDB list
'''
new_ls = []
for line in PDB_ls:
new_line = [line[0]]
for i in range(1,len(line)):
try:
elem_new = int(line[i])
except:
elem_new = line[i]
new_line += [elem_new]
new_ls.append(new_line)
return new_ls
def ToStr(PDB_ls):
'''
Convert int type number into str type number
@param PDB_ls: is made by ReadList()
@return converted PDB list
'''
new_ls = []
for line in PDB_ls:
new_line = []
for x in line:
new_line.append(str(x))
new_ls.append(new_line)
return new_ls
def WriteList(PDB_ls,output_file, sep = ",", shold_ToStr = True):
'''
Write PDB list to file
@param PDB_ls: is made by ReadList()
@param output_file: include directory
@param sep: is separator
@param shold_ToStr: is whether you use ToStr function
@return: is None
'''
if shold_ToStr:
PDB_ls = ToStr(PDB_ls)
with open(output_file,mode="w") as fp:
fp.writelines(sep.join(line)+"\n" for line in PDB_ls)
def DivdeList(PDB_ls, div_num, output_dir):
    '''
    Divide PDB_ls into div_num cross-validation sub-lists and write each one
    to output_dir as "crosslist_<n>".
    '''
for i in range(div_num):
cross_ls = []
for j, x in enumerate(PDB_ls):
if j%div_num == i:
cross_ls.append(x)
filename = "{0}/crosslist_{1}".format(output_dir, i+1)
WriteList(cross_ls, filename)
def ToDetailNum(PDB_ls,Detail_dir):
"""
Convert linker pdb_number to detail line number.
@param PDB_ls: is made by ReadList()
@param Detail_dir: is need to use Detail module.
@return: Converted PDBlist
"""
import Detail
list_conv = []
for protein in PDB_ls:
pdbid = protein[0]
pdbnum = protein[1:]
newline = [pdbid]
list_detail = Detail.ReadList(pdbid, Detail_dir)
for line_detail in list_detail[:]:
if line_detail[8] in pdbnum:
newline.append(line_detail[2])
list_conv.append(newline)
return list_conv
def ToPDBNum(PDB_fasta_ls,Detail_dir):
"""
Convert linker fasta number to pdb number.
@param PDB_fasta_ls: is made by ToDetailNum()
@param Detail_dir: is need to use Detail module.
@return: Converted PDBlist
"""
import Detail
list_conv = []
for protein in PDB_fasta_ls:
pdbid = protein[0]
pdbnum = protein[1:]
newline = [pdbid]
list_detail = Detail.ReadList(pdbid, Detail_dir)
for line_detail in list_detail[:]:
if line_detail[2] in pdbnum:
newline.append(line_detail[8])
list_conv.append(newline)
return list_conv
def MakeLinkerTupple(linker_line):
"""
    Convert a linker line to linker tuples
    @param linker_line: linker list's one line made by ReadList()
    @return: list of linker tuples
"""
linkers = []
for i in range(1,len(linker_line),2):
linkers.append((linker_line[i],linker_line[i+1]))
return linkers
#test section
if __name__ == "__main__":
a = ReadList("/home/ryosuke/Dropbox/Task/test_list")
b = ToDetailNum(a,"/home/ryosuke/Dropbox/Task/")
print b
# WriteList(a,"test_list")
# print NotExistInDatabase("/home/ryosuke/db/FASTA/",a,"txt") | gpl-2.0 | -6,933,500,030,450,986,000 | 23.116667 | 69 | 0.571198 | false |
openstack/python-magnumclient | magnumclient/v1/client.py | 1 | 8312 | # Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneauth1.exceptions import catalog
from keystoneauth1 import session as ksa_session
import os_client_config
from oslo_utils import importutils
from magnumclient.common import httpclient
from magnumclient.v1 import baymodels
from magnumclient.v1 import bays
from magnumclient.v1 import certificates
from magnumclient.v1 import cluster_templates
from magnumclient.v1 import clusters
from magnumclient.v1 import mservices
from magnumclient.v1 import nodegroups
from magnumclient.v1 import quotas
from magnumclient.v1 import stats
profiler = importutils.try_import("osprofiler.profiler")
DEFAULT_SERVICE_TYPE = 'container-infra'
LEGACY_DEFAULT_SERVICE_TYPE = 'container'
def _load_session(cloud=None, insecure=False, timeout=None, **kwargs):
cloud_config = os_client_config.OpenStackConfig()
cloud_config = cloud_config.get_one_cloud(
cloud=cloud,
verify=not insecure,
**kwargs)
verify, cert = cloud_config.get_requests_verify_args()
auth = cloud_config.get_auth()
session = ksa_session.Session(
auth=auth, verify=verify, cert=cert,
timeout=timeout)
return session
def _load_service_type(session,
service_type=None, service_name=None,
interface=None, region_name=None):
try:
# Trigger an auth error so that we can throw the exception
# we always have
session.get_endpoint(
service_type=service_type,
service_name=service_name,
interface=interface,
region_name=region_name)
except catalog.EndpointNotFound:
service_type = LEGACY_DEFAULT_SERVICE_TYPE
try:
session.get_endpoint(
service_type=service_type,
service_name=service_name,
interface=interface,
region_name=region_name)
except Exception as e:
raise RuntimeError(str(e))
except Exception as e:
raise RuntimeError(str(e))
return service_type
def _load_session_client(session=None, endpoint_override=None, username=None,
project_id=None, project_name=None,
auth_url=None, password=None, auth_type=None,
insecure=None, user_domain_id=None,
user_domain_name=None, project_domain_id=None,
project_domain_name=None, auth_token=None,
timeout=None, service_type=None, service_name=None,
interface=None, region_name=None, api_version=None,
**kwargs):
if not session:
session = _load_session(
username=username,
project_id=project_id,
project_name=project_name,
auth_url=auth_url,
password=password,
auth_type=auth_type,
insecure=insecure,
user_domain_id=user_domain_id,
user_domain_name=user_domain_name,
project_domain_id=project_domain_id,
project_domain_name=project_domain_name,
auth_token=auth_token,
timeout=timeout,
**kwargs
)
if not endpoint_override:
service_type = _load_service_type(
session,
service_type=service_type,
service_name=service_name,
interface=interface,
region_name=region_name,
)
return httpclient.SessionClient(
service_type=service_type,
service_name=service_name,
interface=interface,
region_name=region_name,
session=session,
endpoint_override=endpoint_override,
api_version=api_version,
)
class Client(object):
def __init__(self, username=None, api_key=None, project_id=None,
project_name=None, auth_url=None, magnum_url=None,
endpoint_type=None, endpoint_override=None,
service_type=DEFAULT_SERVICE_TYPE,
region_name=None, input_auth_token=None,
session=None, password=None, auth_type='password',
interface=None, service_name=None, insecure=False,
user_domain_id=None, user_domain_name=None,
project_domain_id=None, project_domain_name=None,
auth_token=None, timeout=600, api_version=None,
**kwargs):
        # We have to keep the api_key arg for backwards compat, but let's
# remove it from the rest of our code since it's not a keystone
# concept
if not password:
password = api_key
# Backwards compat for people passing in input_auth_token
if input_auth_token:
auth_token = input_auth_token
# Backwards compat for people passing in endpoint_type
if endpoint_type:
interface = endpoint_type
# osc sometimes give 'None' value
if not interface:
interface = 'public'
if interface.endswith('URL'):
interface = interface[:-3]
# fix (yolanda): os-cloud-config is using endpoint_override
# instead of magnum_url
if magnum_url and not endpoint_override:
endpoint_override = magnum_url
if endpoint_override and auth_token:
self.http_client = httpclient.HTTPClient(
endpoint_override,
token=auth_token,
api_version=api_version,
timeout=timeout,
insecure=insecure,
**kwargs
)
else:
self.http_client = _load_session_client(
session=session,
endpoint_override=endpoint_override,
username=username,
project_id=project_id,
project_name=project_name,
auth_url=auth_url,
password=password,
auth_type=auth_type,
insecure=insecure,
user_domain_id=user_domain_id,
user_domain_name=user_domain_name,
project_domain_id=project_domain_id,
project_domain_name=project_domain_name,
auth_token=auth_token,
timeout=timeout,
service_type=service_type,
service_name=service_name,
interface=interface,
region_name=region_name,
api_version=api_version,
**kwargs
)
self.bays = bays.BayManager(self.http_client)
self.clusters = clusters.ClusterManager(self.http_client)
self.certificates = certificates.CertificateManager(self.http_client)
self.baymodels = baymodels.BayModelManager(self.http_client)
self.cluster_templates = \
cluster_templates.ClusterTemplateManager(self.http_client)
self.mservices = mservices.MServiceManager(self.http_client)
profile = kwargs.pop("profile", None)
if profiler and profile:
# Initialize the root of the future trace: the created trace ID
# will be used as the very first parent to which all related
# traces will be bound to. The given HMAC key must correspond to
# the one set in magnum-api magnum.conf, otherwise the latter
# will fail to check the request signature and will skip
# initialization of osprofiler on the server side.
profiler.init(profile)
self.stats = stats.StatsManager(self.http_client)
self.quotas = quotas.QuotasManager(self.http_client)
self.nodegroups = nodegroups.NodeGroupManager(self.http_client)
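# Illustrative construction (credentials and URLs are placeholders, not real
# values; the 'clusters.list()' call is the typical first request but is only
# assumed here, not defined in this file):
#
#   client = Client(username='admin', password='secret',
#                   project_name='admin', auth_url='http://controller:5000/v3',
#                   user_domain_name='Default', project_domain_name='Default')
#   clusters = client.clusters.list()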
| apache-2.0 | -6,866,224,370,728,681,000 | 36.954338 | 77 | 0.611165 | false |
noxdafox/pebble | pebble/common.py | 1 | 5693 | # This file is part of Pebble.
# Copyright (c) 2013-2021, Matteo Cafasso
# Pebble is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
# Pebble is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with Pebble. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import os
import pickle
import signal
from threading import Thread
from traceback import format_exc
from concurrent.futures import Future
class ProcessExpired(OSError):
"""Raised when process dies unexpectedly."""
def __init__(self, msg, code=0):
super(ProcessExpired, self).__init__(msg)
self.exitcode = code
class PebbleFuture(Future):
# Same as base class, removed logline
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
True) then any threads waiting on the future completing (though calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if set_result() or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
raise RuntimeError('Future in unexpected state')
class ProcessFuture(PebbleFuture):
def cancel(self):
"""Cancel the future.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it has already completed.
"""
with self._condition:
if self._state == FINISHED:
return False
if self._state in (CANCELLED, CANCELLED_AND_NOTIFIED):
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
class RemoteTraceback(Exception):
"""Traceback wrapper for exceptions in remote process.
Exception.__cause__ requires a BaseException subclass.
"""
def __init__(self, traceback):
self.traceback = traceback
def __str__(self):
return self.traceback
class RemoteException(object):
"""Pickling wrapper for exceptions in remote process."""
def __init__(self, exception, traceback):
self.exception = exception
self.traceback = traceback
def __reduce__(self):
return rebuild_exception, (self.exception, self.traceback)
def rebuild_exception(exception, traceback):
exception.__cause__ = RemoteTraceback(traceback)
return exception
def launch_thread(name, function, daemon, *args, **kwargs):
thread = Thread(target=function, name=name, args=args, kwargs=kwargs)
thread.daemon = daemon
thread.start()
return thread
def launch_process(name, function, daemon, mp_context, *args, **kwargs):
process = mp_context.Process(
target=function, name=name, args=args, kwargs=kwargs)
process.daemon = daemon
process.start()
return process
def stop_process(process):
"""Does its best to stop the process."""
process.terminate()
process.join(3)
if process.is_alive() and os.name != 'nt':
try:
os.kill(process.pid, signal.SIGKILL)
process.join()
except OSError:
return
if process.is_alive():
        raise RuntimeError("Unable to terminate PID %d" % process.pid)
def execute(function, *args, **kwargs):
"""Runs the given function returning its results or exception."""
try:
return function(*args, **kwargs)
except Exception as error:
error.traceback = format_exc()
return error
def process_execute(function, *args, **kwargs):
"""Runs the given function returning its results or exception."""
try:
return function(*args, **kwargs)
except Exception as error:
error.traceback = format_exc()
return RemoteException(error, error.traceback)
def send_result(pipe, data):
"""Send result handling pickling and communication errors."""
try:
pipe.send(data)
except (pickle.PicklingError, TypeError) as error:
error.traceback = format_exc()
pipe.send(RemoteException(error, error.traceback))
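
# Illustrative sketch (added for clarity, not part of the original module): how
# process_execute() and send_result() cooperate. In the real flow the sending side
# runs inside a child started with launch_process(); both ends live in one process
# here only to keep the example portable.
def _demo_execute_and_send():
    """Hedged example of shipping a captured failure through a Pipe."""
    import multiprocessing

    reader, writer = multiprocessing.Pipe(duplex=False)
    outcome = process_execute(lambda x, y: x / y, 1, 0)    # ZeroDivisionError is captured
    send_result(writer, outcome)                           # pickled via RemoteException
    received = reader.recv()
    assert isinstance(received, ZeroDivisionError)
    assert received.__cause__ is not None                  # remote traceback attached
    return received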
SLEEP_UNIT = 0.1
# Borrowed from concurrent.futures
PENDING = 'PENDING'
RUNNING = 'RUNNING'
FINISHED = 'FINISHED'
CANCELLED = 'CANCELLED'
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
| lgpl-3.0 | 3,995,260,078,226,853,400 | 28.194872 | 78 | 0.657825 | false |
derwentx/WooGenerator | tests/test_syncupdate.py | 1 | 2736 | import traceback
import unittest
from pprint import pformat
from context import get_testdata, TESTS_DATA_DIR, woogenerator
from woogenerator.namespace.core import (MatchNamespace, ParserNamespace,
SettingsNamespaceProto,
UpdateNamespace)
from woogenerator.conf.parser import ArgumentParserCommon
from woogenerator.utils import Registrar, TimeUtils
class TestSyncUpdateAbstract(unittest.TestCase):
config_file = None
settings_namespace_class = SettingsNamespaceProto
argument_parser_class = ArgumentParserCommon
local_work_dir = TESTS_DATA_DIR
override_args = ''
debug = False
def setUp(self):
self.import_name = TimeUtils.get_ms_timestamp()
self.settings = self.settings_namespace_class()
self.settings.local_work_dir = self.local_work_dir
self.settings.local_live_config = None
self.settings.local_test_config = self.config_file
self.settings.init_settings(self.override_args)
# with open(yaml_path) as stream:
# config = yaml.load(stream)
# merge_mode = config.get('merge-mode', 'sync')
# master_name = config.get('master-name', 'MASTER')
# slave_name = config.get('slave-name', 'SLAVE')
# default_last_sync = config.get('default-last-sync')
#
# SyncUpdateUsr.set_globals(
# master_name, slave_name, merge_mode, default_last_sync)
Registrar.DEBUG_ERROR = False
Registrar.DEBUG_WARN = False
Registrar.DEBUG_MESSAGE = False
Registrar.DEBUG_PROGRESS = False
if self.debug:
# FieldGroup.perform_post = True
# FieldGroup.DEBUG_WARN = True
# FieldGroup.DEBUG_MESSAGE = True
# FieldGroup.DEBUG_ERROR = True
# SyncUpdateUsr.DEBUG_WARN = True
# SyncUpdateUsr.DEBUG_MESSAGE = True
# SyncUpdateUsr.DEBUG_ERROR = True
Registrar.DEBUG_ERROR = True
Registrar.DEBUG_WARN = True
Registrar.DEBUG_MESSAGE = True
Registrar.DEBUG_PROGRESS = True
Registrar.DEBUG_UPDATE = True
# Registrar.DEBUG_USR = True
# Registrar.DEBUG_CONTACT = True
# Registrar.DEBUG_NAME = True
# FieldGroup.DEBUG_CONTACT = True
# FieldGroup.enforce_mandatory_keys = False
def fail_syncupdate_assertion(self, exc, sync_update):
msg = "failed assertion: %s\n%s\n%s" % (
pformat(sync_update.sync_warnings.items()),
sync_update.tabulate(tablefmt='simple'),
            traceback.format_exc(),
)
raise AssertionError(msg)
| gpl-2.0 | 173,407,668,335,882,200 | 37.535211 | 73 | 0.617325 | false |
clubit/edi-workflow | edi_tools/wizard/edi_wizard_archive_incoming.py | 1 | 1327 | from openerp.osv import osv
from openerp.tools.translate import _
from openerp import netsvc
class edi_tools_edi_wizard_archive_incoming(osv.TransientModel):
_name = 'edi.tools.edi.wizard.archive.incoming'
_description = 'Archive EDI Documents'
''' edi.tools.edi.wizard.archive.incoming:archive()
--------------------------------------------------
This method is used by the EDI wizard to push
multiple documents to the workflow "archived" state.
---------------------------------------------------- '''
def archive(self, cr, uid, ids, context=None):
# Get the selected documents
# --------------------------
ids = context.get('active_ids',[])
if not ids:
raise osv.except_osv(_('Warning!'), _("You did not provide any documents to archive!"))
# Push each document to archived
# ------------------------------
wf_service = netsvc.LocalService("workflow")
for document in self.pool.get('edi.tools.edi.document.incoming').browse(cr, uid, ids, context):
if document.state in ['new','ready','processed','in_error']:
wf_service.trg_validate(uid, 'edi.tools.edi.document.incoming', document.id, 'button_to_archived', cr)
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 | 7,202,058,422,622,266,000 | 44.758621 | 118 | 0.562924 | false |
jamesroutley/formation | test/test_parameter.py | 1 | 2073 | # -*- coding: utf-8 -*-
import pytest
import formation.parameter
from formation.parameter import Parameter
@pytest.mark.parametrize("parameter,expected_output", [
(
Parameter("A"),
"Parameter(title='A', param_type='String', **{})"
),
(
Parameter("A", "Number"),
"Parameter(title='A', param_type='Number', **{})"
),
(
Parameter("A", "Number", description="My description"),
"Parameter(title='A', param_type='Number', "
"**{'description': 'My description'})"
),
(
Parameter("A", "Number", description="My description"),
"Parameter(title='A', param_type='Number', **{'description': "
"'My description'})"
)
])
def test_repr(parameter, expected_output):
assert parameter.__repr__() == expected_output
@pytest.mark.parametrize("left,right,output", [
(Parameter("A"), Parameter("A"), True),
(Parameter("A"), Parameter("B"), False),
(Parameter("A"), 1, False),
(Parameter("A", default="a"), Parameter("A", default="a"), True)
])
def test_eq(left, right, output):
assert (left == right) == output
@pytest.mark.parametrize("snake,camel", [
("", ""),
("my_words", "MyWords"),
("word_1", "Word1"),
(" ", " "),
("1_word", "1Word")
])
def test_snake_to_camel(snake, camel):
output = formation.parameter._snake_to_camel(snake)
assert output == camel
def test_validate_kwargs_with_expected_keywords():
allowed_properties = [
"allowed_pattern",
"allowed_values",
"constraint_description",
"default",
"description",
"max_length",
"max_value",
"min_length",
"min_value",
"no_echo"
]
kwargs = {
property_name: "mock_value"
for property_name in allowed_properties
}
formation.parameter._validate_kwargs(kwargs)
def test_validate_kwargs_with_unexpected_keyword():
kwargs = {"unexpected_keyword": "mock_value"}
with pytest.raises(TypeError):
formation.parameter._validate_kwargs(kwargs)
| apache-2.0 | 6,632,965,659,923,859,000 | 25.576923 | 70 | 0.581766 | false |
samuelchen/code-snippets | python/logger.py | 1 | 1906 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2013-12-12
@author: samuelchen
'''
import logging, logging.handlers
import os
import sys
class LogLevelFilter(object):
def __init__(self, level):
self.__level = level
def filter(self, logRecord):
return logRecord.levelno <= self.__level
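
# Illustrative sketch (added for clarity, not part of the original file): the filter
# above passes only records at or below its level, which is how getLogger() below
# keeps ERROR and CRITICAL records off stdout and routes them to stderr instead.
# The logger name used here is an assumption for the example only.
def _demo_level_filter():
    '''Hedged example of capping a stdout handler at WARN.'''
    demo = logging.getLogger('LogLevelFilter.demo')
    demo.setLevel(logging.DEBUG)
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.addFilter(LogLevelFilter(logging.WARN))   # DEBUG..WARN pass, ERROR+ are dropped
    demo.addHandler(handler)
    demo.warning('shown on stdout')
    demo.error('suppressed by LogLevelFilter')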
def setLogPath(path='p2python.log'):
os.environ['P2PYTHON_LOG'] = path
fh = ch = eh = None
log_path = ''
def getLogger(name='P2Python'):
global fh, ch, eh, log_path
if not log_path and 'P2PYTHON_LOG' in os.environ:
log_path = os.environ['P2PYTHON_LOG']
else:
log_path = 'p2python.log'
setLogPath()
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter( \
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# file handler.
if not fh:
fh = logging.handlers.TimedRotatingFileHandler(log_path)
fh.suffix = "%Y%m%d.log"
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
# console handler
if not ch:
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
ch.addFilter(LogLevelFilter(logging.WARN))
ch.setFormatter(formatter)
# stderr handler
if not eh:
eh = logging.StreamHandler(stream=sys.stderr)
eh.setLevel(logging.ERROR)
eh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
logger.addHandler(eh)
logger.propagate = False
return logger
if __name__ == '__main__':
import logger
log = logger.getLogger()
log.debug('This is a debug message.')
log.info('This is a info message.')
log.error('This is a error message.')
| gpl-2.0 | 3,733,849,311,889,806,300 | 23.756757 | 67 | 0.605981 | false |
ai-se/Tree-Learner | dEvol.py | 1 | 8919 | #! /Users/rkrsn/miniconda/bin/python
from __future__ import print_function, division
from os import environ, getcwd
import sys
# Update PYTHONPATH
cwd = getcwd() # Current Directory
axe = cwd + '/axe/' # AXE
pystat = cwd + '/pystats/' # PySTAT
where = cwd + '/_imports/' # Where
sys.path.extend([axe, pystat, cwd, where])
from demos import *
import sk
from settings import *
from settingsWhere import *
from pdb import set_trace
from abcd import _Abcd
from Prediction import rforest, CART, Bugs, where2prd
from methods1 import explore
from methods1 import createTbl
from random import uniform as rand, randint as randi, choice as any
tree = treeings()
# set_trace()
def say(l):
sys.stdout.write(str(l))
def settings(**d):
return o(
name="Differention Evolution",
what="DE tuner. Tune the planner parameters.",
author="Rahul Krishna",
adaptation="https://github.com/ai-se/Rahul/blob/master/DEADANT/deadant.py",
copyleft="(c) 2014, MIT license, http://goo.gl/3UYBp",
seed=1,
np=10,
k=100,
tiny=0.01,
de=o(
np=5,
iter=5,
epsilon=1.01,
N=10,
f=0.5,
cf=0.4,
maxIter=20,
lives=5)).update(
**d)
The = settings()
class ABCD():
"Statistics Stuff, confusion matrix, all that jazz..."
def __init__(self, before, after):
self.actual = before
self.predicted = after
self.TP, self.TN, self.FP, self.FN = 0, 0, 0, 0
self.abcd()
def abcd(self):
for a, b in zip(self.actual, self.predicted):
if a == 1 and b == 1:
self.TP += 1
if a == 0 and b == 0:
self.TN += 1
if a == 0 and b == 1:
self.FP += 1
if a == 1 and b == 0:
self.FN += 1
def all(self):
Sen = self.TP / (self.TP + self.FN)
Spec = self.TN / (self.TN + self.FP)
Prec = self.TP / (self.TP + self.FP)
Acc = (self.TP + self.TN) / (self.TP + self.FN + self.TN + self.FP)
F1 = 2 * self.TP / (2 * self.TP + self.FP + self.FN)
G = 2 * Sen * Spec / (Sen + Spec)
G1 = Sen * Spec / (Sen + Spec)
return Sen, Spec, Prec, Acc, F1, G
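
# Illustrative worked example (added for clarity, not part of the original file): with
# actual=[1, 1, 0, 0] and predicted=[1, 0, 0, 1] there is one each of TP, FN, TN and FP,
# so sensitivity, specificity, precision and accuracy all evaluate to 0.5.
def _demo_abcd():
  "Hedged example of the ABCD confusion-matrix helper."
  metrics = ABCD(before=[1, 1, 0, 0], after=[1, 0, 0, 1])
  sen, spec, prec, acc, f1, g = metrics.all()
  assert (sen, spec, prec, acc) == (0.5, 0.5, 0.5, 0.5)
  return f1, g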
class diffEvol(object):
"""
Differential Evolution
"""
def __init__(self, model, data):
self.frontier = []
self.model = model(data)
self.xbest = []
def new(self):
# Creates a new random instance
return [rand(d[0], d[1]) for d in self.model.indep()]
def initFront(self, N):
# Initialize frontier
for _ in xrange(N):
self.frontier.append(self.new())
def extrapolate(self, xbest, l1, l2):
try:
return [max(d[0],
min(d[1], y + The.de.f * (z - a))) for y, z, a,
d in zip(xbest, l1, l2, self.model.indep())]
except TypeError:
set_trace()
def one234(self, one, pop, f=lambda x: id(x)):
def oneOther():
x = any(pop)
while f(x) in seen:
x = any(pop)
seen.append(f(x))
return x
seen = [f(one)]
return oneOther(), oneOther()
# def top234(self, one, pop):
def dominates(self, one, two):
# set_trace()
return self.model.depen(one) > self.model.depen(two)
def dominates2(self, one, two):
"Binary Domination"
# set_trace()
return self.model.depen(
one)[0] > self.model.depen(
two)[0] and self.model.depen(
one)[1] > self.model.depen(
two)[1]
def sortbyscore(self):
# front = []
# for f in self.frontier:
# sc = self.model.depen(f)
# f.append(sc)
# front.append(f)
return sorted(
self.frontier, key=lambda F: self.model.depen(F), reverse=True)
def DE(self):
self.initFront(The.de.N)
lives = The.de.lives
iter = 0
while lives > 0 and iter < 30:
better = False
self.xbest = self.sortbyscore()[0]
# print('Iter = %d' % (iter))
for pos in xrange(len(self.frontier)):
iter += 1
# print('Pos: %d' % (pos))
# set_trace()
lives -= 1
l1, l2 = self.one234(self.frontier[pos], self.frontier)
new = self.extrapolate(self.xbest, l1, l2)
if self.dominates(new, self.frontier[pos]):
self.frontier.pop(pos)
self.frontier.insert(pos, new)
better = True
lives += 1
# print('!')
# print(lives)
if self.model.depen(new) > self.model.depen(self.xbest):
self.xbest = new
# print(self.model.depen(new))
elif self.dominates(self.frontier[pos], new):
# lives -= 1
# print('.')
# print(lives)
better = False
if self.model.depen(
self.frontier[pos]) > self.model.depen(
self.xbest):
self.xbest = self.frontier[pos]
# print(self.model.depen(new))
else:
self.frontier.append(new)
if self.model.depen(new) > self.model.depen(self.xbest):
self.xbest = new
better = True
lives += 1
# print(
# 'Non-Dominant. Lives: %d. Frontier Size= %d' %
# (lives, len(
# self.frontier)))
# self.frontier = self.sortbyscore()[:10]
# print(self.model.depen(self.xbest))
return self.xbest
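
# Illustrative sketch (added for clarity, not part of the original file): the model
# handed to diffEvol only needs a constructor taking the data plus indep()/depen()
# methods, where indep() returns one (lo, hi) bound per decision and depen() scores a
# candidate (bigger is better). The toy model below, maximising -sum(x^2) over
# [-5, 5]^3, is an assumption made for this example and is not one of the tuners below.
class _DemoSphereModel(object):
  "Toy model for demonstrating the diffEvol interface."
  def __init__(self, data):
    self.data = data
  def indep(self):
    return [(-5, 5), (-5, 5), (-5, 5)]
  def depen(self, candidate):
    return -sum(x * x for x in candidate)

def _demo_de():
  "Hedged example: the best candidate should drift toward the origin."
  return diffEvol(_DemoSphereModel, None).DE()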
class tuneRF(object):
# Tune RF
def __init__(self, data):
self.data = data
self.train = createTbl(data[:-1],
_smote=False,
isBin=True,
bugThres=1,
duplicate=True)
self.test = createTbl([data[-1]], isBin=True, bugThres=1)
# set_trace()
def depen(self, rows):
mod = rforest(self.train, self.test, tunings=rows, smoteit=True)
prec = ABCD(before=Bugs(self.test), after=mod).all()[2]
pdpf = ABCD(before=Bugs(self.test), after=mod).all()[:2]
return prec
def indep(self):
return [(50, 150) # n_estimators
, (1, 100) # max_features
, (1, 10) # min_samples_leaf
, (2, 10) # min_samples_split
, (2, 50) # max_leaf_nodes
]
class tuneWhere2(object):
# Tune where
def __init__(self, data):
self.train = data[:-1]
self.test = data[-1]
self.tree = treeings()
self.where = None
def depen(self, row):
# My where2pred() takes data in string format. Ex:
# '../Data/ant/ant-1.6.csv'
self.where = defaults().update(
minSize=row[4], depthMin=int(
row[5]), depthMax=int(
row[6]), prune=row[7] > 0.5)
self.tree.infoPrune = row[1]
self.tree.m = int(row[2])
self.tree.n = int(row[3])
self.tree.prune = row[8] > 0.5
actual = Bugs(createTbl([self.test], isBin=True))
preds = where2prd(
self.train, [
self.test], tunings=[
self.where, self.tree], thresh=row[0])
return _Abcd(before=actual, after=preds, show=False)[-1]
def indep(self):
return [(0, 1) # Threshold
, (0, 1) # InfoPrune
, (1, 10) # m
, (1, 10) # n
, (0, 1) # Min Size
, (1, 6) # Depth Min
, (1, 20) # Depth Max
, (0, 1) # Where Prune?
, (0, 1)] # Tree Prune?
class tuneCART(object):
# Tune CART
def __init__(self, data):
self.data = data
self.train = createTbl(data[-2:-1],
_smote=False,
isBin=True,
bugThres=1,
duplicate=True)
self.test = createTbl([data[-1]], isBin=True, bugThres=1)
def depen(self, rows):
mod = CART(self.train, self.test, tunings=rows, smoteit=False)
g = _Abcd(before=Bugs(self.test), after=mod, show=False)[-1]
return g
def indep(self):
return [(1, 50) # max_depth
, (2, 20) # min_samples_split
, (1, 20) # min_samples_leaf
, (1, 100) # max features
, (2, 1e3)] # max_leaf_nodes
def _test(data):
m = tuneRF(data)
vals = [(m.any()) for _ in range(10)]
vals1 = [m.score(v) for v in vals]
print(vals, vals1)
def _de(model, data):
"DE"
DE = diffEvol(model, data)
# set_trace()
res = DE.DE()
# print(model.depen(res))
return res
def tuner(model, data):
if model == rforest:
return _de(tuneRF, data)
elif model == CART:
return _de(tuneCART, data)
if __name__ == '__main__':
from timeit import time
data = explore(dir='../Data/')[0][5] # Only training data to tune.
print(data)
# set_trace()
for m in [tuneRF]:
t = time.time()
mdl = m(data)
# _test(data)
tunings = _de(m, data)
print(tunings)
print(mdl.depen(tunings))
print(time.time() - t)
# print _de()
# print main()
# import sk; xtile = sk.xtile
# print xtile(G)
# main(dir = 'Data/')
| unlicense | 5,297,623,826,683,390,000 | 25.703593 | 81 | 0.528086 | false |
simpeg/simpeg | tests/em/vrm/waveform_tests.py | 1 | 2876 | import unittest
import SimPEG.VRM as VRM
import numpy as np
class VRM_waveform_tests(unittest.TestCase):
def test_discrete(self):
"""
        Test ensures that when the different waveform classes are used to
        construct the same waveform, the characteristic decays they
        produce are the same.
"""
times = np.logspace(-4, -2, 3)
t = np.r_[-0.00200001, -0.002, -0.0000000001, 0.]
I = np.r_[0., 1., 1., 0.]
waveObj1 = VRM.WaveformVRM.SquarePulse(delt=0.002, t0=0.)
waveObj2 = VRM.WaveformVRM.ArbitraryDiscrete(t_wave=t, I_wave=I)
waveObj3 = VRM.WaveformVRM.ArbitraryPiecewise(t_wave=t, I_wave=I)
waveObj4 = VRM.WaveformVRM.Custom(
times=times, eta=waveObj1.getCharDecay('b', times))
decay1b = waveObj1.getCharDecay('b', times)
decay2b = waveObj2.getCharDecay('b', times)
decay3b = waveObj3.getCharDecay('b', times)
decay4b = waveObj4.getCharDecay()
decay1dbdt = waveObj1.getCharDecay('dbdt', times)
decay2dbdt = waveObj2.getCharDecay('dbdt', times)
decay3dbdt = waveObj3.getCharDecay('dbdt', times)
err1 = np.max(np.abs((decay2b-decay1b)/decay1b))
err2 = np.max(np.abs((decay3b-decay1b)/decay1b))
err3 = np.max(np.abs((decay4b-decay1b)/decay1b))
err4 = np.max(np.abs((decay2dbdt-decay1dbdt)/decay1dbdt))
err5 = np.max(np.abs((decay3dbdt-decay1dbdt)/decay1dbdt))
self.assertTrue(
err1 < 0.01 and err2 < 0.01 and err3 < 0.01 and err4 < 0.025 and err5 < 0.01
)
def test_loguniform(self):
"""
Tests to make sure log uniform decay and characteristic decay
        match over the range in which the approximation is valid.
"""
times = np.logspace(-4, -2, 3)
waveObj1 = VRM.WaveformVRM.StepOff(t0=0.)
waveObj2 = VRM.WaveformVRM.SquarePulse(delt=0.02)
chi0 = np.array([0.])
dchi = np.array([0.01])
tau1 = np.array([1e-10])
tau2 = np.array([1e3])
decay1b = (dchi/np.log(tau2/tau1))*waveObj2.getCharDecay('b', times)
decay2b = waveObj2.getLogUniformDecay('b', times, chi0, dchi, tau1, tau2)
decay1dbdt = (dchi/np.log(tau2/tau1))*waveObj1.getCharDecay('dbdt', times)
decay2dbdt = waveObj1.getLogUniformDecay('dbdt', times, chi0, dchi, tau1, tau2)
decay3dbdt = (dchi/np.log(tau2/tau1))*waveObj2.getCharDecay('dbdt', times)
decay4dbdt = waveObj2.getLogUniformDecay('dbdt', times, chi0, dchi, tau1, tau2)
err1 = np.max(np.abs((decay2b-decay1b)/decay1b))
err2 = np.max(np.abs((decay2dbdt-decay1dbdt)/decay1dbdt))
err3 = np.max(np.abs((decay4dbdt-decay3dbdt)/decay3dbdt))
self.assertTrue(err1 < 0.01 and err2 < 0.01 and err3 < 0.01)
if __name__ == '__main__':
unittest.main()
| mit | 2,723,740,503,742,058,500 | 35.871795 | 88 | 0.620654 | false |
mverwe/UserCode | topConfigs/crabConfigPPMCWZTo3LNu.py | 1 | 1269 | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.section_('General')
config.General.requestName = 'MCWZTo3LNU_v2'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = False
config.section_('JobType')
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'runForestAOD_pp_MC_75X.py'
config.section_('Data')
config.Data.inputDataset = '/WZTo3LNU_NNPDF30_TuneCUETP8M1_5020GeV-powheg/HINppWinter16DR-75X_mcRun2_asymptotic_ppAt5TeV_v3-v1/AODSIM'
#config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
#config.Data.splitting = "EventAwareLumiBased"
#config.Data.unitsPerJob = 40000
#config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions15/5TeV/Cert_262081-262273_5TeV_PromptReco_Collisions15_25ns_JSON_v2.txt'
config.Data.outLFNDirBase = '/store/group/cmst3/group/hintt/mverweij/PP5TeV/MC'
config.Data.publication = False #True
config.Data.outputDatasetTag = ''
config.section_('User')
config.section_('Site')
#config.Site.whitelist = ['T2_US_MIT']
#config.Site.blacklist = ['T2_US_Nebraska','T2_US_Florida','T2_US_Wisconsin','T2_US_Caltech']
config.Site.storageSite = 'T2_CH_CERN'
| cc0-1.0 | 6,060,302,450,106,162,000 | 37.454545 | 160 | 0.78093 | false |
nextgis/nextgisweb | setup.py | 1 | 4264 | # -*- coding: utf-8 -*-
import sys
import io
import os
import os.path
from stat import S_IXUSR, S_IXGRP, S_IXOTH
from subprocess import check_output, CalledProcessError
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
with io.open('VERSION', 'r') as fd:
VERSION = fd.read().rstrip()
try:
gv = check_output(['gdal-config', '--version'], universal_newlines=True).strip()
except CalledProcessError:
gv = None
requires = [
# Do not use a specific version of system-like packages because their presence is expected
'pip',
'six',
# Other dependencies
'alembic==1.4.2',
'pyramid==1.10.1',
'SQLAlchemy==1.2.16',
'transaction==2.4.0',
'pyramid_tm==2.2.1',
'pyramid_debugtoolbar==4.5.1',
'pyramid_mako==1.0.2',
'zope.sqlalchemy==1.1',
'zope.interface<5',
'zope.event<5',
'bunch==1.0.1',
'flufl.enum==4.1.1',
'waitress==1.2.0',
'pygdal' + (('==%s.*' % gv) if gv else ''), # TODO: Add >=2.3.0
'psycopg2==2.8.5',
'geoalchemy2==0.5.0',
'shapely==1.7.1',
'affine==2.2.2',
'geojson==2.4.1',
'pillow==5.4.1',
'lxml==4.3.0',
'passlib==1.7.1',
'requests[security]==2.22.0',
'babel==2.6.0',
'sentry-sdk==0.14.3',
'python-magic==0.4.15',
'backports.tempfile==1.0',
'pyproj<3',
'elasticsearch>=7.0.0,<8.0.0',
'elasticsearch-dsl>=7.1.0,<8.0.0',
'unicodecsv==0.14.1',
'flatdict==4.0.1',
'psutil==5.7.3',
'zipstream-new==1.1.7',
'cachetools==3.1.1',
'networkx',
# TODO: Move to dev or test dependencies
'freezegun',
'pytest',
'pytest-watch',
'pytest-flake8',
'webtest',
'flake8',
'flake8-future-import',
'modernize',
]
if sys.version_info[0:2] < (3, 6):
requires.append('python2-secrets')
requires.append('OWSLib==0.17.1')
else:
requires.append('OWSLib==0.24.1')
extras_require = {
'dev': ['pdbpp', 'ipython']
}
entry_points = {
'paste.app_factory': [
'main = nextgisweb:main'
],
'babel.extractors': [
'hbs = nextgisweb.i18n.hbs:extract',
],
'pytest11': [
'nextgisweb = nextgisweb.pytest',
'nextgisweb.core = nextgisweb.core.test',
'nextgisweb.pyramid = nextgisweb.pyramid.test',
'nextgiswev.auth = nextgisweb.auth.test',
'nextgiswev.resource = nextgisweb.resource.test',
],
'nextgisweb.packages': ['nextgisweb = nextgisweb:pkginfo', ],
'nextgisweb.amd_packages': [
'nextgisweb = nextgisweb:amd_packages',
],
}
class DevelopCommand(develop):
def run(self):
develop.run(self)
        # Builtin console_scripts entry point scripts are very slow because they
        # check package requirements on every run. So we use generated wrapper scripts.
bin_dir, _ = os.path.split(sys.executable)
for name, module, func in (
('nextgisweb', 'nextgisweb.script', 'main'),
('nextgisweb-config', 'nextgisweb.script', 'config'),
('nextgisweb-i18n', 'nextgisweb.i18n.script', 'main'),
):
sf = os.path.join(bin_dir, name)
with open(sf, 'w') as fd:
fd.write("#!{}\n".format(sys.executable))
fd.write("from {} import {} as main\n".format(module, func))
fd.write("if __name__ == '__main__': main()\n")
st = os.stat(sf)
os.chmod(sf, st.st_mode | S_IXUSR | S_IXGRP | S_IXOTH)
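
# For reference (added illustration, not part of the original script): each generated
# wrapper is a three-line executable of roughly this shape, with the shebang pointing
# at the interpreter of the active environment:
#
#   #!/path/to/env/bin/python
#   from nextgisweb.script import main as main
#   if __name__ == '__main__': main()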
class InstallCommand(install):
def run(self):
raise RuntimeError(
"Only development mode installation "
"(pip install -e ...) is supported!")
install.run(self)
setup(
name='nextgisweb',
version=VERSION,
description='nextgisweb',
author='NextGIS',
author_email='[email protected]',
url='http://nextgis.com/nextgis-web',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
python_requires=">=2.7.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4",
install_requires=requires,
extras_require=extras_require,
entry_points=entry_points,
cmdclass=dict(
develop=DevelopCommand,
install=InstallCommand,
)
)
| gpl-3.0 | 6,692,886,225,111,922,000 | 25.65 | 94 | 0.582552 | false |
gppezzi/easybuild-framework | test/framework/toolchainvariables.py | 1 | 7445 | # #
# Copyright 2012-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for tools/toolchain/variables.py.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import sys
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered
from unittest import TextTestRunner
from easybuild.tools.toolchain.toolchainvariables import ToolchainVariables
from easybuild.tools.toolchain.variables import CommandFlagList
class ToolchainVariablesTest(EnhancedTestCase):
""" Baseclass for toolchain variables testcases """
def test_toolchainvariables(self):
# DEFAULTCLASS is FlagList
class TCV(ToolchainVariables):
LINKER_TOGGLE_START_STOP_GROUP = {
'start': '-Xstart',
'stop': '-Xstop',
}
LINKER_TOGGLE_STATIC_DYNAMIC = {
'static': '-Bstatic',
'dynamic': '-Bdynamic',
}
tcv = TCV()
self.assertEqual(str(tcv), "{}")
tcv['CC'] = 'gcc'
self.assertEqual(str(tcv), "{'CC': [['gcc']]}")
self.assertEqual(str(tcv['CC']), "gcc")
tcv.join('MPICC', 'CC')
self.assertEqual(str(tcv['MPICC']), "gcc")
tcv['F90'] = ['gfortran', 'foo', 'bar']
self.assertEqual(tcv['F90'].__repr__(), "[['gfortran', 'foo', 'bar']]")
self.assertEqual(str(tcv['F90']), "gfortran -foo -bar")
tcv.nappend('FLAGS', ['one', 'two'])
x = tcv.nappend('FLAGS', ['three', 'four'])
x.POSITION = -5 # sanitize will reorder, default POSITION is 0
self.assertEqual(tcv['FLAGS'].__repr__(), "[['one', 'two'], ['three', 'four']]")
tcv['FLAGS'].sanitize() # sort on position, called by __str__ also
self.assertEqual(tcv['FLAGS'].__repr__(), "[['three', 'four'], ['one', 'two']]")
self.assertEqual(str(tcv['FLAGS']), "-three -four -one -two")
# LIBBLAS is a LibraryList
lib = tcv.nappend('LIBBLAS', ['d', 'e', 'f'])
lib.POSITION = 5 # relative position after default
lib = tcv.nappend('LIBBLAS', ['a', 'b', 'c'])
tcv.add_begin_end_linkerflags(lib, toggle_startstopgroup=True, toggle_staticdynamic=True)
self.assertEqual(lib.BEGIN.__repr__(), "['-Bstatic', '-Xstart']")
self.assertEqual(tcv['LIBBLAS'].__repr__(), "[['d', 'e', 'f'], ['a', 'b', 'c']]")
# str calls sanitize
self.assertEqual(str(tcv['LIBBLAS']),
"-Wl,-Bstatic -Wl,-Xstart -la -lb -lc -Wl,-Xstop -Wl,-Bdynamic -ld -le -lf")
# sanitize is on self
self.assertEqual(tcv['LIBBLAS'].__repr__(), "[['a', 'b', 'c'], ['d', 'e', 'f']]")
# make copies for later
copy_blas = tcv['LIBBLAS'].copy()
copy_blas_2 = tcv['LIBBLAS'].copy()
self.assertEqual(str(tcv['LIBBLAS']), str(copy_blas))
# packed_linker
tcv.try_function_on_element('set_packed_linker_options') # don't use it like this (this is internal)
new_res = "-Wl,-Bstatic,-Xstart,-la,-lb,-lc,-Xstop,-Bdynamic -ld -le -lf"
self.assertEqual(str(tcv['LIBBLAS']), new_res)
# run it directly on copy of LIBBLAS, not through the tcv instance
copy_blas.try_function_on_element('set_packed_linker_options')
self.assertEqual(str(copy_blas), new_res)
# arbitrary example
kwargs = {
'prefix': '_P_',
'prefix_begin_end': '_p_',
'separator': ':',
'separator_begin_end': ';',
}
copy_blas.try_function_on_element('set_packed_linker_options', kwargs=kwargs)
self.assertEqual(str(copy_blas),
'_p_;-Bstatic;-Xstart:_P_a:_P_b:_P_c:-Xstop;-Bdynamic -ld -le -lf')
kwargs = {
'prefix': '_P_',
'prefix_begin_end': '_p_',
'separator': ':',
'separator_begin_end': ';',
}
copy_blas.try_function_on_element('change', kwargs=kwargs)
self.assertEqual(str(copy_blas),
'_p_;_p_-Bstatic;_p_-Xstart:_P_a:_P_b:_P_c:_p_-Xstop;_p_-Bdynamic _P_d:_P_e:_P_f')
# e.g. numpy and mkl blas
# -Wl:-Bstatic,-Wl:--start-group,mkl_intel_lp64,mkl_intel_thread,mkl_core,-Wl:--end-group,-Wl:-Bdynamic,iomp5
kwargs = {
'prefix': '',
'prefix_begin_end': '-Wl:',
'separator': ',',
'separator_begin_end': ',',
}
copy_blas_2.try_function_on_element('change', kwargs=kwargs)
copy_blas_2.SEPARATOR = ','
self.assertEqual(str(copy_blas_2),
'-Wl:-Bstatic,-Wl:-Xstart,a,b,c,-Wl:-Xstop,-Wl:-Bdynamic,d,e,f')
# test try remove
copy_blas.try_remove(['a', 'f'])
self.assertEqual(copy_blas.__repr__(), "[['b', 'c'], ['d', 'e']]")
# test join
tcv.join('LIBLAPACK', 'LIBBLAS')
self.assertEqual(tcv['LIBLAPACK'].__repr__(), "[['a', 'b', 'c'], ['d', 'e', 'f']]")
lib = tcv.nappend('LIBLAPACK', ['g', 'h'])
tcv.add_begin_end_linkerflags(lib, toggle_startstopgroup=True)
self.assertEqual(tcv['LIBLAPACK'].__repr__(), "[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h']]")
# sanitize will reorder wrt POSISTION but not join the start/stop group (blas has also statc/dynamic)
tcv['LIBLAPACK'].sanitize()
self.assertEqual(tcv['LIBLAPACK'].__repr__(), "[['a', 'b', 'c'], ['g', 'h'], ['d', 'e', 'f']]")
# run both toggle, not just static/dynamic one.
tcv.add_begin_end_linkerflags(lib, toggle_startstopgroup=True, toggle_staticdynamic=True)
# sanitize will reorder wrt POSISTION and join the start/stop group
tcv['LIBLAPACK'].sanitize()
self.assertEqual(tcv['LIBLAPACK'].__repr__(), "[['a', 'b', 'c', 'g', 'h'], ['d', 'e', 'f']]")
self.assertEqual(str(tcv['LIBLAPACK']), "-Wl,-Bstatic,-Xstart,-la,-lb,-lc,-lg,-lh,-Xstop,-Bdynamic -ld -le -lf")
tcv.nappend('MPICH_CC', 'icc', var_class=CommandFlagList)
self.assertEqual(str(tcv['MPICH_CC']), "icc")
tcv.nappend('MPICH_CC', 'test')
self.assertEqual(str(tcv['MPICH_CC']), "icc -test")
def suite():
""" return all the tests"""
return TestLoaderFiltered().loadTestsFromTestCase(ToolchainVariablesTest, sys.argv[1:])
if __name__ == '__main__':
res = TextTestRunner(verbosity=1).run(suite())
sys.exit(len(res.failures))
| gpl-2.0 | 1,708,847,449,100,512,800 | 42.034682 | 120 | 0.578106 | false |
kvar/ansible | lib/ansible/modules/cloud/amazon/execute_lambda.py | 1 | 10521 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: execute_lambda
short_description: Execute an AWS Lambda function
description:
- This module executes AWS Lambda functions, allowing synchronous and asynchronous
invocation.
version_added: "2.2"
extends_documentation_fragment:
- aws
- ec2
author: "Ryan Scott Brown (@ryansb) <[email protected]>"
requirements:
- python >= 2.6
- boto3
notes:
- Async invocation will always return an empty C(output) key.
- Synchronous invocation may result in a function timeout, resulting in an
empty C(output) key.
options:
name:
description:
- The name of the function to be invoked. This can only be used for
invocations within the calling account. To invoke a function in another
account, use I(function_arn) to specify the full ARN.
function_arn:
description:
- The name of the function to be invoked
tail_log:
description:
- If C(tail_log=yes), the result of the task will include the last 4 KB
of the CloudWatch log for the function execution. Log tailing only
works if you use synchronous invocation C(wait=yes). This is usually
used for development or testing Lambdas.
type: bool
default: 'no'
wait:
description:
- Whether to wait for the function results or not. If I(wait) is C(no),
the task will not return any results. To wait for the Lambda function
to complete, set C(wait=yes) and the result will be available in the
I(output) key.
type: bool
default: 'yes'
dry_run:
description:
- Do not *actually* invoke the function. A C(DryRun) call will check that
the caller has permissions to call the function, especially for
checking cross-account permissions.
type: bool
default: 'no'
version_qualifier:
description:
- Which version/alias of the function to run. This defaults to the
C(LATEST) revision, but can be set to any existing version or alias.
See U(https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html)
for details.
default: LATEST
payload:
description:
- A dictionary in any form to be provided as input to the Lambda function.
default: {}
'''
EXAMPLES = '''
- execute_lambda:
name: test-function
# the payload is automatically serialized and sent to the function
payload:
foo: bar
value: 8
register: response
# Test that you have sufficient permissions to execute a Lambda function in
# another account
- execute_lambda:
function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function
dry_run: true
- execute_lambda:
name: test-function
payload:
foo: bar
value: 8
wait: true
tail_log: true
register: response
# the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda
# Pass the Lambda event payload as a json file.
- execute_lambda:
name: test-function
payload: "{{ lookup('file','lambda_event.json') }}"
register: response
- execute_lambda:
name: test-function
version_qualifier: PRODUCTION
'''
RETURN = '''
output:
description: Function output if wait=true and the function returns a value
returned: success
type: dict
sample: "{ 'output': 'something' }"
logs:
description: The last 4KB of the function logs. Only provided if I(tail_log) is true
type: str
returned: if I(tail_log) == true
status:
description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async)
type: int
sample: 200
returned: always
'''
import base64
import json
import traceback
try:
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils._text import to_native
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(),
function_arn=dict(),
wait=dict(default=True, type='bool'),
tail_log=dict(default=False, type='bool'),
dry_run=dict(default=False, type='bool'),
version_qualifier=dict(),
payload=dict(default={}, type='dict'),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['name', 'function_arn'],
]
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
name = module.params.get('name')
function_arn = module.params.get('function_arn')
await_return = module.params.get('wait')
dry_run = module.params.get('dry_run')
tail_log = module.params.get('tail_log')
version_qualifier = module.params.get('version_qualifier')
payload = module.params.get('payload')
if not HAS_BOTO3:
module.fail_json(msg='Python module "boto3" is missing, please install it')
if not (name or function_arn):
module.fail_json(msg="Must provide either a function_arn or a name to invoke.")
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=HAS_BOTO3)
if not region:
module.fail_json(msg="The AWS region must be specified as an "
"environment variable or in the AWS credentials "
"profile.")
try:
client = boto3_conn(module, conn_type='client', resource='lambda',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
module.fail_json(msg="Failure connecting boto3 to AWS: %s" % to_native(e), exception=traceback.format_exc())
invoke_params = {}
if await_return:
# await response
invoke_params['InvocationType'] = 'RequestResponse'
else:
# fire and forget
invoke_params['InvocationType'] = 'Event'
if dry_run or module.check_mode:
# dry_run overrides invocation type
invoke_params['InvocationType'] = 'DryRun'
if tail_log and await_return:
invoke_params['LogType'] = 'Tail'
elif tail_log and not await_return:
module.fail_json(msg="The `tail_log` parameter is only available if "
"the invocation waits for the function to complete. "
"Set `wait` to true or turn off `tail_log`.")
else:
invoke_params['LogType'] = 'None'
if version_qualifier:
invoke_params['Qualifier'] = version_qualifier
if payload:
invoke_params['Payload'] = json.dumps(payload)
if function_arn:
invoke_params['FunctionName'] = function_arn
elif name:
invoke_params['FunctionName'] = name
try:
response = client.invoke(**invoke_params)
except botocore.exceptions.ClientError as ce:
if ce.response['Error']['Code'] == 'ResourceNotFoundException':
module.fail_json(msg="Could not find Lambda to execute. Make sure "
"the ARN is correct and your profile has "
"permissions to execute this function.",
exception=traceback.format_exc())
module.fail_json(msg="Client-side error when invoking Lambda, check inputs and specific error",
exception=traceback.format_exc())
except botocore.exceptions.ParamValidationError as ve:
module.fail_json(msg="Parameters to `invoke` failed to validate",
exception=traceback.format_exc())
except Exception as e:
module.fail_json(msg="Unexpected failure while invoking Lambda function",
exception=traceback.format_exc())
results = {
'logs': '',
'status': response['StatusCode'],
'output': '',
}
if response.get('LogResult'):
try:
# logs are base64 encoded in the API response
results['logs'] = base64.b64decode(response.get('LogResult', ''))
except Exception as e:
module.fail_json(msg="Failed while decoding logs", exception=traceback.format_exc())
if invoke_params['InvocationType'] == 'RequestResponse':
try:
results['output'] = json.loads(response['Payload'].read().decode('utf8'))
except Exception as e:
module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc())
if isinstance(results.get('output'), dict) and any(
[results['output'].get('stackTrace'), results['output'].get('errorMessage')]):
# AWS sends back stack traces and error messages when a function failed
# in a RequestResponse (synchronous) context.
template = ("Function executed, but there was an error in the Lambda function. "
"Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
error_data = {
# format the stacktrace sent back as an array into a multiline string
'trace': '\n'.join(
[' '.join([
str(x) for x in line # cast line numbers to strings
]) for line in results.get('output', {}).get('stackTrace', [])]
),
'errmsg': results['output'].get('errorMessage'),
'type': results['output'].get('errorType')
}
module.fail_json(msg=template.format(**error_data), result=results)
module.exit_json(changed=True, result=results)
if __name__ == '__main__':
main()
| gpl-3.0 | 597,858,952,520,278,100 | 35.030822 | 116 | 0.639863 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/mercurial/win32.py | 1 | 15830 | # win32.py - utility functions that use win32 API
#
# Copyright 2005-2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import ctypes, errno, os, struct, subprocess, random
_kernel32 = ctypes.windll.kernel32
_advapi32 = ctypes.windll.advapi32
_user32 = ctypes.windll.user32
_BOOL = ctypes.c_long
_WORD = ctypes.c_ushort
_DWORD = ctypes.c_ulong
_UINT = ctypes.c_uint
_LONG = ctypes.c_long
_LPCSTR = _LPSTR = ctypes.c_char_p
_HANDLE = ctypes.c_void_p
_HWND = _HANDLE
_INVALID_HANDLE_VALUE = _HANDLE(-1).value
# GetLastError
_ERROR_SUCCESS = 0
_ERROR_INVALID_PARAMETER = 87
_ERROR_INSUFFICIENT_BUFFER = 122
# WPARAM is defined as UINT_PTR (unsigned type)
# LPARAM is defined as LONG_PTR (signed type)
if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
_WPARAM = ctypes.c_ulong
_LPARAM = ctypes.c_long
elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
_WPARAM = ctypes.c_ulonglong
_LPARAM = ctypes.c_longlong
class _FILETIME(ctypes.Structure):
_fields_ = [('dwLowDateTime', _DWORD),
('dwHighDateTime', _DWORD)]
class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
_fields_ = [('dwFileAttributes', _DWORD),
('ftCreationTime', _FILETIME),
('ftLastAccessTime', _FILETIME),
('ftLastWriteTime', _FILETIME),
('dwVolumeSerialNumber', _DWORD),
('nFileSizeHigh', _DWORD),
('nFileSizeLow', _DWORD),
('nNumberOfLinks', _DWORD),
('nFileIndexHigh', _DWORD),
('nFileIndexLow', _DWORD)]
# CreateFile
_FILE_SHARE_READ = 0x00000001
_FILE_SHARE_WRITE = 0x00000002
_FILE_SHARE_DELETE = 0x00000004
_OPEN_EXISTING = 3
# SetFileAttributes
_FILE_ATTRIBUTE_NORMAL = 0x80
_FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000
# Process Security and Access Rights
_PROCESS_QUERY_INFORMATION = 0x0400
# GetExitCodeProcess
_STILL_ACTIVE = 259
# registry
_HKEY_CURRENT_USER = 0x80000001L
_HKEY_LOCAL_MACHINE = 0x80000002L
_KEY_READ = 0x20019
_REG_SZ = 1
_REG_DWORD = 4
class _STARTUPINFO(ctypes.Structure):
_fields_ = [('cb', _DWORD),
('lpReserved', _LPSTR),
('lpDesktop', _LPSTR),
('lpTitle', _LPSTR),
('dwX', _DWORD),
('dwY', _DWORD),
('dwXSize', _DWORD),
('dwYSize', _DWORD),
('dwXCountChars', _DWORD),
('dwYCountChars', _DWORD),
('dwFillAttribute', _DWORD),
('dwFlags', _DWORD),
('wShowWindow', _WORD),
('cbReserved2', _WORD),
('lpReserved2', ctypes.c_char_p),
('hStdInput', _HANDLE),
('hStdOutput', _HANDLE),
('hStdError', _HANDLE)]
class _PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [('hProcess', _HANDLE),
('hThread', _HANDLE),
('dwProcessId', _DWORD),
('dwThreadId', _DWORD)]
_DETACHED_PROCESS = 0x00000008
_STARTF_USESHOWWINDOW = 0x00000001
_SW_HIDE = 0
class _COORD(ctypes.Structure):
_fields_ = [('X', ctypes.c_short),
('Y', ctypes.c_short)]
class _SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', ctypes.c_short),
('Top', ctypes.c_short),
('Right', ctypes.c_short),
('Bottom', ctypes.c_short)]
class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [('dwSize', _COORD),
('dwCursorPosition', _COORD),
('wAttributes', _WORD),
('srWindow', _SMALL_RECT),
('dwMaximumWindowSize', _COORD)]
_STD_ERROR_HANDLE = _DWORD(-12).value
# types of parameters of C functions used (required by pypy)
_kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
_DWORD, _DWORD, _HANDLE]
_kernel32.CreateFileA.restype = _HANDLE
_kernel32.GetFileInformationByHandle.argtypes = [_HANDLE, ctypes.c_void_p]
_kernel32.GetFileInformationByHandle.restype = _BOOL
_kernel32.CloseHandle.argtypes = [_HANDLE]
_kernel32.CloseHandle.restype = _BOOL
try:
_kernel32.CreateHardLinkA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p]
_kernel32.CreateHardLinkA.restype = _BOOL
except AttributeError:
pass
_kernel32.SetFileAttributesA.argtypes = [_LPCSTR, _DWORD]
_kernel32.SetFileAttributesA.restype = _BOOL
_kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD]
_kernel32.OpenProcess.restype = _HANDLE
_kernel32.GetExitCodeProcess.argtypes = [_HANDLE, ctypes.c_void_p]
_kernel32.GetExitCodeProcess.restype = _BOOL
_kernel32.GetLastError.argtypes = []
_kernel32.GetLastError.restype = _DWORD
_kernel32.GetModuleFileNameA.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD]
_kernel32.GetModuleFileNameA.restype = _DWORD
_kernel32.CreateProcessA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p,
ctypes.c_void_p, _BOOL, _DWORD, ctypes.c_void_p, _LPCSTR, ctypes.c_void_p,
ctypes.c_void_p]
_kernel32.CreateProcessA.restype = _BOOL
_kernel32.ExitProcess.argtypes = [_UINT]
_kernel32.ExitProcess.restype = None
_kernel32.GetCurrentProcessId.argtypes = []
_kernel32.GetCurrentProcessId.restype = _DWORD
_SIGNAL_HANDLER = ctypes.WINFUNCTYPE(_BOOL, _DWORD)
_kernel32.SetConsoleCtrlHandler.argtypes = [_SIGNAL_HANDLER, _BOOL]
_kernel32.SetConsoleCtrlHandler.restype = _BOOL
_kernel32.GetStdHandle.argtypes = [_DWORD]
_kernel32.GetStdHandle.restype = _HANDLE
_kernel32.GetConsoleScreenBufferInfo.argtypes = [_HANDLE, ctypes.c_void_p]
_kernel32.GetConsoleScreenBufferInfo.restype = _BOOL
_advapi32.RegOpenKeyExA.argtypes = [_HANDLE, _LPCSTR, _DWORD, _DWORD,
ctypes.c_void_p]
_advapi32.RegOpenKeyExA.restype = _LONG
_advapi32.RegQueryValueExA.argtypes = [_HANDLE, _LPCSTR, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
_advapi32.RegQueryValueExA.restype = _LONG
_advapi32.RegCloseKey.argtypes = [_HANDLE]
_advapi32.RegCloseKey.restype = _LONG
_advapi32.GetUserNameA.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_advapi32.GetUserNameA.restype = _BOOL
_user32.GetWindowThreadProcessId.argtypes = [_HANDLE, ctypes.c_void_p]
_user32.GetWindowThreadProcessId.restype = _DWORD
_user32.ShowWindow.argtypes = [_HANDLE, ctypes.c_int]
_user32.ShowWindow.restype = _BOOL
_WNDENUMPROC = ctypes.WINFUNCTYPE(_BOOL, _HWND, _LPARAM)
_user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM]
_user32.EnumWindows.restype = _BOOL
def _raiseoserror(name):
err = ctypes.WinError()
raise OSError(err.errno, '%s: %s' % (name, err.strerror))
def _getfileinfo(name):
fh = _kernel32.CreateFileA(name, 0,
_FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
None, _OPEN_EXISTING, 0, None)
if fh == _INVALID_HANDLE_VALUE:
_raiseoserror(name)
try:
fi = _BY_HANDLE_FILE_INFORMATION()
if not _kernel32.GetFileInformationByHandle(fh, ctypes.byref(fi)):
_raiseoserror(name)
return fi
finally:
_kernel32.CloseHandle(fh)
def oslink(src, dst):
try:
if not _kernel32.CreateHardLinkA(dst, src, None):
_raiseoserror(src)
except AttributeError: # Wine doesn't support this function
_raiseoserror(src)
def nlinks(name):
'''return number of hardlinks for the given file'''
return _getfileinfo(name).nNumberOfLinks
def samefile(fpath1, fpath2):
'''Returns whether fpath1 and fpath2 refer to the same file. This is only
guaranteed to work for files, not directories.'''
res1 = _getfileinfo(fpath1)
res2 = _getfileinfo(fpath2)
return (res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
and res1.nFileIndexHigh == res2.nFileIndexHigh
and res1.nFileIndexLow == res2.nFileIndexLow)
def samedevice(fpath1, fpath2):
'''Returns whether fpath1 and fpath2 are on the same device. This is only
guaranteed to work for files, not directories.'''
res1 = _getfileinfo(fpath1)
res2 = _getfileinfo(fpath2)
return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
def testpid(pid):
'''return True if pid is still running or unable to
determine, False otherwise'''
h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid)
if h:
try:
status = _DWORD()
if _kernel32.GetExitCodeProcess(h, ctypes.byref(status)):
return status.value == _STILL_ACTIVE
finally:
_kernel32.CloseHandle(h)
return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER
def lookupreg(key, valname=None, scope=None):
''' Look up a key/value name in the Windows registry.
valname: value name. If unspecified, the default value for the key
is used.
scope: optionally specify scope for registry lookup, this can be
a sequence of scopes to look up in order. Default (CURRENT_USER,
LOCAL_MACHINE).
'''
byref = ctypes.byref
if scope is None:
scope = (_HKEY_CURRENT_USER, _HKEY_LOCAL_MACHINE)
elif not isinstance(scope, (list, tuple)):
scope = (scope,)
for s in scope:
kh = _HANDLE()
res = _advapi32.RegOpenKeyExA(s, key, 0, _KEY_READ, ctypes.byref(kh))
if res != _ERROR_SUCCESS:
continue
try:
size = _DWORD(600)
type = _DWORD()
buf = ctypes.create_string_buffer(size.value + 1)
res = _advapi32.RegQueryValueExA(kh.value, valname, None,
byref(type), buf, byref(size))
if res != _ERROR_SUCCESS:
continue
if type.value == _REG_SZ:
# string is in ANSI code page, aka local encoding
return buf.value
elif type.value == _REG_DWORD:
fmt = '<L'
s = ctypes.string_at(byref(buf), struct.calcsize(fmt))
return struct.unpack(fmt, s)[0]
finally:
_advapi32.RegCloseKey(kh.value)
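
# Illustrative sketch (added for clarity, not part of the original module): lookupreg
# takes a key path plus an optional value name and scope. The concrete key and value
# below are assumptions for the example only; if they are missing on a machine the
# call simply returns None.
def _demo_lookupreg():
    '''Hedged example: read ProgramFilesDir, restricted to HKEY_LOCAL_MACHINE.'''
    return lookupreg(r'SOFTWARE\Microsoft\Windows\CurrentVersion',
                     valname='ProgramFilesDir',
                     scope=_HKEY_LOCAL_MACHINE)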
def executablepath():
'''return full path of hg.exe'''
size = 600
buf = ctypes.create_string_buffer(size + 1)
len = _kernel32.GetModuleFileNameA(None, ctypes.byref(buf), size)
if len == 0:
raise ctypes.WinError()
elif len == size:
raise ctypes.WinError(_ERROR_INSUFFICIENT_BUFFER)
return buf.value
def getuser():
'''return name of current user'''
size = _DWORD(300)
buf = ctypes.create_string_buffer(size.value + 1)
if not _advapi32.GetUserNameA(ctypes.byref(buf), ctypes.byref(size)):
raise ctypes.WinError()
return buf.value
_signalhandler = []
def setsignalhandler():
'''Register a termination handler for console events including
CTRL+C. python signal handlers do not work well with socket
operations.
'''
def handler(event):
_kernel32.ExitProcess(1)
if _signalhandler:
return # already registered
h = _SIGNAL_HANDLER(handler)
_signalhandler.append(h) # needed to prevent garbage collection
if not _kernel32.SetConsoleCtrlHandler(h, True):
raise ctypes.WinError()
def hidewindow():
def callback(hwnd, pid):
wpid = _DWORD()
_user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid))
if pid == wpid.value:
_user32.ShowWindow(hwnd, _SW_HIDE)
return False # stop enumerating windows
return True
pid = _kernel32.GetCurrentProcessId()
_user32.EnumWindows(_WNDENUMPROC(callback), pid)
def termwidth():
# cmd.exe does not handle CR like a unix console, the CR is
# counted in the line length. On 80 columns consoles, if 80
# characters are written, the following CR won't apply on the
# current line but on the new one. Keep room for it.
width = 79
# Query stderr to avoid problems with redirections
screenbuf = _kernel32.GetStdHandle(
_STD_ERROR_HANDLE) # don't close the handle returned
if screenbuf is None or screenbuf == _INVALID_HANDLE_VALUE:
return width
csbi = _CONSOLE_SCREEN_BUFFER_INFO()
if not _kernel32.GetConsoleScreenBufferInfo(
screenbuf, ctypes.byref(csbi)):
return width
width = csbi.srWindow.Right - csbi.srWindow.Left
return width
def spawndetached(args):
# No standard library function really spawns a fully detached
# process under win32 because they allocate pipes or other objects
# to handle standard streams communications. Passing these objects
# to the child process requires handle inheritance to be enabled
# which makes really detached processes impossible.
si = _STARTUPINFO()
si.cb = ctypes.sizeof(_STARTUPINFO)
si.dwFlags = _STARTF_USESHOWWINDOW
si.wShowWindow = _SW_HIDE
pi = _PROCESS_INFORMATION()
env = ''
for k in os.environ:
env += "%s=%s\0" % (k, os.environ[k])
if not env:
env = '\0'
env += '\0'
args = subprocess.list2cmdline(args)
# Not running the command in shell mode makes python26 hang when
# writing to hgweb output socket.
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
res = _kernel32.CreateProcessA(
None, args, None, None, False, _DETACHED_PROCESS,
env, os.getcwd(), ctypes.byref(si), ctypes.byref(pi))
if not res:
raise ctypes.WinError()
return pi.dwProcessId
def unlink(f):
'''try to implement POSIX' unlink semantics on Windows'''
# POSIX allows to unlink and rename open files. Windows has serious
# problems with doing that:
# - Calling os.unlink (or os.rename) on a file f fails if f or any
# hardlinked copy of f has been opened with Python's open(). There is no
# way such a file can be deleted or renamed on Windows (other than
# scheduling the delete or rename for the next reboot).
# - Calling os.unlink on a file that has been opened with Mercurial's
# posixfile (or comparable methods) will delay the actual deletion of
# the file for as long as the file is held open. The filename is blocked
# during that time and cannot be used for recreating a new file under
# that same name ("zombie file"). Directories containing such zombie files
# cannot be removed or moved.
# A file that has been opened with posixfile can be renamed, so we rename
# f to a random temporary name before calling os.unlink on it. This allows
# callers to recreate f immediately while having other readers do their
# implicit zombie filename blocking on a temporary name.
for tries in xrange(10):
temp = '%s-%08x' % (f, random.randint(0, 0xffffffff))
try:
os.rename(f, temp) # raises OSError EEXIST if temp exists
break
except OSError, e:
if e.errno != errno.EEXIST:
raise
else:
raise IOError, (errno.EEXIST, "No usable temporary filename found")
try:
os.unlink(temp)
except OSError:
# The unlink might have failed because the READONLY attribute may heave
# been set on the original file. Rename works fine with READONLY set,
# but not os.unlink. Reset all attributes and try again.
_kernel32.SetFileAttributesA(temp, _FILE_ATTRIBUTE_NORMAL)
try:
os.unlink(temp)
except OSError:
# The unlink might have failed due to some very rude AV-Scanners.
# Leaking a tempfile is the lesser evil than aborting here and
# leaving some potentially serious inconsistencies.
pass
def makedir(path, notindexed):
os.mkdir(path)
if notindexed:
_kernel32.SetFileAttributesA(path, _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED)
| gpl-3.0 | -3,631,045,314,598,587,400 | 34.099778 | 80 | 0.6482 | false |
zstackio/zstack-woodpecker | zstackwoodpecker/zstackwoodpecker/operations/datamigrate_operations.py | 1 | 3332 | '''
All data migration operations for test.
@author: Legion
'''
import apibinding.api_actions as api_actions
import zstackwoodpecker.test_util as test_util
import account_operations
import apibinding.inventory as inventory
def ps_migrage_vm(dst_ps_uuid, vm_uuid, session_uuid=None, withDataVolumes=False, withSnapshots=False):
action = api_actions.PrimaryStorageMigrateVmAction()
action.dstPrimaryStorageUuid = dst_ps_uuid
action.vmInstanceUuid = vm_uuid
action.timeout = 7200000
action.withDataVolumes = withDataVolumes
action.withSnapshots = withSnapshots
test_util.action_logger('Migrate [vm uuid: %s] to [Primary Storage: %s]' % (vm_uuid, dst_ps_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def ps_migrage_volume(dst_ps_uuid, vol_uuid, volume_type=None, session_uuid=None):
action = api_actions.PrimaryStorageMigrateVolumeAction()
action.dstPrimaryStorageUuid = dst_ps_uuid
action.volumeUuid = vol_uuid
action.timeout = 7200000
test_util.action_logger('Migrate [%s Volume: %s] to [Primary Storage: %s]' % (volume_type, vol_uuid, dst_ps_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def ps_migrage_root_volume(dst_ps_uuid, vol_uuid, session_uuid=None):
evt_inv = ps_migrage_volume(dst_ps_uuid=dst_ps_uuid, vol_uuid=vol_uuid, volume_type='Root', session_uuid=session_uuid)
return evt_inv
def ps_migrage_data_volume(dst_ps_uuid, vol_uuid, session_uuid=None):
evt_inv = ps_migrage_volume(dst_ps_uuid=dst_ps_uuid, vol_uuid=vol_uuid, volume_type='Data', session_uuid=session_uuid)
return evt_inv
def bs_migrage_image(dst_bs_uuid, src_bs_uuid, image_uuid, session_uuid=None):
action = api_actions.BackupStorageMigrateImageAction()
action.dstBackupStorageUuid = dst_bs_uuid
action.srcBackupStorageUuid = src_bs_uuid
action.imageUuid = image_uuid
test_util.action_logger('Migrate [Image: %s] from [Backup Storage: %s ]to [Backup Storage: %s]' % (image_uuid, src_bs_uuid, dst_bs_uuid))
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventory
def get_ps_candidate_for_vol_migration(vol_uuid, session_uuid=None):
action = api_actions.GetPrimaryStorageCandidatesForVolumeMigrationAction()
action.volumeUuid = vol_uuid
test_util.action_logger('Get Primary Storage Candidates for Volume Migration')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def get_bs_candidate_for_image_migration(src_bs_uuid, session_uuid=None):
action = api_actions.GetBackupStorageCandidatesForImageMigrationAction()
action.srcBackupStorageUuid = src_bs_uuid
test_util.action_logger('Get Backup Storage Candidates for Volume Migration')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
def get_ps_candidate_for_vm_migration(vm_uuid, session_uuid=None):
action = api_actions.GetPrimaryStorageCandidatesForVmMigrationAction()
action.vmInstanceUuid = vm_uuid
test_util.action_logger('Get Primary Storage Candidates for Vm Migration')
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt.inventories
| apache-2.0 | 3,511,064,920,320,922,000 | 47.289855 | 141 | 0.752401 | false |
jeKnowledge/horarios-inforestudante | DataParser.py | 1 | 3411 | import csv
import datetime
import re
from Structs import AulaDataRaw
from Structs import AulaDataSripped
def openFile(filePath):
return open(filePath)
def csvIntoRawArray(csvFile):
# Array of arrays(lines) with data
filereader = csv.reader(csvFile)
# We will be returning an array of AulaDataRaw
# each corresponding to a line
aulaDataRawArray = []
for row in filereader:
# Skip labels row
try:
int(row[0])
except ValueError:
continue
aulaDataRawArray.append(AulaDataRaw(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]))
return aulaDataRawArray
def rawArrayToStrippedArray(rawArray):
result = []
for raw in rawArray:
# Skip label
if raw.aulaId == "FE_ID":
continue
result.append(rawToStripped(raw))
return result
# Converts from Structs.AulaDataRaw to Stucts.AulaDataStripped
def rawToStripped(dataRaw):
# Semestre:
# 1, 2 ou Anual (0)
if dataRaw.semestre == "Anual":
semestre = 0
else:
semestre = int(dataRaw.semestre[0]) # 1o caractere (1/2)
aulaCodigo = int(dataRaw.aulaId)
turmaId = int(dataRaw.turmaId)
dia = dmytimeToDayOfWeek(dataRaw.dataInicio)
horaInicio = dmytimeToTime(dataRaw.dataInicio)
horaFim = dmytimeToTime(dataRaw.dataFim)
turma = dataRaw.turma
tipo = getClassType(dataRaw.turma)
aulaNome = dataRaw.aulaNome
return AulaDataSripped(aulaCodigo, semestre, turmaId, dia, horaInicio, horaFim, turma, tipo, aulaNome)
# "10-JAN-2015 20:30:30" -> 20.55 (date to decimal time)
def dmytimeToTime(timeString):
    timeStr = re.search(r"\d\d:\d\d:\d\d", timeString).group(0)
    # float division so the fractional part survives under Python 2 as well
    return int(timeStr[:2]) + int(timeStr[3:5]) / 60.0 + int(timeStr[6:8]) / 3600.0
# Monday -> 0
# Sunday -> 6
def dmytimeToDayOfWeek(timeString):
    day = int(re.search(r"\d\d(?=-\w\w\w-\d\d\d\d)", timeString).group(0))
    monthStr = re.search(r"(?<=\d\d-)\w\w\w(?=-\d\d\d\d)", timeString).group(0)
    month = monthStrToNumber(monthStr)
    year = int(re.search(r"(?<=\d\d-\w\w\w-)\d\d\d\d", timeString).group(0))
return datetime.datetime(year, month, day).weekday()
# Converts JAN -> 1
# FEB -> 2 ...
def monthStrToNumber(monthString):
upperString = str(monthString).upper()
# Oh, no switch statements. Of course.
us = upperString
if us == "JAN":
return 1
if us == "FEV" or us == "FEB":
return 2
if us == "MAR":
return 3
if us == "ABR":
return 4
if us == "MAI":
return 5
if us == "JUN":
return 6
if us == "JUL":
return 7
if us == "AGO":
return 8
if us == "SET":
return 9
if us == "OUT":
return 10
if us == "NOV":
return 11
if us == "DEZ":
return 12
return -1
# Returns array of classes in strippedArray that match classIds in
# classIdArray.
# Caution: WILL return all classes (i.e., TP1, TP2, T1, T2, ...)
def getClasses(strippedArray, semester, classIdArray):
myClasses = []
for data in strippedArray:
if data.aulaId not in classIdArray or data.semestre != semester:
continue
myClasses.append(data)
return myClasses
# Returns class "type" from turma
# i.e., TP1 => TP
# O/S/T5 => O/S/T
def getClassType(turma):
    return re.search(r".+(?=\d)", turma).group(0)
| mit | -2,165,584,668,745,276,400 | 25.238462 | 116 | 0.613603 | false |
ermo/privateer_wcu | modules/wrong_escort.py | 1 | 2543 | import escort_mission
import faction_ships
class wrong_escort (escort_mission.escort_mission):
def __init__ (self,factionname,missiondifficulty,distance_from_base,creds,numsysaway,jumps=(),var_to_set='',dynfg='',dyntype='',alternatesystems=(),alternatefactions=(),alternateflightgroups=(),alternatetypes=(),alternategreeting=(),alternatevariable='diverted'):
escort_mission.escort_mission.__init__(self,factionname,missiondifficulty,0,0,distance_from_base,creds,0,numsysaway,jumps,var_to_set,dynfg,dyntype)
self.alternatesystems=alternatesystems
self.alternatevariable=alternatevariable
self.alternateflightgroups=alternateflightgroups
self.alternatetypes=alternatetypes
self.alternategreeting=alternategreeting
self.alternatefactions=alternatefactions
import quest
import VS
self.cp = VS.getCurrentPlayer()
quest.removeQuest(self.cp,alternatevariable,-1)
def Execute(self):
escort_mission.escort_mission.Execute(self)
sys = self.escortee.getUnitSystemFile()
if sys in self.alternatesystems:
for i in range(len(self.alternatesystems)):
if sys==self.alternatesystems[i]:
import quest
quest.removeQuest(self.cp,self.alternatevariable,i)
quest.removeQuest(self.cp,self.var_to_set,-1)
import VS
import launch
L = launch.Launch()
L.fg="Escorts"
L.faction=self.alternatefactions[i]
L.dynfg=self.alternateflightgroups[i]
L.type=self.alternatetypes[i]
L.ai="default"
L.num=6
L.minradius=3000.0
L.maxradius=4000.0
try:
                        L.minradius*=faction_ships.launch_distance_factor
                        L.maxradius*=faction_ships.launch_distance_factor
except:
pass
launched=L.launch(self.escortee)
self.escortee.setFgDirective('f')
self.escortee.setFlightgroupLeader(launched)
self.escortee.setFactionName(self.alternatefactions[i])
import universe
universe.greet(self.alternategreeting[i],launched,VS.getPlayerX(self.cp))
VS.terminateMission(1)
return
| gpl-2.0 | 8,869,745,354,992,858,000 | 49.86 | 267 | 0.585529 | false |
goodmami/pydelphin | tests/mrs_eds_test.py | 1 | 11908 |
import pytest
from delphin.mrs import simplemrs, eds
from delphin.mrs.components import Node, Pred, Lnk
from delphin.mrs.config import CVARSORT
# empty
empty = simplemrs.loads_one('''[ ]''')
# "It rains."
it_rains = simplemrs.loads_one('''
[ LTOP: h0
INDEX: e2 [ e SF: prop TENSE: pres MOOD: indicative PROG: - PERF: - ]
RELS: < [ "_rain_v_1_rel"<3:9> LBL: h1 ARG0: e2 ] >
HCONS: < h0 qeq h1 > ]
''')
dogs_chase_Kim = simplemrs.loads_one('''
[ LTOP: h0
INDEX: e2 [ e SF: prop TENSE: pres MOOD: indicative PROG: - PERF: - ]
RELS: < [ udef_q_rel<0:4> LBL: h4 ARG0: x3 RSTR: h5 BODY: h6 ]
[ _dog_n_1_rel<0:4> LBL: h7 ARG0: x3 [ x PERS: 3 NUM: pl IND: + ] ]
[ _chase_v_1_rel<5:10> LBL: h1 ARG0: e2 ARG1: x3 ARG2: x8 ]
[ proper_q<11:15> LBL: h9 ARG0: x8 RSTR: h10 BODY: h11 ]
[ named_rel<11:15> LBL: h12 ARG0: x8 [ x PERS: 3 NUM: sg IND: + ] CARG: "Kim" ] >
HCONS: < h0 qeq h1 h5 qeq h7 h10 qeq h12 > ]
''')
# example from Jacy with duplicate ARG0s
kotaenakatta = simplemrs.loads_one('''
[ TOP: h0
INDEX: e2 [ e TENSE: past MOOD: indicative PROG: - PERF: - ASPECT: default_aspect PASS: - SF: prop ]
RELS: < [ "_kotaeru_v_3_rel"<0:2> LBL: h4 ARG0: e2 ARG1: i3 ]
[ "_neg_v_rel"<3:6> LBL: h1 ARG0: e2 ARG1: h5 ] >
HCONS: < h0 qeq h1 h5 qeq h4 > ]
'''
)
nearly_every_dog_barked = simplemrs.loads_one('''
[ LTOP: h0
INDEX: e2 [ e SF: prop TENSE: past MOOD: indicative PROG: - PERF: - ]
RELS: < [ _nearly_x_deg_rel<0:6> LBL: h4 ARG0: e5 [ e SF: prop TENSE: untensed MOOD: indicative PROG: - PERF: - ] ARG1: u6 ]
[ _every_q_rel<7:12> LBL: h4 ARG0: x3 RSTR: h7 BODY: h8 ]
[ _dog_n_1_rel<13:16> LBL: h9 ARG0: x3 [ x PERS: 3 NUM: sg IND: + ] ]
[ _bark_v_1_rel<17:24> LBL: h1 ARG0: e2 ARG1: x3 ] >
HCONS: < h0 qeq h1 h7 qeq h9 > ]
'''
)
# ltop different from index
kim_probably_sleeps = simplemrs.loads_one('''
[ LTOP: h0
INDEX: e2 [ e SF: prop TENSE: pres MOOD: indicative PROG: - PERF: - ]
RELS: < [ proper_q<0:3> LBL: h4 ARG0: x3 [ x PERS: 3 NUM: sg IND: + ] RSTR: h5 BODY: h6 ]
[ named<0:3> LBL: h7 CARG: "Kim" ARG0: x3 ]
[ _probable_a_1<4:12> LBL: h1 ARG0: e9 [ e SF: prop TENSE: untensed MOOD: indicative PROG: - PERF: - ] ARG1: h10 ]
[ _sleep_v_1<13:20> LBL: h11 ARG0: e2 ARG1: x3 ] >
HCONS: < h0 qeq h1 h5 qeq h7 h10 qeq h11 > ]
''')
@pytest.fixture
def eds_empty():
return eds.Eds()
@pytest.fixture
def eds_it_rains():
return eds.Eds(
top='e2',
nodes=[
Node(
'e2',
Pred.surface('"_rain_v_1_rel"'),
sortinfo={
'SF': 'prop', 'TENSE': 'pres', 'MOOD': 'indicative',
'PROG': '-', 'PERF': '-', CVARSORT: 'e'},
lnk=Lnk.charspan(3, 9)
)
],
edges=[]
)
@pytest.fixture
def eds_dogs_chase_Kim():
return eds.Eds(
top='e2',
nodes=[
Node('_1', Pred.surface('udef_q_rel')),
Node('x4', Pred.surface('"_dog_n_1_rel"')),
Node('e2', Pred.surface('"_chase_v_1_rel"')),
Node('_2', Pred.surface('proper_q_rel')),
Node('x6', Pred.surface('named_rel'), carg='Kim')
],
edges=[
('_1', 'BV', 'x4'),
('_2', 'BV', 'x6'),
('e2', 'ARG1', 'x4'),
('e2', 'ARG2', 'x6')
]
)
@pytest.fixture
def eds_kim_probably_sleeps():
return eds.Eds(
top='e9',
nodes=[
Node('_1', Pred.surface('proper_q_rel')),
Node('x3', Pred.surface('named_rel'), carg='Kim'),
Node('e9', Pred.surface('_probable_a_1_rel')),
Node('e2', Pred.surface('_sleep_v_1_rel')),
],
edges=[
('_1', 'BV', 'x3'),
('e9', 'ARG1', 'e2'),
('e2', 'ARG1', 'x3')
]
)
class TestEds(object):
def test_init(self, eds_empty, eds_it_rains, eds_dogs_chase_Kim, eds_kim_probably_sleeps):
assert eds_empty.top is None
assert len(eds_empty.nodes()) == 0
assert eds_it_rains.top == 'e2'
assert len(eds_it_rains.nodes()) == 1
assert eds_it_rains.node('e2').pred == '"_rain_v_1_rel"'
assert len(eds_it_rains.edges('e2')) == 0
assert eds_dogs_chase_Kim.top == 'e2'
assert len(eds_dogs_chase_Kim.nodes()) == 5
assert eds_dogs_chase_Kim.nodeids() == ['_1', 'x4', 'e2', '_2', 'x6']
assert eds_dogs_chase_Kim.node('e2').pred == '"_chase_v_1_rel"'
assert eds_dogs_chase_Kim.edges('e2') == {'ARG1': 'x4', 'ARG2': 'x6'}
assert eds_dogs_chase_Kim.node('x6').carg == 'Kim'
assert eds_kim_probably_sleeps.top == 'e9'
assert len(eds_kim_probably_sleeps.nodes()) == 4
assert eds_kim_probably_sleeps.nodeids() == ['_1', 'x3', 'e9', 'e2']
assert eds_kim_probably_sleeps.node('e2').pred == '"_sleep_v_1_rel"'
assert eds_kim_probably_sleeps.edges('e2') == {'ARG1': 'x3'}
assert eds_kim_probably_sleeps.node('x3').carg == 'Kim'
def test_to_dict(self, eds_empty, eds_it_rains, eds_dogs_chase_Kim, eds_kim_probably_sleeps):
assert eds_empty.to_dict() == {'top': None, 'nodes': {}}
assert eds_it_rains.to_dict() == {
'top': 'e2',
'nodes': {
'e2': {
'label': '_rain_v_1',
'lnk': {'from': 3, 'to': 9},
'properties': {
'SF': 'prop', 'TENSE': 'pres', 'MOOD': 'indicative',
'PROG': '-', 'PERF': '-'
},
'type': 'e',
'edges': {}
}
}
}
assert eds_it_rains.to_dict(properties=False) == {
'top': 'e2',
'nodes': {
'e2': {
'label': '_rain_v_1',
'lnk': {'from': 3, 'to': 9},
'edges': {}
}
}
}
assert eds_dogs_chase_Kim.to_dict() == {
'top': 'e2',
'nodes': {
'_1': {'label': 'udef_q', 'edges': {'BV': 'x4'}},
'x4': {'label': '_dog_n_1', 'edges': {}},
'e2': {'label': '_chase_v_1',
'edges': {'ARG1': 'x4', 'ARG2': 'x6'}},
'_2': {'label': 'proper_q', 'edges': {'BV': 'x6'}},
'x6': {'label': 'named', 'edges': {}, 'carg': 'Kim'}
}
}
assert eds_kim_probably_sleeps.to_dict() == {
'top': 'e9',
'nodes': {
'_1': {'label': 'proper_q', 'edges': {'BV': 'x3'}},
'x3': {'label': 'named', 'edges': {}, 'carg': 'Kim'},
'e9': {'label': '_probable_a_1', 'edges': {'ARG1': 'e2'}},
'e2': {'label': '_sleep_v_1', 'edges': {'ARG1': 'x3'}},
}
}
def test_deserialize():
e = eds.loads_one('{}')
assert e.top is None
assert len(e.nodes()) == 0
e = eds.loads_one('{:}')
assert e.top is None
assert len(e.nodes()) == 0
e = eds.loads_one('{e2: e2:_rain_v_1<3:9>[]}')
assert e.top == 'e2'
assert len(e.nodes()) == 1
assert e.nodes()[0].pred == '_rain_v_1_rel'
e = eds.loads_one('{: e2:_rain_v_1<3:9>[]}')
assert e.top is None
assert len(e.nodes()) == 1
assert e.nodes()[0].pred == '_rain_v_1_rel'
e = eds.loads_one(
'{e2:\n'
' e2:_rain_v_1<3:9>{e SF prop, TENSE pres}[]\n'
'}'
)
assert e.top == 'e2'
assert len(e.nodes()) == 1
assert e.nodes()[0].properties == {'SF': 'prop', 'TENSE': 'pres'}
e = eds.loads_one(
'{e2: (fragmented)\n'
'|e5:_nearly_x_deg<0:6>[]\n'
' _1:_every_q<7:12>[BV x3]\n'
' x3:_dog_n_1<13:16>[]\n'
' e2:_bark_v_1<17:24>[ARG1 x3]\n'
'}'
)
assert e.top == 'e2'
assert len(e.nodes()) == 4
# GitHub issue #203
# _thing_n_of-about was tripping up the parser due to the hyphen,
# and the empty property list of _business_n_1 does not have a space
# before } (without the space is better, I think)
e = eds.loads_one(
'{e3:\n'
' _1:udef_q<0:35>[BV x6]\n'
' e9:_successful_a_1<0:10>{e SF prop, TENSE untensed, MOOD indicative, PROG -, PERF -}[ARG1 x6]\n'
' e10:_american_a_1<11:19>{e SF prop, TENSE untensed, MOOD indicative, PROG -, PERF -}[ARG1 x6]\n'
' e12:compound<20:35>{e SF prop, TENSE untensed, MOOD indicative, PROG -, PERF -}[ARG1 x6, ARG2 x11]\n'
' _2:udef_q<20:28>[BV x11]\n'
' x11:_business_n_1<20:28>{x}[]\n'
' x6:_owner_n_of<29:35>{x PERS 3, NUM pl, IND +}[]\n'
' e3:_do_v_1<36:38>{e SF prop, TENSE pres, MOOD indicative, PROG -, PERF -}[ARG1 x6, ARG2 x18]\n'
' _3:_the_q<39:42>[BV x18]\n'
' e23:_same_a_as<43:47>{e SF prop, TENSE untensed, MOOD indicative, PROG -, PERF -}[ARG1 x18]\n'
' e25:comp_equal<43:47>{e SF prop, TENSE untensed, MOOD indicative, PROG -, PERF -}[ARG1 e23]\n'
' x18:_thing_n_of-about<48:54>{x PERS 3, NUM sg, IND +}[]\n'
'}'
)
assert e.top == 'e3'
assert len(e.nodes()) == 12
assert e.nodes()[5].properties == {}
assert e.nodes()[11].pred == '_thing_n_of-about'
def test_serialize():
assert eds.dumps_one(empty, pretty_print=False) == '{:}'
assert eds.dumps_one(empty, pretty_print=True) == '{:\n}'
assert eds.dumps_one(empty) == '{:\n}' # default pretty-print
assert eds.dumps_one(it_rains) == (
'{e2:\n'
' e2:_rain_v_1<3:9>[]\n'
'}'
)
assert eds.dumps_one(it_rains, properties=True) == (
'{e2:\n'
' e2:_rain_v_1<3:9>{e MOOD indicative, PERF -, PROG -, SF prop, TENSE pres}[]\n'
'}'
)
assert eds.dumps_one(dogs_chase_Kim) == (
'{e2:\n'
' _1:udef_q<0:4>[BV x3]\n'
' x3:_dog_n_1<0:4>[]\n'
' e2:_chase_v_1<5:10>[ARG1 x3, ARG2 x8]\n'
' _2:proper_q<11:15>[BV x8]\n'
' x8:named<11:15>("Kim")[]\n'
'}'
)
assert eds.dumps_one(kotaenakatta) == (
'{_1:\n'
' e2:_kotaeru_v_3<0:2>[]\n'
' _1:_neg_v<3:6>[ARG1 e2]\n'
'}'
)
assert eds.dumps_one(nearly_every_dog_barked) == (
'{e2:\n'
' e5:_nearly_x_deg<0:6>[]\n'
' _1:_every_q<7:12>[BV x3]\n'
' x3:_dog_n_1<13:16>[]\n'
' e2:_bark_v_1<17:24>[ARG1 x3]\n'
'}'
)
assert eds.dumps_one(nearly_every_dog_barked, show_status=True) == (
'{e2: (fragmented)\n'
'|e5:_nearly_x_deg<0:6>[]\n'
' _1:_every_q<7:12>[BV x3]\n'
' x3:_dog_n_1<13:16>[]\n'
' e2:_bark_v_1<17:24>[ARG1 x3]\n'
'}'
)
assert eds.dumps_one(nearly_every_dog_barked, predicate_modifiers=True) == (
'{e2:\n'
' e5:_nearly_x_deg<0:6>[ARG1 _1]\n'
' _1:_every_q<7:12>[BV x3]\n'
' x3:_dog_n_1<13:16>[]\n'
' e2:_bark_v_1<17:24>[ARG1 x3]\n'
'}'
)
assert eds.dumps_one(nearly_every_dog_barked,
predicate_modifiers=eds.non_argument_modifiers(role='MOD')) == (
'{e2:\n'
' e5:_nearly_x_deg<0:6>[MOD _1]\n'
' _1:_every_q<7:12>[BV x3]\n'
' x3:_dog_n_1<13:16>[]\n'
' e2:_bark_v_1<17:24>[ARG1 x3]\n'
'}'
)
assert eds.dumps_one(kim_probably_sleeps) == (
'{e9:\n'
' _1:proper_q<0:3>[BV x3]\n'
' x3:named<0:3>("Kim")[]\n'
' e9:_probable_a_1<4:12>[ARG1 e2]\n'
' e2:_sleep_v_1<13:20>[ARG1 x3]\n'
'}'
)
def test_serialize_list():
assert eds.dumps([it_rains, it_rains]) == (
'{e2:\n'
' e2:_rain_v_1<3:9>[]\n'
'}\n'
'{e2:\n'
' e2:_rain_v_1<3:9>[]\n'
'}'
)
assert eds.dumps([it_rains, it_rains], pretty_print=False) == (
'{e2: e2:_rain_v_1<3:9>[]} {e2: e2:_rain_v_1<3:9>[]}'
)
| mit | -8,983,562,707,083,588,000 | 32.925926 | 126 | 0.477578 | false |
faneshion/MatchZoo | matchzoo/auto/tuner/callbacks/load_embedding_matrix.py | 1 | 1541 | from matchzoo.engine.base_model import BaseModel
from matchzoo.auto.tuner.callbacks.callback import Callback
class LoadEmbeddingMatrix(Callback):
"""
Load a pre-trained embedding after the model is built.
Used with tuner to load a pre-trained embedding matrix for each newly built
model instance.
:param embedding_matrix: Embedding matrix to load.
Example:
>>> import matchzoo as mz
>>> model = mz.models.ArcI()
>>> prpr = model.get_default_preprocessor()
>>> data = mz.datasets.toy.load_data()
>>> data = prpr.fit_transform(data, verbose=0)
>>> embed = mz.datasets.toy.load_embedding()
>>> term_index = prpr.context['vocab_unit'].state['term_index']
>>> matrix = embed.build_matrix(term_index)
>>> callback = mz.auto.tuner.callbacks.LoadEmbeddingMatrix(matrix)
>>> model.params.update(prpr.context)
>>> model.params['task'] = mz.tasks.Ranking()
>>> model.params['embedding_output_dim'] = embed.output_dim
>>> result = mz.auto.tune(
... params=model.params,
... train_data=data,
... test_data=data,
... num_runs=1,
... callbacks=[callback],
... verbose=0
... )
"""
def __init__(self, embedding_matrix):
"""Init."""
self._embedding_matrix = embedding_matrix
def on_build_end(self, tuner, model: BaseModel):
"""`on_build_end`."""
model.load_embedding_matrix(self._embedding_matrix)
| apache-2.0 | -8,370,876,919,457,573,000 | 33.244444 | 79 | 0.595717 | false |
credativUK/vdirsyncer | vdirsyncer/sync.py | 1 | 11250 | # -*- coding: utf-8 -*-
'''
The function in `vdirsyncer.sync` can be called on two instances of `Storage`
to synchronize them. Due to the abstract API storage classes are implementing,
the two given instances don't have to be of the same exact type. This allows us
not only to synchronize a local vdir with a CalDAV server, but also synchronize
two CalDAV servers or two local vdirs.
The algorithm is based on the blogpost "How OfflineIMAP works" by Edward Z.
Yang. http://blog.ezyang.com/2012/08/how-offlineimap-works/
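
A minimal sketch of a call (here ``storage_a`` and ``storage_b`` are assumed to
be two already-configured :py:class:`vdirsyncer.storage.base.Storage`
instances)::

    status = {}  # empty on the first sync; persist and reuse it afterwards
    sync(storage_a, storage_b, status, conflict_resolution='a wins')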
'''
import itertools
from . import exceptions, log
from .utils import uniq
from .utils.compat import iteritems, text_type
sync_logger = log.get(__name__)
class SyncError(exceptions.Error):
'''Errors related to synchronization.'''
class SyncConflict(SyncError):
'''
    Two items changed since the last sync; they now have different contents and
    no conflict resolution method was given.
:param ident: The ident of the item.
:param href_a: The item's href on side A.
:param href_b: The item's href on side B.
'''
ident = None
href_a = None
href_b = None
class IdentConflict(SyncError):
'''
Multiple items on the same storage have the same UID.
:param storage: The affected storage.
:param hrefs: List of affected hrefs on `storage`.
'''
storage = None
_hrefs = None
@property
def hrefs(self):
return self._hrefs
@hrefs.setter
def hrefs(self, val):
val = set(val)
assert len(val) > 1
self._hrefs = val
class StorageEmpty(SyncError):
'''
One storage unexpectedly got completely empty between two synchronizations.
The first argument is the empty storage.
:param empty_storage: The empty
:py:class:`vdirsyncer.storage.base.Storage`.
'''
empty_storage = None
class BothReadOnly(SyncError):
'''
Both storages are marked as read-only. Synchronization is therefore not
possible.
'''
class StorageInfo(object):
'''A wrapper class that holds prefetched items, the status and other
things.'''
def __init__(self, storage, status):
'''
:param status: {ident: (href, etag)}
'''
self.storage = storage
self.status = status
self.idents = None
def prepare_idents(self, other_read_only):
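        '''Build ``self.idents`` ({ident: {'href', 'etag', 'item'}}) from
        ``storage.list()``, prefetching items that are new or whose etag
        changed since the last sync. Changed-but-known items are not fetched
        when the other storage is read-only, since they would not be copied
        anyway.'''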
href_to_status = dict((href, (ident, etag))
for ident, (href, etag)
in iteritems(self.status))
hrefs_to_download = []
self.idents = {}
for href, etag in self.storage.list():
if href in href_to_status:
ident, old_etag = href_to_status[href]
self.idents[ident] = {
'etag': etag,
'href': href,
'ident': ident
}
if etag != old_etag and not other_read_only:
hrefs_to_download.append(href)
else:
hrefs_to_download.append(href)
# Prefetch items
for href, item, etag in (self.storage.get_multi(hrefs_to_download) if
hrefs_to_download else ()):
props = self.idents.setdefault(item.ident, {})
props['item'] = item
props['ident'] = item.ident
if props.setdefault('href', href) != href:
raise IdentConflict(storage=self.storage,
hrefs=[props['href'], href])
if props.setdefault('etag', etag) != etag:
raise SyncError('Etag changed during sync.')
def sync(storage_a, storage_b, status, conflict_resolution=None,
force_delete=False):
'''Synchronizes two storages.
:param storage_a: The first storage
:type storage_a: :class:`vdirsyncer.storage.base.Storage`
:param storage_b: The second storage
:type storage_b: :class:`vdirsyncer.storage.base.Storage`
:param status: {ident: (href_a, etag_a, href_b, etag_b)}
metadata about the two storages for detection of changes. Will be
modified by the function and should be passed to it at the next sync.
If this is the first sync, an empty dictionary should be provided.
:param conflict_resolution: Either 'a wins' or 'b wins'. If none is
provided, the sync function will raise
:py:exc:`SyncConflict`.
:param force_delete: When one storage got completely emptied between two
syncs, :py:exc:`StorageEmpty` is raised for
safety. Setting this parameter to ``True`` disables this safety
measure.
'''
if storage_a.read_only and storage_b.read_only:
raise BothReadOnly()
a_info = StorageInfo(storage_a, dict(
(ident, (href_a, etag_a))
for ident, (href_a, etag_a, href_b, etag_b) in iteritems(status)
))
b_info = StorageInfo(storage_b, dict(
(ident, (href_b, etag_b))
for ident, (href_a, etag_a, href_b, etag_b) in iteritems(status)
))
a_info.prepare_idents(storage_b.read_only)
b_info.prepare_idents(storage_a.read_only)
if bool(a_info.idents) != bool(b_info.idents) \
and status and not force_delete:
raise StorageEmpty(
empty_storage=(storage_b if a_info.idents else storage_a))
actions = list(_get_actions(a_info, b_info))
with storage_a.at_once():
with storage_b.at_once():
for action in actions:
action(a_info, b_info, conflict_resolution)
status.clear()
for ident in uniq(itertools.chain(a_info.status, b_info.status)):
href_a, etag_a = a_info.status[ident]
href_b, etag_b = b_info.status[ident]
status[ident] = href_a, etag_a, href_b, etag_b
def _action_upload(ident, source, dest):
def inner(a, b, conflict_resolution):
sync_logger.info('Copying (uploading) item {0} to {1}'
.format(ident, dest.storage))
source_meta = source.idents[ident]
if dest.storage.read_only:
sync_logger.warning('{dest} is read-only. Skipping update...'
.format(dest=dest.storage))
dest_href = dest_etag = None
else:
item = source_meta['item']
dest_href, dest_etag = dest.storage.upload(item)
source.status[ident] = source_meta['href'], source_meta['etag']
dest.status[ident] = dest_href, dest_etag
return inner
def _action_update(ident, source, dest):
def inner(a, b, conflict_resolution):
sync_logger.info('Copying (updating) item {0} to {1}'
.format(ident, dest.storage))
source_meta = source.idents[ident]
if dest.storage.read_only:
sync_logger.info('{dest} is read-only. Skipping update...'
.format(dest=dest.storage))
dest_href = dest_etag = None
else:
dest_meta = dest.idents[ident]
dest_href = dest_meta['href']
dest_etag = dest.storage.update(dest_href, source_meta['item'],
dest_meta['etag'])
assert isinstance(dest_etag, (bytes, text_type))
source.status[ident] = source_meta['href'], source_meta['etag']
dest.status[ident] = dest_href, dest_etag
return inner
def _action_delete(ident, info):
storage = info.storage
idents = info.idents
def inner(a, b, conflict_resolution):
sync_logger.info('Deleting item {0} from {1}'.format(ident, storage))
if storage.read_only:
sync_logger.warning('{0} is read-only, skipping deletion...'
.format(storage))
else:
meta = idents[ident]
etag = meta['etag']
href = meta['href']
storage.delete(href, etag)
del a.status[ident]
del b.status[ident]
return inner
def _action_delete_status(ident):
def inner(a, b, conflict_resolution):
sync_logger.info('Deleting status info for nonexisting item {0}'
.format(ident))
del a.status[ident]
del b.status[ident]
return inner
def _action_conflict_resolve(ident):
def inner(a, b, conflict_resolution):
sync_logger.info('Doing conflict resolution for item {0}...'
.format(ident))
meta_a = a.idents[ident]
meta_b = b.idents[ident]
if meta_a['item'].raw == meta_b['item'].raw:
sync_logger.info('...same content on both sides.')
a.status[ident] = meta_a['href'], meta_a['etag']
b.status[ident] = meta_b['href'], meta_b['etag']
elif conflict_resolution is None:
raise SyncConflict(ident=ident, href_a=meta_a['href'],
href_b=meta_b['href'])
elif conflict_resolution == 'a wins':
sync_logger.info('...{0} wins.'.format(a.storage))
_action_update(ident, a, b)(a, b, conflict_resolution)
elif conflict_resolution == 'b wins':
sync_logger.info('...{0} wins.'.format(b.storage))
_action_update(ident, b, a)(a, b, conflict_resolution)
else:
raise ValueError('Invalid conflict resolution mode: {0}'
.format(conflict_resolution))
return inner
def _get_actions(a_info, b_info):
for ident in uniq(itertools.chain(a_info.idents, b_info.idents,
a_info.status)):
a = a_info.idents.get(ident, None)
b = b_info.idents.get(ident, None)
assert not a or a['etag'] is not None
assert not b or b['etag'] is not None
_, status_etag_a = a_info.status.get(ident, (None, None))
_, status_etag_b = b_info.status.get(ident, (None, None))
if a and b:
if a['etag'] != status_etag_a and b['etag'] != status_etag_b:
# item was modified on both sides
# OR: missing status
yield _action_conflict_resolve(ident)
elif a['etag'] != status_etag_a:
# item was only modified in a
yield _action_update(ident, a_info, b_info)
elif b['etag'] != status_etag_b:
# item was only modified in b
yield _action_update(ident, b_info, a_info)
elif a and not b:
if a['etag'] != status_etag_a:
# was deleted from b but modified on a
# OR: new item was created in a
yield _action_upload(ident, a_info, b_info)
else:
# was deleted from b and not modified on a
yield _action_delete(ident, a_info)
elif not a and b:
if b['etag'] != status_etag_b:
# was deleted from a but modified on b
# OR: new item was created in b
yield _action_upload(ident, b_info, a_info)
else:
# was deleted from a and not changed on b
yield _action_delete(ident, b_info)
elif not a and not b:
# was deleted from a and b, clean up status
yield _action_delete_status(ident)
| mit | -7,033,694,881,106,067,000 | 33.829721 | 79 | 0.573778 | false |
cgqyh/pyalgotrade-mod | pyalgotrade/optimizer/server.py | 1 | 8087 | # PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
import SimpleXMLRPCServer
import threading
import time
import pickle
import pyalgotrade.logger
class AutoStopThread(threading.Thread):
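    """Background thread that shuts the server down once no more jobs are pending."""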
def __init__(self, server):
threading.Thread.__init__(self)
self.__server = server
def run(self):
while self.__server.jobsPending():
time.sleep(1)
self.__server.stop()
class Results(object):
"""The results of the strategy executions."""
def __init__(self, parameters, result):
self.__parameters = parameters
self.__result = result
def getParameters(self):
"""Returns a sequence of parameter values."""
return self.__parameters
def getResult(self):
"""Returns the result for a given set of parameters."""
return self.__result
class Job(object):
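    """A batch of strategy parameter sets handed out to a single worker, together
    with the best result reported back for it."""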
def __init__(self, strategyParameters):
self.__strategyParameters = strategyParameters
self.__bestResult = None
self.__bestParameters = None
self.__id = id(self)
def getId(self):
return self.__id
def getNextParameters(self):
ret = None
if len(self.__strategyParameters):
ret = self.__strategyParameters.pop()
return ret
def getBestParameters(self):
return self.__bestParameters
def getBestResult(self):
return self.__bestResult
def getBestWorkerName(self):
return self.__bestWorkerName
def setBestResult(self, result, parameters, workerName):
self.__bestResult = result
self.__bestParameters = parameters
self.__bestWorkerName = workerName
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
rpc_paths = ('/PyAlgoTradeRPC',)
class Server(SimpleXMLRPCServer.SimpleXMLRPCServer):
defaultBatchSize = 200
def __init__(self, address, port, autoStop=True):
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, (address, port), requestHandler=RequestHandler, logRequests=False, allow_none=True)
self.__instrumentsAndBars = None # Pickle'd instruments and bars for faster retrieval.
self.__barsFreq = None
self.__activeJobs = {}
self.__activeJobsLock = threading.Lock()
self.__parametersLock = threading.Lock()
self.__bestJob = None
self.__parametersIterator = None
self.__logger = pyalgotrade.logger.getLogger("server")
if autoStop:
self.__autoStopThread = AutoStopThread(self)
else:
self.__autoStopThread = None
self.register_introspection_functions()
self.register_function(self.getInstrumentsAndBars, 'getInstrumentsAndBars')
self.register_function(self.getBarsFrequency, 'getBarsFrequency')
self.register_function(self.getNextJob, 'getNextJob')
self.register_function(self.pushJobResults, 'pushJobResults')
self.__forcedStop = False
def __getNextParams(self):
ret = []
# Get the next set of parameters.
with self.__parametersLock:
if self.__parametersIterator is not None:
try:
for i in xrange(Server.defaultBatchSize):
ret.append(self.__parametersIterator.next())
except StopIteration:
self.__parametersIterator = None
return ret
def getLogger(self):
return self.__logger
def getInstrumentsAndBars(self):
return self.__instrumentsAndBars
def getBarsFrequency(self):
return str(self.__barsFreq)
def getBestJob(self):
return self.__bestJob
def getNextJob(self):
ret = None
params = []
# Get the next set of parameters.
params = self.__getNextParams()
# Map the active job
if len(params):
ret = Job(params)
with self.__activeJobsLock:
self.__activeJobs[ret.getId()] = ret
return pickle.dumps(ret)
def jobsPending(self):
if self.__forcedStop:
return False
with self.__parametersLock:
jobsPending = self.__parametersIterator is not None
with self.__activeJobsLock:
activeJobs = len(self.__activeJobs) > 0
return jobsPending or activeJobs
def pushJobResults(self, jobId, result, parameters, workerName):
jobId = pickle.loads(jobId)
result = pickle.loads(result)
parameters = pickle.loads(parameters)
workerName = pickle.loads(workerName)
job = None
# Get the active job and remove the mapping.
with self.__activeJobsLock:
try:
job = self.__activeJobs[jobId]
del self.__activeJobs[jobId]
except KeyError:
# The job's results were already submitted.
return
# Save the job with the best result
if self.__bestJob is None or result > self.__bestJob.getBestResult():
job.setBestResult(result, parameters, workerName)
self.__bestJob = job
self.getLogger().info("Partial result %s with parameters: %s from %s" % (result, parameters, workerName))
def stop(self):
self.shutdown()
def serve(self, barFeed, strategyParameters):
ret = None
try:
# Initialize instruments, bars and parameters.
self.getLogger().info("Loading bars")
loadedBars = []
for dateTime, bars in barFeed:
loadedBars.append(bars)
instruments = barFeed.getRegisteredInstruments()
self.__instrumentsAndBars = pickle.dumps((instruments, loadedBars))
self.__barsFreq = barFeed.getFrequency()
self.__parametersIterator = iter(strategyParameters)
if self.__autoStopThread:
self.__autoStopThread.start()
self.getLogger().info("Waiting for workers")
self.serve_forever()
if self.__autoStopThread:
self.__autoStopThread.join()
# Show the best result.
bestJob = self.getBestJob()
if bestJob:
self.getLogger().info("Best final result %s with parameters: %s from client %s" % (bestJob.getBestResult(), bestJob.getBestParameters(), bestJob.getBestWorkerName()))
ret = Results(bestJob.getBestParameters(), bestJob.getBestResult())
else:
self.getLogger().error("No jobs processed")
finally:
self.__forcedStop = True
return ret
def serve(barFeed, strategyParameters, address, port):
"""Executes a server that will provide bars and strategy parameters for workers to use.
:param barFeed: The bar feed that each worker will use to backtest the strategy.
:type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
:param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
:param address: The address to listen for incoming worker connections.
:type address: string.
:param port: The port to listen for incoming worker connections.
:type port: int.
:rtype: A :class:`Results` instance with the best results found.
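
    A minimal sketch of a call (the ``feed`` object and the parameter ranges are
    assumptions made only for illustration)::

        # each (fast, slow) tuple is handed to a worker as one backtest run
        parameters = ((fast, slow) for fast in range(5, 25) for slow in range(25, 75))
        results = serve(feed, parameters, "localhost", 5000)
        print results.getParameters(), results.getResult()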
"""
s = Server(address, port)
return s.serve(barFeed, strategyParameters)
| apache-2.0 | -4,952,820,957,967,452,000 | 32.556017 | 182 | 0.634599 | false |