Dataset schema:
  ext: string (9 distinct values)
  sha: string (length 40)
  content: string (3 to 1.04M characters)
py
1a437d8f3dc03a3a3b76761cbe27ac86deae6655
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import re import unittest from unittest.mock import MagicMock, PropertyMock, patch from airflow.providers.apache.hive.transfers.hive_to_mysql import HiveToMySqlOperator from airflow.utils import timezone from airflow.utils.operator_helpers import context_to_airflow_vars from tests.providers.apache.hive import TestHiveEnvironment from tests.test_utils.mock_hooks import MockHiveServer2Hook, MockMySqlHook DEFAULT_DATE = timezone.datetime(2015, 1, 1) class TestHiveToMySqlTransfer(TestHiveEnvironment): def setUp(self): self.kwargs = dict( sql='sql', mysql_table='table', hiveserver2_conn_id='hiveserver2_default', mysql_conn_id='mysql_default', task_id='test_hive_to_mysql', ) super().setUp() @patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook') @patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook') def test_execute(self, mock_hive_hook, mock_mysql_hook): HiveToMySqlOperator(**self.kwargs).execute(context={}) mock_hive_hook.assert_called_once_with(hiveserver2_conn_id=self.kwargs['hiveserver2_conn_id']) mock_hive_hook.return_value.get_records.assert_called_once_with('sql', hive_conf={}) mock_mysql_hook.assert_called_once_with(mysql_conn_id=self.kwargs['mysql_conn_id']) mock_mysql_hook.return_value.insert_rows.assert_called_once_with( table=self.kwargs['mysql_table'], rows=mock_hive_hook.return_value.get_records.return_value ) @patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook') @patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook') def test_execute_mysql_preoperator(self, mock_hive_hook, mock_mysql_hook): self.kwargs.update(dict(mysql_preoperator='preoperator')) HiveToMySqlOperator(**self.kwargs).execute(context={}) mock_mysql_hook.return_value.run.assert_called_once_with(self.kwargs['mysql_preoperator']) @patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook') @patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook') def test_execute_with_mysql_postoperator(self, mock_hive_hook, mock_mysql_hook): self.kwargs.update(dict(mysql_postoperator='postoperator')) HiveToMySqlOperator(**self.kwargs).execute(context={}) mock_mysql_hook.return_value.run.assert_called_once_with(self.kwargs['mysql_postoperator']) @patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook') @patch('airflow.providers.apache.hive.transfers.hive_to_mysql.NamedTemporaryFile') @patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook') def test_execute_bulk_load(self, mock_hive_hook, mock_tmp_file, mock_mysql_hook): type(mock_tmp_file).name = PropertyMock(return_value='tmp_file') context = {} self.kwargs.update(dict(bulk_load=True)) HiveToMySqlOperator(**self.kwargs).execute(context=context) 
mock_tmp_file.assert_called_once_with() mock_hive_hook.return_value.to_csv.assert_called_once_with( self.kwargs['sql'], mock_tmp_file.return_value.name, delimiter='\t', lineterminator='\n', output_header=False, hive_conf=context_to_airflow_vars(context) ) mock_mysql_hook.return_value.bulk_load.assert_called_once_with( table=self.kwargs['mysql_table'], tmp_file=mock_tmp_file.return_value.name ) mock_tmp_file.return_value.close.assert_called_once_with() @patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook') def test_execute_with_hive_conf(self, mock_mysql_hook): context = {} mock_hive_hook = MockHiveServer2Hook() mock_hive_hook.get_records = MagicMock(return_value='test_hive_results') self.kwargs.update(dict(hive_conf={'mapreduce.job.queuename': 'fake_queue'})) with patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook', return_value=mock_hive_hook): HiveToMySqlOperator(**self.kwargs).execute(context=context) hive_conf = context_to_airflow_vars(context) hive_conf.update(self.kwargs['hive_conf']) mock_hive_hook.get_records.assert_called_once_with( self.kwargs['sql'], hive_conf=hive_conf ) @unittest.skipIf( 'AIRFLOW_RUNALL_TESTS' not in os.environ, "Skipped because AIRFLOW_RUNALL_TESTS is not set") def test_hive_to_mysql(self): test_hive_results = 'test_hive_results' mock_hive_hook = MockHiveServer2Hook() mock_hive_hook.get_records = MagicMock(return_value=test_hive_results) mock_mysql_hook = MockMySqlHook() mock_mysql_hook.run = MagicMock() mock_mysql_hook.insert_rows = MagicMock() with patch('airflow.providers.apache.hive.transfers.hive_to_mysql.HiveServer2Hook', return_value=mock_hive_hook): with patch('airflow.providers.apache.hive.transfers.hive_to_mysql.MySqlHook', return_value=mock_mysql_hook): op = HiveToMySqlOperator( mysql_conn_id='airflow_db', task_id='hive_to_mysql_check', sql=""" SELECT name FROM airflow.static_babynames LIMIT 100 """, mysql_table='test_static_babynames', mysql_preoperator=[ 'DROP TABLE IF EXISTS test_static_babynames;', 'CREATE TABLE test_static_babynames (name VARCHAR(500))', ], dag=self.dag) op.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) raw_select_name_query = mock_hive_hook.get_records.call_args_list[0][0][0] actual_select_name_query = re.sub(r'\s{2,}', ' ', raw_select_name_query).strip() expected_select_name_query = 'SELECT name FROM airflow.static_babynames LIMIT 100' self.assertEqual(expected_select_name_query, actual_select_name_query) actual_hive_conf = mock_hive_hook.get_records.call_args_list[0][1]['hive_conf'] expected_hive_conf = {'airflow.ctx.dag_owner': 'airflow', 'airflow.ctx.dag_id': 'test_dag_id', 'airflow.ctx.task_id': 'hive_to_mysql_check', 'airflow.ctx.execution_date': '2015-01-01T00:00:00+00:00'} self.assertEqual(expected_hive_conf, actual_hive_conf) expected_mysql_preoperator = ['DROP TABLE IF EXISTS test_static_babynames;', 'CREATE TABLE test_static_babynames (name VARCHAR(500))'] mock_mysql_hook.run.assert_called_with(expected_mysql_preoperator) mock_mysql_hook.insert_rows.assert_called_with(table='test_static_babynames', rows=test_hive_results)
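The tests above exercise HiveToMySqlOperator end to end with mocked hooks. A minimal DAG-level sketch of how the operator might be wired up, using only the parameters the tests exercise; the DAG id, schedule, and connection ids are placeholder assumptions, not values from this test module:

# Hypothetical usage sketch, not part of the test module above.
from airflow import DAG
from airflow.providers.apache.hive.transfers.hive_to_mysql import HiveToMySqlOperator
from airflow.utils import timezone

with DAG(
    dag_id="example_hive_to_mysql",           # assumed name
    start_date=timezone.datetime(2015, 1, 1),
    schedule_interval=None,
) as dag:
    transfer = HiveToMySqlOperator(
        task_id="hive_to_mysql",
        sql="SELECT name FROM airflow.static_babynames LIMIT 100",
        mysql_table="test_static_babynames",
        hiveserver2_conn_id="hiveserver2_default",
        mysql_conn_id="mysql_default",
        mysql_preoperator="DROP TABLE IF EXISTS test_static_babynames",
        bulk_load=False,
    )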
py
1a437fe5c8c23b416fa462eef6819c511eb242d8
# https://my.slack.com/apps/new/A0F7YS25R-bots
API_TOKEN = "<your-api-token>"
DEFAULT_REPLY = "Sorry but I didn't understand you"
ERRORS_TO = "sandbox-foosbot"
py
1a438019b514627b3d84aafa94d7271108507509
import scrapy


class SpiderSauraus(scrapy.Spider):
    name = 'spidersauraus'
    start_urls = ['https://en.wikipedia.org/wiki/List_of_dinosaur_genera']

    def parse(self, response):
        filename = 'dinosaurs.txt'
        dinos = set()
        count = 0
        with open(filename, 'w') as f:
            for dino in response.css('ul>li'):
                dino_name = dino.css('i > a ::text').extract_first()
                if dino_name is not None:
                    dinos.add(dino_name)
                    # Only write the name when the set grew on this iteration,
                    # i.e. the genus has not been seen before.
                    if (count + 1) == len(dinos):
                        f.write(dino_name)
                        f.write('\n')
                        count += 1
        print('{} Dinosaurs found!'.format(count))
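A small sketch of how the spider above could be run outside a full Scrapy project, using Scrapy's CrawlerProcess; it assumes the SpiderSauraus class is importable or defined in the same script:

# Hypothetical runner; assumes the spider class above is in scope.
from scrapy.crawler import CrawlerProcess

process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
process.crawl(SpiderSauraus)
process.start()  # blocks until the crawl finishes; dinosaurs.txt is written by parse()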
py
1a4380b87a6b173abfabcc72669c62e0e7a77861
############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Bootstrap a buildout-based project Simply run this script in a directory containing a buildout.cfg. The script accepts buildout command-line options, so you can use the -c option to specify an alternate configuration file. """ import os import shutil import sys import tempfile from optparse import OptionParser tmpeggs = tempfile.mkdtemp() usage = '''\ [DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options] Bootstraps a buildout-based project. Simply run this script in a directory containing a buildout.cfg, using the Python that you want bin/buildout to use. Note that by using --find-links to point to local resources, you can keep this script from going over the network. ''' parser = OptionParser(usage=usage) parser.add_option("-v", "--version", help="use a specific zc.buildout version") parser.add_option("-t", "--accept-buildout-test-releases", dest='accept_buildout_test_releases', action="store_true", default=False, help=("Normally, if you do not specify a --version, the " "bootstrap script and buildout gets the newest " "*final* versions of zc.buildout and its recipes and " "extensions for you. If you use this flag, " "bootstrap and buildout will get the newest releases " "even if they are alphas or betas.")) parser.add_option("-c", "--config-file", help=("Specify the path to the buildout configuration " "file to be used.")) parser.add_option("-f", "--find-links", help=("Specify a URL to search for buildout releases")) options, args = parser.parse_args() ###################################################################### # load/install setuptools to_reload = False try: import pkg_resources import setuptools except ImportError: ez = {} try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen # XXX use a more permanent ez_setup.py URL when available. exec(urlopen('http://bitbucket.org/pypa/setuptools/raw/0.7.2/ez_setup.py' ).read(), ez) setup_args = dict(to_dir=tmpeggs, download_delay=0) ez['use_setuptools'](**setup_args) if to_reload: reload(pkg_resources) import pkg_resources # This does not (always?) update the default working set. We will # do it. 
for path in sys.path: if path not in pkg_resources.working_set.entries: pkg_resources.working_set.add_entry(path) ###################################################################### # Install buildout ws = pkg_resources.working_set cmd = [sys.executable, '-c', 'from setuptools.command.easy_install import main; main()', '-mZqNxd', tmpeggs] find_links = os.environ.get( 'bootstrap-testing-find-links', options.find_links or ('http://downloads.buildout.org/' if options.accept_buildout_test_releases else None) ) if find_links: cmd.extend(['-f', find_links]) setuptools_path = ws.find( pkg_resources.Requirement.parse('setuptools')).location requirement = 'zc.buildout' version = options.version if version is None and not options.accept_buildout_test_releases: # Figure out the most recent final version of zc.buildout. import setuptools.package_index _final_parts = '*final-', '*final' def _final_version(parsed_version): for part in parsed_version: if (part[:1] == '*') and (part not in _final_parts): return False return True index = setuptools.package_index.PackageIndex( search_path=[setuptools_path]) if find_links: index.add_find_links((find_links,)) req = pkg_resources.Requirement.parse(requirement) if index.obtain(req) is not None: best = [] bestv = None for dist in index[req.project_name]: distv = dist.parsed_version if _final_version(distv): if bestv is None or distv > bestv: best = [dist] bestv = distv elif distv == bestv: best.append(dist) if best: best.sort() version = best[-1].version if version: requirement = '=='.join((requirement, version)) cmd.append(requirement) import subprocess if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0: raise Exception( "Failed to execute command:\n%s", repr(cmd)[1:-1]) ###################################################################### # Import and run buildout ws.add_entry(tmpeggs) ws.require(requirement) import zc.buildout.buildout if not [a for a in args if '=' not in a]: args.append('bootstrap') # if -c was provided, we push it back into args for buildout' main function if options.config_file is not None: args[0:0] = ['-c', options.config_file] zc.buildout.buildout.main(args) shutil.rmtree(tmpeggs)
py
1a4380cdffa3a0a4e99bb387b411fc8c3eb0a98f
# Define here the models for your spider middleware # # See documentation in: # https://docs.scrapy.org/en/latest/topics/spider-middleware.html # useful for handling different item types with a single interface # from itemadapter import ItemAdapter, is_item from scrapy import signals class BooksSpiderMiddleware: # Not all methods need to be defined. If a method is not defined, # scrapy acts as if the spider middleware does not modify the # passed objects. @classmethod def from_crawler(cls, crawler): # This method is used by Scrapy to create your spiders. s = cls() crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) return s def process_spider_input(self, response, spider): # Called for each response that goes through the spider # middleware and into the spider. # Should return None or raise an exception. return None def process_spider_output(self, response, result, spider): # Called with the results returned from the Spider, after # it has processed the response. # Must return an iterable of Request, or item objects. for i in result: yield i def process_spider_exception(self, response, exception, spider): # Called when a spider or process_spider_input() method # (from other spider middleware) raises an exception. # Should return either None or an iterable of Request or item objects. pass def process_start_requests(self, start_requests, spider): # Called with the start requests of the spider, and works # similarly to the process_spider_output() method, except # that it doesn’t have a response associated. # Must return only requests (not items). for r in start_requests: yield r def spider_opened(self, spider): spider.logger.info("Spider opened: %s" % spider.name) class BooksDownloaderMiddleware: # Not all methods need to be defined. If a method is not defined, # scrapy acts as if the downloader middleware does not modify the # passed objects. @classmethod def from_crawler(cls, crawler): # This method is used by Scrapy to create your spiders. s = cls() crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) return s def process_request(self, request, spider): # Called for each request that goes through the downloader # middleware. # Must either: # - return None: continue processing this request # - or return a Response object # - or return a Request object # - or raise IgnoreRequest: process_exception() methods of # installed downloader middleware will be called return None def process_response(self, request, response, spider): # Called with the response returned from the downloader. # Must either; # - return a Response object # - return a Request object # - or raise IgnoreRequest return response def process_exception(self, request, exception, spider): # Called when a download handler or a process_request() # (from other downloader middleware) raises an exception. # Must either: # - return None: continue processing this exception # - return a Response object: stops process_exception() chain # - return a Request object: stops process_exception() chain pass def spider_opened(self, spider): spider.logger.info("Spider opened: %s" % spider.name)
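For the middleware classes above to take effect, Scrapy expects them to be enabled in the project settings; a sketch, assuming the project package is named books (the package name and priority values are assumptions following Scrapy's generated defaults):

# settings.py (hypothetical project settings)
SPIDER_MIDDLEWARES = {
    "books.middlewares.BooksSpiderMiddleware": 543,
}
DOWNLOADER_MIDDLEWARES = {
    "books.middlewares.BooksDownloaderMiddleware": 543,
}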
py
1a43817039ae8725c90cbd915bea3997cb5e0d2d
""" Tests for exam models """ from unittest import TestCase from exams.models import ( ExamAuthorization, ExamProfile, ) class ExamProfileTest(TestCase): """Tests for ExamProfiles""" def test_exam_profile_str(self): """ Test method ExamProfile.__str__ prints correctly """ ep = ExamProfile(id=1, status=ExamProfile.PROFILE_IN_PROGRESS) assert str(ep) == 'Exam Profile "1" with status "in-progress"' class ExamAuthorizationTest(TestCase): """Tests for ExamAuthorizationss""" def test_exam_authorization_str(self): """ Test method ExamAuthorization.__str__ prints correctly """ auth = ExamAuthorization(id=1, status=ExamProfile.PROFILE_IN_PROGRESS, user_id=2) assert str(auth) == 'Exam Authorization "1" with status "in-progress" for user 2'
py
1a4381b8be573ddb8ea56317f4c7c95da69495ed
# Practice using PortAudio in python
# The hope is to eventually create a way of creating/handling formants directly in python
import sounddevice as sd
import numpy as np

# Random noise for the numpy array: one second of samples at 44.1 kHz
data = np.random.uniform(-1, 1, 44100)
fs = 44100

sd.play(data, fs)
sd.wait()  # block until playback finishes; without this a standalone script can exit before any sound plays
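Since the file above mentions working toward formants, here is a slightly more structured sketch that plays a pure tone instead of noise; the frequency, amplitude, and duration values are arbitrary choices, not from the original file:

# Hypothetical follow-up: synthesize and play a 440 Hz sine tone with the same libraries.
import numpy as np
import sounddevice as sd

fs = 44100                   # sample rate in Hz
duration = 1.0               # seconds
t = np.linspace(0, duration, int(fs * duration), endpoint=False)
tone = 0.5 * np.sin(2 * np.pi * 440.0 * t)   # 440 Hz sine at half amplitude

sd.play(tone, fs)
sd.wait()                    # block until playback completes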
py
1a4382a30bf5e2c4f119df80542a59b4f0e2e67e
from dagster import check, RuntimeType, seven

PICKLE_PROTOCOL = 2


def is_json_serializable(value):
    try:
        seven.json.dumps(value)
        return True
    except TypeError:
        return False


def read_value(runtime_type, value):
    check.inst_param(runtime_type, 'runtime_type', RuntimeType)
    if runtime_type.is_scalar:
        return value
    elif runtime_type.is_any and is_json_serializable(value):
        return value
    else:
        return runtime_type.serialization_strategy.deserialize_from_file(value)


def write_value(runtime_type, value, target_file):
    check.inst_param(runtime_type, 'runtime_type', RuntimeType)
    if runtime_type.is_scalar:
        return value
    elif runtime_type.is_any and is_json_serializable(value):
        return value
    else:
        runtime_type.serialization_strategy.serialize_to_file(value, target_file)
        return target_file
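A quick illustration of what the is_json_serializable helper above reports for typical values; this is purely illustrative and does not depend on the RuntimeType machinery:

# Plain JSON-friendly containers serialize; arbitrary objects do not.
print(is_json_serializable({"a": [1, 2, 3]}))   # True
print(is_json_serializable(object()))           # False (json raises TypeError)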
py
1a43833817c3422be41292d286b26801fe09263f
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Builds the CIFAR-10 network with additional variables to support pruning. Summary of available functions: # Compute input images and labels for training. If you would like to run # evaluations, use inputs() instead. inputs, labels = distorted_inputs() # Compute inference on the model inputs to make a prediction. predictions = inference(inputs) # Compute the total loss of the prediction with respect to the labels. loss = loss(predictions, labels) # Create a graph to run one step of training with respect to the loss. train_op = train(loss, global_step) """ # pylint: disable=missing-docstring from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import re import sys import tarfile from six.moves import urllib import tensorflow.compat.v1 as tf from model_pruning.examples.cifar10 import cifar10_input from model_pruning.python import pruning_interface # Global constants describing the CIFAR-10 data set. IMAGE_SIZE = cifar10_input.IMAGE_SIZE NUM_CLASSES = cifar10_input.NUM_CLASSES NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN # pylint: disable=line-too-long NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL BATCH_SIZE = 128 DATA_DIR = '/tmp/cifar10_data' # Constants describing the training process. MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average. NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays. LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor. INITIAL_LEARNING_RATE = 0.1 # Initial learning rate. # If a model is trained with multiple GPUs, prefix all Op names with tower_name # to differentiate the operations. Note that this prefix is removed from the # names of the summaries when visualizing a model. TOWER_NAME = 'tower' DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz' def _activation_summary(x): """Helper to create summaries for activations. Creates a summary that provides a histogram of activations. Creates a summary that measures the sparsity of activations. Args: x: Tensor Returns: nothing """ # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training # session. This helps the clarity of presentation on tensorboard. tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name) tf.summary.histogram(tensor_name + '/activations', x) tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) def _variable_on_cpu(name, shape, initializer): """Helper to create a Variable stored on CPU memory. Args: name: name of the variable shape: list of ints initializer: initializer for Variable Returns: Variable Tensor """ with tf.device('/cpu:0'): dtype = tf.float32 var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype) return var def _variable_with_weight_decay(name, shape, stddev, wd): """Helper to create an initialized Variable with weight decay. 
Note that the Variable is initialized with a truncated normal distribution. A weight decay is added only if one is specified. Args: name: name of the variable shape: list of ints stddev: standard deviation of a truncated Gaussian wd: add L2Loss weight decay multiplied by this float. If None, weight decay is not added for this Variable. Returns: Variable Tensor """ dtype = tf.float32 var = _variable_on_cpu( name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)) if wd is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') tf.add_to_collection('losses', weight_decay) return var def distorted_inputs(): """Construct distorted input for CIFAR training using the Reader ops. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. Raises: ValueError: If no data_dir """ if not DATA_DIR: raise ValueError('Please supply a data_dir') data_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin') images, labels = cifar10_input.distorted_inputs( data_dir=data_dir, batch_size=BATCH_SIZE) return images, labels def inputs(eval_data): """Construct input for CIFAR evaluation using the Reader ops. Args: eval_data: bool, indicating if one should use the train or eval data set. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. Raises: ValueError: If no data_dir """ if not DATA_DIR: raise ValueError('Please supply a data_dir') data_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin') images, labels = cifar10_input.inputs( eval_data=eval_data, data_dir=data_dir, batch_size=BATCH_SIZE) return images, labels def inference(images, matrix_compression_obj): """Build the CIFAR-10 model. Args: images: Images returned from distorted_inputs() or inputs(). matrix_compression_obj: A Pruning or compression_lib.compression_op.ApplyCompression object. Returns: Logits. """ # We instantiate all variables using tf.compat.v1.get_variable() instead of # tf.Variable() in order to share variables across multiple GPU training runs. # If we only ran this model on a single GPU, we could simplify this function # by replacing all instances of tf.compat.v1.get_variable() with # tf.Variable(). # # While instantiating conv and local layers, we add mask and threshold # variables to the layer by calling the # pruning_interface.apply_matrix_compression function. 
# Note that the masks are applied only to the weight tensors # conv1 with tf.variable_scope('conv1') as scope: kernel = _variable_with_weight_decay( 'weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0) conv = tf.nn.conv2d( images, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0)) pre_activation = tf.nn.bias_add(conv, biases) conv1 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv1) # pool1 pool1 = tf.nn.max_pool( conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1') # norm1 norm1 = tf.nn.lrn( pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1') # conv2 with tf.variable_scope('conv2') as scope: kernel = _variable_with_weight_decay( 'weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0) conv = tf.nn.conv2d( norm1, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1)) pre_activation = tf.nn.bias_add(conv, biases) conv2 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv2) # norm2 norm2 = tf.nn.lrn( conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2') # pool2 pool2 = tf.nn.max_pool( norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2') # local3 with tf.variable_scope('local3') as scope: # Move everything into depth so we can perform a single matrix multiply. reshape = tf.reshape(pool2, [BATCH_SIZE, -1]) dim = reshape.get_shape()[1].value weights = _variable_with_weight_decay( 'weights', shape=[dim, 384], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1)) local3 = tf.nn.relu( tf.matmul(reshape, pruning_interface.apply_matrix_compression( matrix_compression_obj, weights, scope)) + biases, name=scope.name) _activation_summary(local3) # local4 with tf.variable_scope('local4') as scope: weights = _variable_with_weight_decay( 'weights', shape=[384, 192], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1)) local4 = tf.nn.relu( tf.matmul(local3, pruning_interface.apply_matrix_compression( matrix_compression_obj, weights, scope)) + biases, name=scope.name) _activation_summary(local4) # linear layer(WX + b), # We don't apply softmax here because # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits # and performs the softmax internally for efficiency. with tf.variable_scope('softmax_linear') as scope: weights = _variable_with_weight_decay( 'weights', [192, NUM_CLASSES], stddev=1 / 192.0, wd=0.0) biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0)) softmax_linear = tf.add( tf.matmul(local4, pruning_interface.apply_matrix_compression( matrix_compression_obj, weights, scope)), biases, name=scope.name) _activation_summary(softmax_linear) return softmax_linear def loss(logits, labels): """Add L2Loss to all the trainable variables. Add summary for "Loss" and "Loss/avg". Args: logits: Logits from inference(). labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size] Returns: Loss tensor of type float. """ # Calculate the average cross entropy loss across the batch. 
labels = tf.cast(labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits, name='cross_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) # The total loss is defined as the cross entropy loss plus all of the weight # decay terms (L2 loss). return tf.add_n(tf.get_collection('losses'), name='total_loss') def _add_loss_summaries(total_loss): """Add summaries for losses in CIFAR-10 model. Generates moving average for all losses and associated summaries for visualizing the performance of the network. Args: total_loss: Total loss from loss(). Returns: loss_averages_op: op for generating moving averages of losses. """ # Compute the moving average of all individual losses and the total loss. loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') losses = tf.get_collection('losses') loss_averages_op = loss_averages.apply(losses + [total_loss]) # Attach a scalar summary to all individual losses and the total loss; do the # same for the averaged version of the losses. for l in losses + [total_loss]: # Name each loss as '(raw)' and name the moving average version of the loss # as the original loss name. tf.summary.scalar(l.op.name + ' (raw)', l) tf.summary.scalar(l.op.name, loss_averages.average(l)) return loss_averages_op def train(total_loss, global_step): """Train CIFAR-10 model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training. """ # Variables that affect learning rate. num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / BATCH_SIZE decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY) # Decay the learning rate exponentially based on the number of steps. lr = tf.train.exponential_decay( INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.summary.scalar('learning_rate', lr) # Generate moving averages of all losses and associated summaries. loss_averages_op = _add_loss_summaries(total_loss) # Compute gradients. with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) # Apply gradients. apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # Add histograms for trainable variables. for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) # Add histograms for gradients. for grad, var in grads: if grad is not None: tf.summary.histogram(var.op.name + '/gradients', grad) # Track the moving averages of all trainable variables. 
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op def maybe_download_and_extract(): """Download and extract the tarball from Alex's website.""" dest_directory = DATA_DIR if not os.path.exists(dest_directory): os.makedirs(dest_directory) filename = DATA_URL.split('/')[-1] filepath = os.path.join(dest_directory, filename) if not os.path.exists(filepath): def _progress(count, block_size, total_size): sys.stdout.write( '\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') tarfile.open(filepath, 'r:gz').extractall(dest_directory)
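The module docstring at the top of this file spells out how the pieces compose; below is a minimal sketch of that composition. The matrix-compression/pruning object is left as an assumed argument, since constructing it lives elsewhere in model_pruning:

# Sketch only: wires together distorted_inputs(), inference(), loss() and train()
# exactly as the module docstring describes.
import tensorflow.compat.v1 as tf

def build_train_op(matrix_compression_obj):
    global_step = tf.train.get_or_create_global_step()
    images, labels = distorted_inputs()
    logits = inference(images, matrix_compression_obj)
    total_loss = loss(logits, labels)
    return train(total_loss, global_step)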
py
1a43835de5c7dcc6a319f7371684d3430970febc
#!/usr/bin/python3 -OO
# Copyright 2007-2019 The SABnzbd-Team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.

"""
tests.test_newsunpack - Tests of various functions in newsunpack
"""

import pytest

from sabnzbd.newsunpack import *


class TestNewsUnpack:
    @pytest.mark.parametrize(
        "test_input, expected_output",
        [
            (["cmd1", 9, "cmd3"], '"cmd1" "9" "cmd3"'),  # sending all commands as valid string
            (["", "cmd1", "5"], '"" "cmd1" "5"'),  # sending blank string
            (["cmd1", None, "cmd3", "tail -f"], '"cmd1" "" "cmd3" "tail -f"'),  # sending None in command
            (["cmd1", 0, "ps ux"], '"cmd1" "" "ps ux"'),  # sending 0
        ],
    )
    def test_list_to_cmd(self, test_input, expected_output):
        """ Test to convert list to a cmd.exe-compatible command string """
        res = list2cmdline(test_input)
        # Make sure the output is cmd.exe-compatible
        assert res == expected_output
py
1a4383ea2bdb2ada79a25d263c4bdf405dc6ea7f
""" @name: Modules/House/Lighting/lighting.py @author: D. Brian Kimmel @contact: [email protected] @copyright: (c) 2010-2020 by D. Brian Kimmel @note: Created on Apr 2, 2010 @license: MIT License @summary: Handle the home lighting system automation. PyHouse.House.Lighting. Buttons Controllers Lights Outlets """ __updated__ = '2020-02-21' __version_info__ = (20, 1, 25) __version__ = '.'.join(map(str, __version_info__)) # Import system type stuff # Import PyHouse files from Modules.Core.Config.config_tools import Api as configApi from Modules.House.Lighting import MODULES, LightingClass from Modules.Core.Utilities.debug_tools import PrettyFormatAny from Modules.Core import logging_pyh as Logger LOG = Logger.getLogger('PyHouse.Lighting ') class LocalConfig: """ """ m_config = None m_pyhouse_obj = None m_schedule_altered = False def __init__(self, p_pyhouse_obj): self.m_pyhouse_obj = p_pyhouse_obj self.m_config = configApi(p_pyhouse_obj) self.m_schedule_altered = False def _update_lighting_from_yaml(self, _p_pyhouse_obj, p_node_yaml): """ """ l_lighting = {} try: l_yaml = p_node_yaml['Lighting'] except: LOG.error('The "Lighting" tag is missing in the "lighting.yaml" file!') return None for l_key, l_val in l_yaml.items(): LOG.debug('\n\tKey: {}\n\tValue: {}'.format(l_key, PrettyFormatAny.form(l_val, 'Lighting.Update', 190))) return l_lighting # For testing. def load_yaml_config(self, p_pyhouse_obj): """ Read the lighting.yaml file. It contains lighting data for the house. """ pass # ---------- def save_yaml_config(self, _p_pyhouse_obj): """ """ LOG.info('Saving Config - Version:{}'.format(__version__)) class Api: """ Handles all the components of the lighting sub-system. """ m_config_tools = None m_local_config = None m_pyhouse_obj = None m_module_apis = None def __init__(self, p_pyhouse_obj) -> None: LOG.info("Initialing - Version:{}".format(__version__)) self.m_pyhouse_obj = p_pyhouse_obj self._add_storage() self.m_local_config = LocalConfig(p_pyhouse_obj) self.m_config_tools = configApi(p_pyhouse_obj) l_path = 'Modules.House.Lighting.' l_modules = self.m_config_tools.find_module_list(MODULES) self.m_module_apis = self.m_config_tools.import_module_list(l_modules, l_path) p_pyhouse_obj.House.Lighting._Apis = self.m_module_apis LOG.info("Initialized - Version:{}".format(__version__)) def _add_storage(self) -> None: self.m_pyhouse_obj.House.Lighting = LightingClass() def LoadConfig(self): LOG.info('Loading all Lighting config files.') LOG.debug(PrettyFormatAny.form(self.m_module_apis, 'Apis')) for l_module in self.m_module_apis.values(): l_module.LoadConfig() LOG.info('Loaded Lighting config files.') def Start(self): LOG.info("Starting.") for l_module in self.m_module_apis.values(): l_module.Start() LOG.info("Started.") def SaveConfig(self): LOG.info('SaveConfig') for l_module in self.m_module_apis.values(): l_module.SaveConfig() LOG.info("Saved Lighting Config.") return def Stop(self): for l_module in self.m_module_apis.values(): l_module.Stop() LOG.info("Stopped.") def Control(self, p_device_obj, p_controller_obj, p_control): """ Insteon specific version of control light All that Insteon can control is Brightness and Fade Rate. 
@param p_controller_obj: optional ==> ControllerInformation @param p_device_obj: the device being controlled @param p_control: the idealized light control params """ if self.m_plm == None: LOG.info('No PLM was defined - Quitting.') return self.m_plm.Control(p_device_obj, p_controller_obj, p_control) def MqttDispatch(self, p_msg): """ """ LOG.debug(PrettyFormatAny.form(p_msg, 'Msg')) p_msg.LogMessage += '\tLighting: {}\n'.format(self.m_pyhouse_obj.House.Name) l_topic = p_msg.UnprocessedTopic[0].lower() p_msg.UnprocessedTopic = p_msg.UnprocessedTopic[1:] if l_topic in self.m_module_apis: self.m_module_apis[l_topic].MqttDispatch(p_msg) else: p_msg.LogMessage += '\tUnknown sub-topic: "{}"'.format(l_topic) LOG.warning('Unknown lighting Topic: {}\n\tTopic: {}\n\tMessge: {}'.format(l_topic, p_msg.Topic, p_msg.Payload)) LOG.debug(PrettyFormatAny.form(self.m_module_apis, 'Modules')) # ## END DBK
py
1a43841e43ff08cfc1ee4088903437a53dd8100c
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import copy import os import weakref from absl.testing import parameterized import six from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import template from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import save as saved_model_save from tensorflow.python.training import checkpoint_management from tensorflow.python.training import saver as saver_lib from tensorflow.python.training.saving import checkpoint_options from tensorflow.python.training.tracking import base from tensorflow.python.training.tracking import graph_view from tensorflow.python.training.tracking import tracking from tensorflow.python.training.tracking import util as trackable_utils class NonLayerTrackable(tracking.AutoTrackable): def __init__(self): super(NonLayerTrackable, self).__init__() self.a_variable = trackable_utils.add_variable( self, name="a_variable", shape=[]) class InterfaceTests(test.TestCase): @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testAddVariable(self): obj = NonLayerTrackable() with self.assertRaisesRegex(ValueError, "do not specify shape"): trackable_utils.add_variable( obj, name="shape_specified_twice", shape=[], initializer=1) constant_initializer = trackable_utils.add_variable( obj, name="constant_initializer", initializer=1) with variable_scope.variable_scope("some_variable_scope"): ones_initializer = trackable_utils.add_variable( obj, name="ones_initializer", shape=[2], initializer=init_ops.ones_initializer(dtype=dtypes.float32)) bare_initializer = trackable_utils.add_variable( obj, name="bare_initializer", shape=[2, 2], dtype=dtypes.float64, initializer=init_ops.zeros_initializer) # Even in graph mode, there are no naming conflicts between objects, only # naming conflicts within an object. other_duplicate = resource_variable_ops.ResourceVariable( name="duplicate", initial_value=1.) 
duplicate = trackable_utils.add_variable( obj, name="duplicate", shape=[]) with self.assertRaisesRegex(ValueError, "'duplicate'.*already declared"): trackable_utils.add_variable(obj, name="duplicate", shape=[]) self.evaluate(trackable_utils.gather_initializers(obj)) self.assertEqual("constant_initializer:0", constant_initializer.name) self.assertEqual(1, self.evaluate(constant_initializer)) self.assertEqual("some_variable_scope/ones_initializer:0", ones_initializer.name) self.assertAllEqual([1, 1], self.evaluate(ones_initializer)) self.assertAllEqual([[0., 0.], [0., 0.]], self.evaluate(bare_initializer)) self.assertEqual("a_variable:0", obj.a_variable.name) self.assertEqual("duplicate:0", other_duplicate.name) if context.executing_eagerly(): # When executing eagerly, there's no uniquification of variable names. The # checkpoint name will be the same. self.assertEqual("duplicate:0", duplicate.name) else: # The .name attribute may be globally influenced, but the checkpoint name # won't be (tested below). self.assertEqual("duplicate_1:0", duplicate.name) named_variables, _, _ = ( graph_view.ObjectGraphView(obj).serialize_object_graph()) expected_checkpoint_names = ( "a_variable/.ATTRIBUTES/VARIABLE_VALUE", "bare_initializer/.ATTRIBUTES/VARIABLE_VALUE", "constant_initializer/.ATTRIBUTES/VARIABLE_VALUE", "duplicate/.ATTRIBUTES/VARIABLE_VALUE", "ones_initializer/.ATTRIBUTES/VARIABLE_VALUE", ) six.assertCountEqual( self, expected_checkpoint_names, [v.name for v in named_variables]) def testInitNotCalled(self): class NoInit(tracking.AutoTrackable): def __init__(self): pass # __init__ for Trackable will be called implicitly. trackable_utils.add_variable(NoInit(), "var", shape=[]) def testShapeDtype(self): root = tracking.AutoTrackable() v1 = trackable_utils.add_variable( root, name="v1", initializer=3., dtype=dtypes.float64) self.assertEqual(dtypes.float64, v1.dtype) v2 = trackable_utils.add_variable( root, name="v2", shape=[3], initializer=init_ops.ones_initializer, dtype=dtypes.float64) self.assertEqual(dtypes.float64, v2.dtype) self.assertAllEqual([1., 1., 1.], self.evaluate(v2)) class _MirroringSaveable(saver_lib.BaseSaverBuilder.SaveableObject): def __init__(self, primary_variable, mirrored_variable, name): self._primary_variable = primary_variable self._mirrored_variable = mirrored_variable tensor = self._primary_variable.read_value() spec = saver_lib.BaseSaverBuilder.SaveSpec( tensor=tensor, slice_spec="", name=name) super(_MirroringSaveable, self).__init__( tensor, [spec], name) def restore(self, restored_tensors, restored_shapes): """Restore the same value into both variables.""" tensor, = restored_tensors return control_flow_ops.group( self._primary_variable.assign(tensor), self._mirrored_variable.assign(tensor)) class _OwnsMirroredVariables(base.Trackable): """A Trackable object which returns a more complex SaveableObject.""" def __init__(self): self.non_dep_variable = variable_scope.get_variable( name="non_dep_variable", initializer=6., use_resource=True) self.mirrored = variable_scope.get_variable( name="mirrored", initializer=15., use_resource=True) def _gather_saveables_for_checkpoint(self): def _saveable_factory(name=self.non_dep_variable.name): return _MirroringSaveable( primary_variable=self.non_dep_variable, mirrored_variable=self.mirrored, name=name) return {base.VARIABLE_VALUE_KEY: _saveable_factory} # The Saver sorts by name before parsing, so we need a name property. 
@property def name(self): return self.non_dep_variable.name class CheckpointingTests(parameterized.TestCase, test.TestCase): @test_util.run_in_graph_and_eager_modes def testMoreComplexSaveableReturned(self): v = _OwnsMirroredVariables() checkpoint = trackable_utils.Checkpoint(v=v) test_dir = self.get_temp_dir() prefix = os.path.join(test_dir, "ckpt") self.evaluate(v.non_dep_variable.assign(42.)) save_path = checkpoint.save(prefix) self.evaluate(v.non_dep_variable.assign(43.)) self.evaluate(v.mirrored.assign(44.)) checkpoint.restore(save_path).assert_consumed().initialize_or_restore() self.assertEqual(42., self.evaluate(v.non_dep_variable)) self.assertEqual(42., self.evaluate(v.mirrored)) self.evaluate(v.non_dep_variable.assign(44.)) save_path = checkpoint.save(prefix) self.evaluate(v.non_dep_variable.assign(45.)) checkpoint.restore(save_path).assert_consumed().initialize_or_restore() self.assertEqual(44., self.evaluate(v.non_dep_variable)) self.assertEqual(44., self.evaluate(v.mirrored)) @test_util.run_in_graph_and_eager_modes def testMoreComplexSaveableReturnedWithGlobalName(self): # The same object can also be saved using the name-based saver. v = _OwnsMirroredVariables() saver = saver_lib.Saver(var_list=[v]) test_dir = self.get_temp_dir() prefix = os.path.join(test_dir, "ckpt") with self.cached_session() as sess: self.evaluate(v.non_dep_variable.assign(42.)) save_path = saver.save(sess, prefix) self.evaluate(v.non_dep_variable.assign(43.)) self.evaluate(v.mirrored.assign(44.)) saver.restore(sess, save_path) self.assertEqual(42., self.evaluate(v.non_dep_variable)) self.assertEqual(42., self.evaluate(v.mirrored)) @test_util.run_in_graph_and_eager_modes def testAssertConsumedNoCheckpoint(self): prefix = os.path.join(self.get_temp_dir(), "ckpt") v = variable_scope.get_variable(name="v", initializer=0.) self.evaluate(v.initializer) ckpt = trackable_utils.Checkpoint(v=v) self.evaluate(trackable_utils.gather_initializers(ckpt)) save_path = ckpt.save(file_prefix=prefix) status = ckpt.restore(save_path=save_path) del ckpt status.assert_consumed() def testDeepCopyCheckpoint(self): prefix = os.path.join(self.get_temp_dir(), "ckpt") v = variables_lib.Variable(1.) original_ckpt = trackable_utils.Checkpoint(v=v) copied_ckpt = copy.deepcopy(original_ckpt) copied_ckpt.v.assign(2.) self.assertAllClose(1., v) save_path = copied_ckpt.save(file_prefix=prefix) original_ckpt.restore(save_path=save_path).assert_consumed() self.assertAllClose(2., v) @test_util.run_in_graph_and_eager_modes def testPassingCheckpointOptions(self): localhost = "/job:localhost/device:CPU:0" options = checkpoint_options.CheckpointOptions( experimental_io_device=localhost) prefix = os.path.join(self.get_temp_dir(), "ckpt") v = variable_scope.get_variable(name="v", initializer=0.) self.evaluate(v.initializer) ckpt = trackable_utils.Checkpoint(v=v) self.evaluate(trackable_utils.gather_initializers(ckpt)) save_path = ckpt.save(file_prefix=prefix, options=options) status = ckpt.restore(save_path=save_path, options=options) del ckpt status.assert_consumed() # In graph mode, verify that the save and restore ops were set to run on # localhost. 
if not context.executing_eagerly(): for op in ops.get_default_graph().get_operations(): if op.type in ("SaveV2", "RestoreV2"): self.assertEqual(localhost, op.device) @test_util.run_in_graph_and_eager_modes def testFreezing(self): with test_util.use_gpu(): # Save an object-based checkpoint using a frozen saver directory = self.get_temp_dir() prefix = os.path.join(directory, "ckpt") v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64) checkpoint = trackable_utils.Checkpoint(v=v) self.evaluate(v.assign(3)) # Create the save counter so assert_consumed doesn't complain about it not # existing in the checkpoint on restore. self.evaluate(checkpoint.save_counter.assign(12)) saver = trackable_utils.frozen_saver(checkpoint) with ops.device("cpu:0"): prefix_tensor = constant_op.constant(prefix) self.evaluate(saver.save(prefix_tensor)) self.evaluate(v.assign(10)) # Use the frozen saver to restore the same object graph self.evaluate(saver.restore(prefix_tensor)) self.assertEqual(3, self.evaluate(v)) # Restore using another frozen saver on an identical object graph del v, checkpoint, saver v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64) checkpoint = trackable_utils.Checkpoint(v=v) saver = trackable_utils.frozen_saver(checkpoint) self.evaluate(saver.restore(prefix_tensor)) self.assertEqual(3, self.evaluate(v)) # Restore as an object-based checkpoint del v, checkpoint, saver checkpoint = trackable_utils.Checkpoint() status = checkpoint.restore(prefix) v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64) if context.executing_eagerly(): self.assertEqual(12, self.evaluate(checkpoint.save_counter)) self.assertEqual(0, self.evaluate(v)) checkpoint.v = v status.assert_consumed().run_restore_ops() self.assertEqual(3, self.evaluate(v)) self.assertEqual(12, self.evaluate(checkpoint.save_counter)) @test_util.run_in_graph_and_eager_modes def testCustomNumbering(self): directory = self.get_temp_dir() prefix = os.path.join(directory, "ckpt") step = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64) checkpoint = trackable_utils.Checkpoint(step=step) self.evaluate(step.initializer) for i in range(5): path = checkpoint.write("%s-%d" % (prefix, self.evaluate(step))) expected_suffix = "-%d" % (2 * i,) if not path.endswith(expected_suffix): self.fail("%s should have suffix %s" % (path, expected_suffix)) self.evaluate(step.assign_add(2)) def testPartialRestoreWarningAttribute(self): with context.eager_mode(): original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.), v2=variables_lib.Variable(3.)) prefix = os.path.join(self.get_temp_dir(), "ckpt") save_path = original_root.save(prefix) partial_root = trackable_utils.Checkpoint(v1=base.Trackable(), v2=variables_lib.Variable(0.)) weak_partial_root = weakref.ref(partial_root) with test.mock.patch.object(logging, "warning") as mock_log: # Note: Unlike in testPartialRestoreWarningObject, the warning actually # prints immediately here, since all of the objects have been created # and there's no deferred restoration sitting around. 
partial_root.restore(save_path) self.assertEqual(3., partial_root.v2.numpy()) del partial_root self.assertIsNone(weak_partial_root()) messages = str(mock_log.call_args_list) self.assertIn("(root).v1", messages) self.assertNotIn("(root).v2", messages) self.assertIn("expect_partial()", messages) def testAttributeException(self): with context.eager_mode(): original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.), v2=variables_lib.Variable(3.)) prefix = os.path.join(self.get_temp_dir(), "ckpt") save_path = original_root.save(prefix) partial_root = trackable_utils.Checkpoint(v1=base.Trackable(), v2=variables_lib.Variable(0.)) status = partial_root.restore(save_path) with self.assertRaisesRegex(AssertionError, r"Unused attributes(.|\n)*\(root\).v1"): status.assert_consumed() def testSilencePartialWarning(self): with context.eager_mode(): original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.), v2=variables_lib.Variable(3.)) prefix = os.path.join(self.get_temp_dir(), "ckpt") save_path = original_root.save(prefix) partial_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(0.)) weak_partial_root = weakref.ref(partial_root) weak_v1 = weakref.ref(partial_root.v1) partial_root.restore(save_path).expect_partial() self.assertEqual(2., partial_root.v1.numpy()) with test.mock.patch.object(logging, "warning") as mock_log: del partial_root self.assertIsNone(weak_partial_root()) self.assertIsNone(weak_v1()) self.assertEmpty(mock_log.call_args_list) def _get_checkpoint_name(self, name): root = tracking.AutoTrackable() trackable_utils.add_variable( root, name=name, shape=[1, 2], dtype=dtypes.float64) (named_variable,), _, _ = graph_view.ObjectGraphView( root).serialize_object_graph() with ops.name_scope("root/" + named_variable.name): pass # Make sure we can use this as an op name if we prefix it. return named_variable.name @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testVariableNameEscaping(self): suffix = "/.ATTRIBUTES/VARIABLE_VALUE" self.assertEqual(r"a.Sb.Sc" + suffix, self._get_checkpoint_name(r"a/b/c")) self.assertEqual(r"b" + suffix, self._get_checkpoint_name(r"b")) self.assertEqual(r"c.S" + suffix, self._get_checkpoint_name(r"c/")) self.assertEqual(r"d.S..S" + suffix, self._get_checkpoint_name(r"d/.S")) self.assertEqual(r"d.S..ATTRIBUTES.Sf" + suffix, self._get_checkpoint_name(r"d/.ATTRIBUTES/f")) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testNumberedPath(self): root = tracking.AutoTrackable() leaf = tracking.AutoTrackable() root.leaf = leaf trackable_utils.add_variable(leaf, name="v", shape=[]) (named_variable,), _, _ = graph_view.ObjectGraphView( root).serialize_object_graph() self.assertEqual(r"leaf/v/.ATTRIBUTES/VARIABLE_VALUE", named_variable.name) @test_util.run_in_graph_and_eager_modes def testLocalNameValidation(self): root = tracking.AutoTrackable() leaf = tracking.AutoTrackable() # Dots are escaped, which avoids conflicts with reserved names. root._track_trackable(leaf, name=".ATTRIBUTES") trackable_utils.add_variable(trackable=leaf, name="a", shape=[]) (named_variable,), _, _ = graph_view.ObjectGraphView( root).serialize_object_graph() self.assertEqual("..ATTRIBUTES/a/.ATTRIBUTES/VARIABLE_VALUE", named_variable.name) @test_util.run_in_graph_and_eager_modes def testLateDependencyTracking(self): class Dependency(tracking.AutoTrackable): def build(self): self.var = trackable_utils.add_variable( self, "var", initializer=0.) 
class LateDependencies(trackable_utils.Checkpoint): def add_dep(self): self.dep = Dependency() self.dep.build() original = LateDependencies() original.add_dep() self.evaluate(state_ops.assign(original.dep.var, 123.)) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = original.save(checkpoint_prefix) load_into = LateDependencies() status = load_into.restore(save_path) status.assert_existing_objects_matched() with self.assertRaises(AssertionError): status.assert_consumed() load_into.add_dep() status.assert_consumed() status.assert_existing_objects_matched().run_restore_ops() self.assertEqual(123., self.evaluate(load_into.dep.var)) @test_util.run_in_graph_and_eager_modes def testDepAfterVar(self): class Dependency(tracking.AutoTrackable): def build(self): self.var = trackable_utils.add_variable( self, "var", initializer=0.) class DepAfterVar(trackable_utils.Checkpoint): def add_dep(self): dep = Dependency() dep.build() self.dep = dep dep_after_var = DepAfterVar() dep_after_var.add_dep() self.evaluate(state_ops.assign(dep_after_var.dep.var, -14.)) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = dep_after_var.save(checkpoint_prefix) loaded_dep_after_var = DepAfterVar() status = loaded_dep_after_var.restore(save_path) loaded_dep_after_var.add_dep() status.assert_consumed() status.run_restore_ops() self.assertEqual(-14., self.evaluate(loaded_dep_after_var.dep.var)) @test_util.run_in_graph_and_eager_modes def testOverlappingRestores(self): checkpoint_directory = self.get_temp_dir() save_root = trackable_utils.Checkpoint() save_root.dep = tracking.AutoTrackable() save_root.dep.var = trackable_utils.add_variable( save_root.dep, name="var", initializer=0.) self.evaluate(state_ops.assign(save_root.dep.var, 12.)) first_path = save_root.save(os.path.join(checkpoint_directory, "first")) self.evaluate(state_ops.assign(save_root.dep.var, 13.)) second_path = save_root.save(os.path.join(checkpoint_directory, "second")) first_root = trackable_utils.Checkpoint() second_root = trackable_utils.Checkpoint() first_status = first_root.restore(first_path) second_status = second_root.restore(second_path) load_dep = tracking.AutoTrackable() load_dep.var = trackable_utils.add_variable( load_dep, name="var", shape=[]) first_root.dep = load_dep first_status.assert_consumed() first_status.run_restore_ops() self.assertEqual(12., self.evaluate(load_dep.var)) second_root.dep = load_dep second_status.assert_consumed() second_status.run_restore_ops() self.assertEqual(13., self.evaluate(load_dep.var)) # Try again with the order of the restore() reversed. The last restore # determines the final value. 
first_root = trackable_utils.Checkpoint() second_root = trackable_utils.Checkpoint() second_status = second_root.restore(second_path) first_status = first_root.restore(first_path) load_dep = tracking.AutoTrackable() load_dep.var = trackable_utils.add_variable( load_dep, name="var", shape=[]) first_root.dep = load_dep first_status.assert_consumed() first_status.run_restore_ops() self.assertEqual(12., self.evaluate(load_dep.var)) second_root.dep = load_dep second_status.assert_consumed() second_status.run_restore_ops() self.assertEqual(12., self.evaluate(load_dep.var)) @test_util.run_in_graph_and_eager_modes def testAmbiguousLoad(self): # Not OK to split one checkpoint object into two checkpoint_directory = self.get_temp_dir() save_root = trackable_utils.Checkpoint() save_root.dep_one = tracking.AutoTrackable() save_root.dep_two = tracking.AutoTrackable() dep_three = tracking.AutoTrackable() save_root.dep_one.dep_three = dep_three save_root.dep_two.dep_three = dep_three trackable_utils.add_variable(dep_three, name="var", initializer=0.) self.evaluate(trackable_utils.gather_initializers(save_root)) save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt")) load_root = trackable_utils.Checkpoint() status = load_root.restore(save_path) load_root.dep_one = tracking.AutoTrackable() load_root.dep_two = tracking.AutoTrackable() load_root.dep_one.dep_three = tracking.AutoTrackable() load_root.dep_two.dep_three = tracking.AutoTrackable() trackable_utils.add_variable( load_root.dep_one.dep_three, name="var", initializer=0.) trackable_utils.add_variable( load_root.dep_two.dep_three, name="var", initializer=0.) with self.assertRaises(AssertionError): status.assert_consumed() with self.assertRaises(AssertionError): status.assert_existing_objects_matched() @test_util.run_in_graph_and_eager_modes def testObjectsCombined(self): # Currently fine to load two checkpoint objects into one Python object checkpoint_directory = self.get_temp_dir() save_root = trackable_utils.Checkpoint() save_root.dep_one = tracking.AutoTrackable() save_root.dep_two = tracking.AutoTrackable() trackable_utils.add_variable( save_root.dep_one, name="var1", initializer=32., dtype=dtypes.float64) trackable_utils.add_variable( save_root.dep_two, name="var2", initializer=64., dtype=dtypes.float64) self.evaluate(trackable_utils.gather_initializers(save_root)) save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt")) load_root = trackable_utils.Checkpoint() load_root.dep_one = tracking.AutoTrackable() load_root.dep_two = load_root.dep_one v1 = trackable_utils.add_variable( load_root.dep_one, name="var1", shape=[], dtype=dtypes.float64) v2 = trackable_utils.add_variable( load_root.dep_one, name="var2", shape=[], dtype=dtypes.float64) status = load_root.restore( save_path).assert_consumed().assert_existing_objects_matched() status.run_restore_ops() self.assertEqual(32., self.evaluate(v1)) self.assertEqual(64., self.evaluate(v2)) @test_util.run_in_graph_and_eager_modes def testEmptyContainersIgnored(self): checkpoint_directory = self.get_temp_dir() save_root = trackable_utils.Checkpoint(a=[]) path = save_root.save(checkpoint_directory) load_root = trackable_utils.Checkpoint(b=[]) load_root.dep = [] load_root.dep.append([]) status = load_root.restore(path) status.assert_consumed() status.assert_existing_objects_matched() status.assert_nontrivial_match() @test_util.run_in_graph_and_eager_modes def testDependencyLoop(self): # Note: this test creates garbage during eager execution because it # purposefully creates a 
reference cycle. first = trackable_utils.Checkpoint() second = trackable_utils.Checkpoint() first.second = second second.first = first first.v = trackable_utils.add_variable( first, "v1", initializer=[3., 1., 4.]) second.v = trackable_utils.add_variable( second, "v2", initializer=[1., 1., 2., 3.]) self.evaluate(trackable_utils.gather_initializers(first)) checkpoint_directory = self.get_temp_dir() save_path = first.save(os.path.join(checkpoint_directory, "ckpt")) # Test deferred loading first_load = trackable_utils.Checkpoint() status = first_load.restore(save_path) second_load = tracking.AutoTrackable() first_load.second = second_load second_load.first = first_load with self.assertRaises(AssertionError): status.assert_consumed() first_load.v = trackable_utils.add_variable( first_load, "v1", shape=[3]) second_load.v = trackable_utils.add_variable( second_load, "v2", shape=[4]) status.assert_consumed() status.run_restore_ops() self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v)) self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v)) # Test loading when variables have already been created self.evaluate(first_load.v.assign([2., 7., 1.])) self.assertAllEqual([2., 7., 1.], self.evaluate(first_load.v)) self.evaluate(second_load.v.assign([2., 7., 1., 8.])) self.assertAllEqual([2., 7., 1., 8.], self.evaluate(second_load.v)) status = first_load.restore(save_path).assert_consumed() status.run_restore_ops() self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v)) self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v)) @test_util.run_in_graph_and_eager_modes def testRestoreOnAssign(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") first = trackable_utils.Checkpoint() first.var1 = variables_lib.Variable(0., name="outside_var") first.var2 = variables_lib.Variable(0., name="blah") self.evaluate(first.var1.assign(4.)) self.evaluate(first.var2.assign(8.)) save_path = first.save(checkpoint_prefix) second = trackable_utils.Checkpoint() second.var2 = variables_lib.Variable(0., name="blah") status = second.restore(save_path) recreated_var1 = variables_lib.Variable(0., name="outside_var") status.run_restore_ops() self.assertEqual(8., self.evaluate(second.var2)) self.evaluate(recreated_var1.assign(-2.)) self.assertEqual(-2., self.evaluate(recreated_var1)) second.var1 = recreated_var1 status.run_restore_ops() self.assertEqual(4., self.evaluate(recreated_var1)) @test_util.run_in_graph_and_eager_modes def testCheckpointState(self): # No checkpoints are deleted by default checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") obj = tracking.AutoTrackable() obj.var = variable_scope.get_variable(name="v", initializer=0.) self.evaluate(trackable_utils.gather_initializers(obj)) saver = trackable_utils.Checkpoint(obj=obj) for _ in range(10): saver.save(checkpoint_prefix) expected_filenames = ["checkpoint"] for checkpoint_number in range(1, 11): expected_filenames.append("ckpt-%d.index" % (checkpoint_number,)) self.assertEmpty( set(expected_filenames) - set(os.listdir(checkpoint_directory))) @test_util.run_in_graph_and_eager_modes def testCheckpointStateChangingVarList(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") obj = tracking.AutoTrackable() obj.var = variable_scope.get_variable(name="v", initializer=0.) 
self.evaluate(trackable_utils.gather_initializers(obj)) checkpoint = trackable_utils.Checkpoint(obj=obj) looped_variables = [] for iteration in range(10): new_variable = resource_variable_ops.ResourceVariable(iteration) self.evaluate(new_variable.initializer) setattr(checkpoint, "var_%d" % iteration, new_variable) checkpoint.save(checkpoint_prefix) looped_variables.append(new_variable) expected_filenames = ["checkpoint"] # We've copied the saver each time, but checkpoint management should still # be consistent. Nothing gets deleted. for checkpoint_number in range(1, 11): expected_filenames.append("ckpt-%d.index" % (checkpoint_number,)) self.assertEmpty( set(expected_filenames) - set(os.listdir(checkpoint_directory))) self.assertEqual( checkpoint_prefix + "-10", checkpoint_management.latest_checkpoint(checkpoint_directory)) # The checkpoint list only contains the most recent checkpoint, but they're # all on disk. This means we won't eventually run into proto size limits. self.assertEqual( [checkpoint_prefix + "-10"], (checkpoint_management.get_checkpoint_state(checkpoint_directory) .all_model_checkpoint_paths)) for v in looped_variables: self.evaluate(v.assign(314)) checkpoint.restore(checkpoint_prefix + "-6").run_restore_ops() self.assertEqual(314, self.evaluate(checkpoint.var_9)) self.assertEqual(314, self.evaluate(checkpoint.var_8)) self.assertEqual(314, self.evaluate(checkpoint.var_6)) self.assertEqual(5, self.evaluate(checkpoint.var_5)) self.assertEqual(1, self.evaluate(checkpoint.var_1)) self.assertEqual(0, self.evaluate(checkpoint.var_0)) checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops() self.assertEqual(9, self.evaluate(checkpoint.var_9)) self.assertEqual(8, self.evaluate(checkpoint.var_8)) self.assertEqual(1, self.evaluate(checkpoint.var_1)) self.assertEqual(0, self.evaluate(checkpoint.var_0)) @test_util.run_in_graph_and_eager_modes def test_restore_after_adding_empty_trackable_data_structure(self): model = NonLayerTrackable() checkpoint = trackable_utils.Checkpoint(model=model) checkpoint.restore(None).initialize_or_restore() checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = checkpoint.save(checkpoint_prefix) del model, checkpoint model = NonLayerTrackable() model.dict = {"a": 1} model.list = {"b": 1} checkpoint = trackable_utils.Checkpoint(model=model) load_status = checkpoint.restore(save_path) load_status.assert_existing_objects_matched().run_restore_ops() @test_util.run_in_graph_and_eager_modes def test_write_checkpoint_path_str_from_function(self): checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt") save_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(1.)) @def_function.function def _write_checkpoint(): save_path = save_checkpoint.write(checkpoint_prefix) return save_path self.evaluate([save_checkpoint.v.initializer]) self.evaluate(_write_checkpoint()) load_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(0.)) # Use read() instead of restore() which allows us to check that all # existing objects were loaded. 
status = load_checkpoint.read(checkpoint_prefix) status.assert_existing_objects_matched() status.assert_consumed() status.run_restore_ops() self.assertEqual(1., self.evaluate(load_checkpoint.v)) self.evaluate(save_checkpoint.v.assign(3.)) self.evaluate(_write_checkpoint()) self.evaluate(save_checkpoint.v.assign(0.)) status = load_checkpoint.read(checkpoint_prefix) status.assert_existing_objects_matched() status.assert_consumed() status.run_restore_ops() self.assertEqual(3., self.evaluate(load_checkpoint.v)) @test_util.run_in_graph_and_eager_modes def test_write_checkpoint_path_tensor_from_function(self): # Same as the previous test, but the path is a tensor not a python string. checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt") checkpoint_prefix_tensor = constant_op.constant(checkpoint_prefix) save_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(1.)) @def_function.function def _write_checkpoint(prefix): save_path = save_checkpoint.write(prefix) return save_path self.evaluate([save_checkpoint.v.initializer]) self.evaluate(_write_checkpoint(checkpoint_prefix_tensor)) load_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(0.)) # Use read() instead of restore() which allows us to check that all # existing objects were loaded. status = load_checkpoint.read(checkpoint_prefix) status.assert_existing_objects_matched() status.assert_consumed() status.run_restore_ops() self.assertEqual(1., self.evaluate(load_checkpoint.v)) self.evaluate(save_checkpoint.v.assign(3.)) self.evaluate(_write_checkpoint(checkpoint_prefix_tensor)) self.evaluate(save_checkpoint.v.assign(0.)) status = load_checkpoint.read(checkpoint_prefix) status.assert_existing_objects_matched() status.assert_consumed() status.run_restore_ops() self.assertEqual(3., self.evaluate(load_checkpoint.v)) @test_util.run_in_graph_and_eager_modes def test_write_checkpoint_path_tensor_does_not_exist_from_function(self): # Same as the previous test, but the path is a tensor not a python string. checkpoint_prefix = os.path.join( self.get_temp_dir(), "DOES_NOT_EXIST", "ckpt") checkpoint_prefix_tensor = constant_op.constant(checkpoint_prefix) save_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(1.)) @def_function.function def _write_checkpoint(prefix): save_path = save_checkpoint.write(prefix) return save_path self.evaluate([save_checkpoint.v.initializer]) with self.assertRaises(errors_impl.NotFoundError): self.evaluate(_write_checkpoint(checkpoint_prefix_tensor)) def test_inititialize_with_data_structures(self): checkpoint = trackable_utils.Checkpoint( a=[variables_lib.Variable(0.), variables_lib.Variable(1.)], b={"a": variables_lib.Variable(2.), "b": variables_lib.Variable(3.)}) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = checkpoint.save(checkpoint_prefix) load_checkpoint = trackable_utils.Checkpoint( a=[variables_lib.Variable(4.), variables_lib.Variable(5.)], b={"a": variables_lib.Variable(6.), "b": variables_lib.Variable(7.)}) load_checkpoint.restore(save_path) self.assertAllClose(self.evaluate(load_checkpoint.a), [0, 1]) self.assertAllClose(self.evaluate(load_checkpoint.b), {"a": 2, "b": 3}) def _create_trackable(self): class Model(tracking.AutoTrackable): def __init__(self): self.v = variables_lib.Variable(2.) 
def __call__(self, x): return self.v * x return Model() def test_initialize_with_root_object(self): model = self._create_trackable() input_value = constant_op.constant([[3.]]) expected_output = self.evaluate(model(input_value)) model.deferred_variable = variables_lib.Variable(5.) checkpoint = trackable_utils.Checkpoint(model) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = checkpoint.save(checkpoint_prefix) new_model = self._create_trackable() load_checkpoint = trackable_utils.Checkpoint(new_model) load_checkpoint.restore(save_path) self.assertAllClose(expected_output, new_model(input_value)) new_model.deferred_variable = variables_lib.Variable(1.) self.assertEqual(self.evaluate(new_model.deferred_variable), 5) def test_initialize_with_root_object_and_kwargs(self): model = self._create_trackable() model.v.assign(3.) separate_variable = variables_lib.Variable(5.) with self.assertRaisesRegex(ValueError, "root.v already exists"): trackable_utils.Checkpoint(model, v=separate_variable) checkpoint = trackable_utils.Checkpoint( model, separate_variable=separate_variable) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = checkpoint.save(checkpoint_prefix) # Case 1: Loading checkpoint with same configuration. new_model = self._create_trackable() separate_variable = variables_lib.Variable(1.) load_checkpoint = trackable_utils.Checkpoint( new_model, separate_variable=separate_variable) load_checkpoint.restore(save_path).assert_consumed() self.assertEqual(self.evaluate(new_model.v), 3) self.assertEqual(self.evaluate(separate_variable), 5) self.assertEqual(self.evaluate(load_checkpoint.save_counter), 1) # Case 2: Loading checkpoint where v and separate_variable are swapped: # v is not attached to the root, while separate variable is attached to root new_model = tracking.AutoTrackable() new_model.separate_variable = variables_lib.Variable(200.) v = variables_lib.Variable(100.) load_checkpoint = trackable_utils.Checkpoint(new_model, v=v) load_checkpoint.restore(save_path).assert_consumed() self.assertEqual(self.evaluate(v), 3) self.assertEqual(self.evaluate(new_model.separate_variable), 5) self.assertEqual(self.evaluate(load_checkpoint.save_counter), 1) # Case 3: Loading checkpoint where no root object is specified separate_variable = variables_lib.Variable(200.) v = variables_lib.Variable(100.) load_checkpoint = trackable_utils.Checkpoint( v=v, separate_variable=separate_variable) load_checkpoint.restore(save_path).assert_consumed() self.assertEqual(self.evaluate(v), 3) self.assertEqual(self.evaluate(new_model.separate_variable), 5) self.assertEqual(self.evaluate(load_checkpoint.save_counter), 1) def test_checkpoint_saved_model_compatibility(self): model = self._create_trackable() input_value = constant_op.constant([[3.]]) expected_output = self.evaluate(model(input_value)) model.deferred_variable = variables_lib.Variable(5.) 
saved_model_dir = os.path.join(self.get_temp_dir(), "saved_model") saved_model_save.save(model, saved_model_dir) new_model = self._create_trackable() load_checkpoint = trackable_utils.Checkpoint(new_model) with self.assertRaisesRegex( errors_impl.NotFoundError, "Error when restoring from checkpoint or SavedModel"): load_checkpoint.restore(saved_model_dir + "no").expect_partial() load_checkpoint.restore(saved_model_dir).expect_partial() self.assertAllClose(expected_output, new_model(input_value)) new_model.deferred_variable = variables_lib.Variable(1.) self.assertEqual(self.evaluate(new_model.deferred_variable), 5) def test_deferred_dependency_avoids_reference_cycles(self): # Tests that there are no reference cycles when running garbage collection. # Python uses reference counts as the primary garbage collector, which will # not delete and finalize (__del__) objects in a cycle. The deletion is # eventually triggered by gc, which only runs when the garbage has reached # a certain threshold. delete_counter = 0 class TrackableWithDel(tracking.AutoTrackable): def __del__(self): nonlocal delete_counter delete_counter += 1 x = tracking.AutoTrackable() x.v = variables_lib.Variable(100.) x.has_del = TrackableWithDel() checkpoint = trackable_utils.Checkpoint(x) checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt") save_path = checkpoint.save(checkpoint_prefix) self.assertEqual(delete_counter, 0) del checkpoint del x self.assertEqual(delete_counter, 1) no_v = tracking.AutoTrackable() no_v.has_del = TrackableWithDel() checkpoint = trackable_utils.Checkpoint(no_v) checkpoint.restore(save_path).expect_partial() del checkpoint del no_v self.assertEqual(delete_counter, 2) def test_defer_objects_with_values_only(self): # Tests that deferred dependencies are only added if the node in the # object graph has children or checkpointed values. root = tracking.AutoTrackable() root.branch_with_value = tracking.AutoTrackable() root.branch_with_value.v = variables_lib.Variable(5.0) root.branch_no_value = tracking.AutoTrackable() root.branch_no_value.child = tracking.AutoTrackable() root.v = variables_lib.Variable(1.0) checkpoint = trackable_utils.Checkpoint(model=root) checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt") save_path = checkpoint.save(checkpoint_prefix) new_root = tracking.AutoTrackable() checkpoint = trackable_utils.Checkpoint(model=new_root) checkpoint.restore(save_path) # root should have two nodes with values/children (`branch-with_value`/`v`). 
self.assertLen(new_root._deferred_dependencies, 2) new_root.branch_no_value = tracking.AutoTrackable() self.assertLen(new_root._deferred_dependencies, 2) new_root.branch_with_value = tracking.AutoTrackable() self.assertLen(new_root._deferred_dependencies, 1) new_root.v = variables_lib.Variable(1.0) self.assertEmpty(new_root._deferred_dependencies, 1) class TemplateTests(parameterized.TestCase, test.TestCase): @test_util.run_in_graph_and_eager_modes def test_trackable_save_restore_nested(self): def _inner_template(): v = variable_scope.get_variable( "v", shape=[1], initializer=init_ops.zeros_initializer()) return v def _outer_template(): first_inner = template.make_template("i1", _inner_template) second_inner = template.make_template("i2", _inner_template) v1 = first_inner() v2 = second_inner() v3 = second_inner() return (first_inner, second_inner), (v1, v2, v3) with variable_scope.variable_scope("ignored"): save_template = template.make_template("s1", _outer_template) save_root = trackable_utils.Checkpoint(my_template=save_template) (inner_template_one, inner_template_two), _ = save_template() self.evaluate(inner_template_one.variables[0].assign([20.])) self.evaluate(inner_template_two.variables[0].assign([25.])) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = save_root.save(checkpoint_prefix) load_template = template.make_template("s2", _outer_template) load_root = trackable_utils.Checkpoint(my_template=load_template) status = load_root.restore(save_path) (inner_template_one, inner_template_two), (v1, v2, v3) = load_template() outer_template_dependencies = load_root.my_template._trackable_children() self.assertLen(outer_template_dependencies, 2) self.assertDictEqual({"i1": inner_template_one, "i2": inner_template_two}, outer_template_dependencies) self.assertLen(inner_template_one._trackable_children(), 1) self.assertIn("v", inner_template_one._trackable_children()) self.assertLen(inner_template_two._trackable_children(), 1) self.assertIn("v", inner_template_two._trackable_children()) status.assert_consumed().run_restore_ops() self.assertAllEqual([20.], self.evaluate(v1)) self.assertAllEqual([25.], self.evaluate(v2)) self.assertAllEqual([25.], self.evaluate(v3)) if __name__ == "__main__": ops.enable_eager_execution() test.main()
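# The tests above exercise deferred restoration: a variable that is attached
# to the object graph only after restore() still receives its checkpointed
# value once the dependency is added. A minimal sketch of the same behaviour
# with the public tf.train.Checkpoint API in eager mode (the attribute name
# `v`, the value 3.0 and the temporary path are illustrative only):
#
#   import tensorflow as tf
#
#   root = tf.train.Checkpoint(v=tf.Variable(3.))
#   save_path = root.save("/tmp/deferred_demo/ckpt")
#
#   restored = tf.train.Checkpoint()        # nothing attached yet
#   status = restored.restore(save_path)    # restoration is deferred
#   restored.v = tf.Variable(0.)            # attaching triggers the restore
#   status.assert_consumed()
#   assert restored.v.numpy() == 3.0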
py
1a4384347c7f63d5951f187c52f969e8b9b0e2d4
# -*- coding: utf-8 -*- """Convolutional-recurrent layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from .. import backend as K from .. import activations from .. import initializers from .. import regularizers from .. import constraints from .recurrent import _generate_dropout_mask from .recurrent import _standardize_args import numpy as np import warnings from ..engine.base_layer import InputSpec, Layer from ..utils import conv_utils from ..legacy import interfaces from ..legacy.layers import Recurrent, ConvRecurrent2D from .recurrent import RNN from ..utils.generic_utils import has_arg class ConvRNN2D(RNN): """Base class for convolutional-recurrent layers. # Arguments cell: A RNN cell instance. A RNN cell is a class that has: - a `call(input_at_t, states_at_t)` method, returning `(output_at_t, states_at_t_plus_1)`. The call method of the cell can also take the optional argument `constants`, see section "Note on passing external constants" below. - a `state_size` attribute. This can be a single integer (single state) in which case it is the number of channels of the recurrent state (which should be the same as the number of channels of the cell output). This can also be a list/tuple of integers (one size per state). In this case, the first entry (`state_size[0]`) should be the same as the size of the cell output. return_sequences: Boolean. Whether to return the last output. in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. input_shape: Use this argument to specify the shape of the input when this layer is the first one in a model. # Input shape 5D tensor with shape: `(samples, timesteps, channels, rows, cols)` if data_format='channels_first' or 5D tensor with shape: `(samples, timesteps, rows, cols, channels)` if data_format='channels_last'. # Output shape - if `return_state`: a list of tensors. The first tensor is the output. The remaining tensors are the last states, each 5D tensor with shape: `(samples, timesteps, filters, new_rows, new_cols)` if data_format='channels_first' or 5D tensor with shape: `(samples, timesteps, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. - if `return_sequences`: 5D tensor with shape: `(samples, timesteps, filters, new_rows, new_cols)` if data_format='channels_first' or 5D tensor with shape: `(samples, timesteps, new_rows, new_cols, filters)` if data_format='channels_last'. - else, 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. # Masking This layer supports masking for input data with a variable number of timesteps. To introduce masks to your data, use an [Embedding](embeddings.md) layer with the `mask_zero` parameter set to `True`. # Note on using statefulness in RNNs You can set RNN layers to be 'stateful', which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch. 
This assumes a one-to-one mapping between samples in different successive batches. To enable statefulness: - specify `stateful=True` in the layer constructor. - specify a fixed batch size for your model, by passing - if sequential model: `batch_input_shape=(...)` to the first layer in your model. - if functional model with 1 or more Input layers: `batch_shape=(...)` to all the first layers in your model. This is the expected shape of your inputs *including the batch size*. It should be a tuple of integers, e.g. `(32, 10, 100, 100, 32)`. Note that the number of rows and columns should be specified too. - specify `shuffle=False` when calling fit(). To reset the states of your model, call `.reset_states()` on either a specific layer, or on your entire model. # Note on specifying the initial state of RNNs You can specify the initial state of RNN layers symbolically by calling them with the keyword argument `initial_state`. The value of `initial_state` should be a tensor or list of tensors representing the initial state of the RNN layer. You can specify the initial state of RNN layers numerically by calling `reset_states` with the keyword argument `states`. The value of `states` should be a numpy array or list of numpy arrays representing the initial state of the RNN layer. # Note on passing external constants to RNNs You can pass "external" constants to the cell using the `constants` keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This requires that the `cell.call` method accepts the same keyword argument `constants`. Such constants can be used to condition the cell transformation on additional static inputs (not changing over time), a.k.a. an attention mechanism. """ def __init__(self, cell, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, **kwargs): if unroll: raise TypeError('Unrolling isn\'t possible with ' 'convolutional RNNs.') if isinstance(cell, (list, tuple)): # The StackedConvRNN2DCells isn't implemented yet. 
raise TypeError('It is not possible at the moment to' 'stack convolutional cells.') super(ConvRNN2D, self).__init__(cell, return_sequences, return_state, go_backwards, stateful, unroll, **kwargs) self.input_spec = [InputSpec(ndim=5)] def compute_output_shape(self, input_shape): if isinstance(input_shape, list): input_shape = input_shape[0] cell = self.cell if cell.data_format == 'channels_first': rows = input_shape[3] cols = input_shape[4] elif cell.data_format == 'channels_last': rows = input_shape[2] cols = input_shape[3] rows = conv_utils.conv_output_length(rows, cell.kernel_size[0], padding=cell.padding, stride=cell.strides[0], dilation=cell.dilation_rate[0]) cols = conv_utils.conv_output_length(cols, cell.kernel_size[1], padding=cell.padding, stride=cell.strides[1], dilation=cell.dilation_rate[1]) if cell.data_format == 'channels_first': output_shape = input_shape[:2] + (cell.filters, rows, cols) elif cell.data_format == 'channels_last': output_shape = input_shape[:2] + (rows, cols, cell.filters) if not self.return_sequences: output_shape = output_shape[:1] + output_shape[2:] if self.return_state: output_shape = [output_shape] if cell.data_format == 'channels_first': output_shape += [(input_shape[0], cell.filters, rows, cols) for _ in range(2)] elif cell.data_format == 'channels_last': output_shape += [(input_shape[0], rows, cols, cell.filters) for _ in range(2)] return output_shape def build(self, input_shape): # Note input_shape will be list of shapes of initial states and # constants if these are passed in __call__. if self._num_constants is not None: constants_shape = input_shape[-self._num_constants:] else: constants_shape = None if isinstance(input_shape, list): input_shape = input_shape[0] batch_size = input_shape[0] if self.stateful else None self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:5]) # allow cell (if layer) to build before we set or validate state_spec if isinstance(self.cell, Layer): step_input_shape = (input_shape[0],) + input_shape[2:] if constants_shape is not None: self.cell.build([step_input_shape] + constants_shape) else: self.cell.build(step_input_shape) # set or validate state_spec if hasattr(self.cell.state_size, '__len__'): state_size = list(self.cell.state_size) else: state_size = [self.cell.state_size] if self.state_spec is not None: # initial_state was passed in call, check compatibility if self.cell.data_format == 'channels_first': ch_dim = 1 elif self.cell.data_format == 'channels_last': ch_dim = 3 if not [spec.shape[ch_dim] for spec in self.state_spec] == state_size: raise ValueError( 'An initial_state was passed that is not compatible with ' '`cell.state_size`. 
Received `state_spec`={}; ' 'However `cell.state_size` is ' '{}'.format([spec.shape for spec in self.state_spec], self.cell.state_size)) else: if self.cell.data_format == 'channels_first': self.state_spec = [InputSpec(shape=(None, dim, None, None)) for dim in state_size] elif self.cell.data_format == 'channels_last': self.state_spec = [InputSpec(shape=(None, None, None, dim)) for dim in state_size] if self.stateful: self.reset_states() self.built = True def get_initial_state(self, inputs): # (samples, timesteps, rows, cols, filters) initial_state = K.zeros_like(inputs) # (samples, rows, cols, filters) initial_state = K.sum(initial_state, axis=1) shape = list(self.cell.kernel_shape) shape[-1] = self.cell.filters initial_state = self.cell.input_conv(initial_state, K.zeros(tuple(shape)), padding=self.cell.padding) # Fix for Theano because it needs # K.int_shape to work in call() with initial_state. keras_shape = list(K.int_shape(inputs)) keras_shape.pop(1) if K.image_data_format() == 'channels_first': indices = 2, 3 else: indices = 1, 2 for i, j in enumerate(indices): keras_shape[j] = conv_utils.conv_output_length( keras_shape[j], shape[i], padding=self.cell.padding, stride=self.cell.strides[i], dilation=self.cell.dilation_rate[i]) initial_state._keras_shape = keras_shape if hasattr(self.cell.state_size, '__len__'): return [initial_state for _ in self.cell.state_size] else: return [initial_state] def __call__(self, inputs, initial_state=None, constants=None, **kwargs): inputs, initial_state, constants = _standardize_args( inputs, initial_state, constants, self._num_constants) if initial_state is None and constants is None: return super(ConvRNN2D, self).__call__(inputs, **kwargs) # If any of `initial_state` or `constants` are specified and are Keras # tensors, then add them to the inputs and temporarily modify the # input_spec to include them. additional_inputs = [] additional_specs = [] if initial_state is not None: kwargs['initial_state'] = initial_state additional_inputs += initial_state self.state_spec = [] for state in initial_state: try: shape = K.int_shape(state) # Fix for Theano except TypeError: shape = tuple(None for _ in range(K.ndim(state))) self.state_spec.append(InputSpec(shape=shape)) additional_specs += self.state_spec if constants is not None: kwargs['constants'] = constants additional_inputs += constants self.constants_spec = [InputSpec(shape=K.int_shape(constant)) for constant in constants] self._num_constants = len(constants) additional_specs += self.constants_spec # at this point additional_inputs cannot be empty for tensor in additional_inputs: if K.is_keras_tensor(tensor) != K.is_keras_tensor(additional_inputs[0]): raise ValueError('The initial state or constants of an RNN' ' layer cannot be specified with a mix of' ' Keras tensors and non-Keras tensors') if K.is_keras_tensor(additional_inputs[0]): # Compute the full input spec, including state and constants full_input = [inputs] + additional_inputs full_input_spec = self.input_spec + additional_specs # Perform the call with temporarily replaced input_spec original_input_spec = self.input_spec self.input_spec = full_input_spec output = super(ConvRNN2D, self).__call__(full_input, **kwargs) self.input_spec = original_input_spec return output else: return super(ConvRNN2D, self).__call__(inputs, **kwargs) def call(self, inputs, mask=None, training=None, initial_state=None, constants=None): # note that the .build() method of subclasses MUST define # self.input_spec and self.state_spec with complete input shapes. 
if isinstance(inputs, list): inputs = inputs[0] if initial_state is not None: pass elif self.stateful: initial_state = self.states else: initial_state = self.get_initial_state(inputs) if isinstance(mask, list): mask = mask[0] if len(initial_state) != len(self.states): raise ValueError('Layer has ' + str(len(self.states)) + ' states but was passed ' + str(len(initial_state)) + ' initial states.') timesteps = K.int_shape(inputs)[1] kwargs = {} if has_arg(self.cell.call, 'training'): kwargs['training'] = training if constants: if not has_arg(self.cell.call, 'constants'): raise ValueError('RNN cell does not support constants') def step(inputs, states): constants = states[-self._num_constants:] states = states[:-self._num_constants] return self.cell.call(inputs, states, constants=constants, **kwargs) else: def step(inputs, states): return self.cell.call(inputs, states, **kwargs) last_output, outputs, states = K.rnn(step, inputs, initial_state, constants=constants, go_backwards=self.go_backwards, mask=mask, input_length=timesteps) if self.stateful: updates = [] for i in range(len(states)): updates.append((self.states[i], states[i])) self.add_update(updates, inputs) if self.return_sequences: output = outputs else: output = last_output # Properly set learning phase if getattr(last_output, '_uses_learning_phase', False): output._uses_learning_phase = True if self.return_state: if not isinstance(states, (list, tuple)): states = [states] else: states = list(states) return [output] + states else: return output def reset_states(self, states=None): if not self.stateful: raise AttributeError('Layer must be stateful.') input_shape = self.input_spec[0].shape state_shape = self.compute_output_shape(input_shape) if self.return_state: state_shape = state_shape[0] if self.return_sequences: state_shape = state_shape[:1] + state_shape[2:] if None in state_shape: raise ValueError('If a RNN is stateful, it needs to know ' 'its batch size. Specify the batch size ' 'of your input tensors: \n' '- If using a Sequential model, ' 'specify the batch size by passing ' 'a `batch_input_shape` ' 'argument to your first layer.\n' '- If using the functional API, specify ' 'the time dimension by passing a ' '`batch_shape` argument to your Input layer.\n' 'The same thing goes for the number of rows and columns.') # helper function def get_tuple_shape(nb_channels): result = list(state_shape) if self.cell.data_format == 'channels_first': result[1] = nb_channels elif self.cell.data_format == 'channels_last': result[3] = nb_channels else: raise KeyError return tuple(result) # initialize state if None if self.states[0] is None: if hasattr(self.cell.state_size, '__len__'): self.states = [K.zeros(get_tuple_shape(dim)) for dim in self.cell.state_size] else: self.states = [K.zeros(get_tuple_shape(self.cell.state_size))] elif states is None: if hasattr(self.cell.state_size, '__len__'): for state, dim in zip(self.states, self.cell.state_size): K.set_value(state, np.zeros(get_tuple_shape(dim))) else: K.set_value(self.states[0], np.zeros(get_tuple_shape(self.cell.state_size))) else: if not isinstance(states, (list, tuple)): states = [states] if len(states) != len(self.states): raise ValueError('Layer ' + self.name + ' expects ' + str(len(self.states)) + ' states, ' 'but it received ' + str(len(states)) + ' state values. 
Input received: ' + str(states)) for index, (value, state) in enumerate(zip(states, self.states)): if hasattr(self.cell.state_size, '__len__'): dim = self.cell.state_size[index] else: dim = self.cell.state_size if value.shape != get_tuple_shape(dim): raise ValueError('State ' + str(index) + ' is incompatible with layer ' + self.name + ': expected shape=' + str(get_tuple_shape(dim)) + ', found shape=' + str(value.shape)) # TODO: consider batch calls to `set_value`. K.set_value(state, value) class ConvLSTM2DCell(Layer): """Cell class for the ConvLSTM2D layer. # Arguments filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of n integers, specifying the dimensions of the convolution window. strides: An integer or tuple/list of n integers, specifying the strides of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function to use (see [activations](../activations.md)). If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step (see [activations](../activations.md)). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. (see [initializers](../initializers.md)). recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. (see [initializers](../initializers.md)). bias_initializer: Initializer for the bias vector (see [initializers](../initializers.md)). unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Use in combination with `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)). recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix (see [regularizer](../regularizers.md)). bias_regularizer: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)). kernel_constraint: Constraint function applied to the `kernel` weights matrix (see [constraints](../constraints.md)). recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix (see [constraints](../constraints.md)). bias_constraint: Constraint function applied to the bias vector (see [constraints](../constraints.md)). dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. 
""" def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., **kwargs): super(ConvLSTM2DCell, self).__init__(**kwargs) self.filters = filters self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size') self.strides = conv_utils.normalize_tuple(strides, 2, 'strides') self.padding = conv_utils.normalize_padding(padding) self.data_format = conv_utils.normalize_data_format(data_format) self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate') self.activation = activations.get(activation) self.recurrent_activation = activations.get(recurrent_activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.recurrent_initializer = initializers.get(recurrent_initializer) self.bias_initializer = initializers.get(bias_initializer) self.unit_forget_bias = unit_forget_bias self.kernel_regularizer = regularizers.get(kernel_regularizer) self.recurrent_regularizer = regularizers.get(recurrent_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.recurrent_constraint = constraints.get(recurrent_constraint) self.bias_constraint = constraints.get(bias_constraint) if K.backend() == 'theano' and (dropout or recurrent_dropout): warnings.warn( 'RNN dropout is no longer supported with the Theano backend ' 'due to technical limitations. ' 'You can either set `dropout` and `recurrent_dropout` to 0, ' 'or use the TensorFlow backend.') dropout = 0. recurrent_dropout = 0. self.dropout = min(1., max(0., dropout)) self.recurrent_dropout = min(1., max(0., recurrent_dropout)) self.state_size = (self.filters, self.filters) self._dropout_mask = None self._recurrent_dropout_mask = None def build(self, input_shape): if self.data_format == 'channels_first': channel_axis = 1 else: channel_axis = -1 if input_shape[channel_axis] is None: raise ValueError('The channel dimension of the inputs ' 'should be defined. 
Found `None`.') input_dim = input_shape[channel_axis] kernel_shape = self.kernel_size + (input_dim, self.filters * 4) self.kernel_shape = kernel_shape recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4) self.kernel = self.add_weight(shape=kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint) self.recurrent_kernel = self.add_weight( shape=recurrent_kernel_shape, initializer=self.recurrent_initializer, name='recurrent_kernel', regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint) if self.use_bias: if self.unit_forget_bias: def bias_initializer(_, *args, **kwargs): return K.concatenate([ self.bias_initializer((self.filters,), *args, **kwargs), initializers.Ones()((self.filters,), *args, **kwargs), self.bias_initializer((self.filters * 2,), *args, **kwargs), ]) else: bias_initializer = self.bias_initializer self.bias = self.add_weight(shape=(self.filters * 4,), name='bias', initializer=bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint) else: self.bias = None self.kernel_i = self.kernel[:, :, :, :self.filters] self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.filters] self.kernel_f = self.kernel[:, :, :, self.filters: self.filters * 2] self.recurrent_kernel_f = self.recurrent_kernel[:, :, :, self.filters: self.filters * 2] self.kernel_c = self.kernel[:, :, :, self.filters * 2: self.filters * 3] self.recurrent_kernel_c = self.recurrent_kernel[:, :, :, self.filters * 2: self.filters * 3] self.kernel_o = self.kernel[:, :, :, self.filters * 3:] self.recurrent_kernel_o = self.recurrent_kernel[:, :, :, self.filters * 3:] if self.use_bias: self.bias_i = self.bias[:self.filters] self.bias_f = self.bias[self.filters: self.filters * 2] self.bias_c = self.bias[self.filters * 2: self.filters * 3] self.bias_o = self.bias[self.filters * 3:] else: self.bias_i = None self.bias_f = None self.bias_c = None self.bias_o = None self.built = True def call(self, inputs, states, training=None): if 0 < self.dropout < 1 and self._dropout_mask is None: self._dropout_mask = _generate_dropout_mask( K.ones_like(inputs), self.dropout, training=training, count=4) if (0 < self.recurrent_dropout < 1 and self._recurrent_dropout_mask is None): self._recurrent_dropout_mask = _generate_dropout_mask( K.ones_like(states[1]), self.recurrent_dropout, training=training, count=4) # dropout matrices for input units dp_mask = self._dropout_mask # dropout matrices for recurrent units rec_dp_mask = self._recurrent_dropout_mask h_tm1 = states[0] # previous memory state c_tm1 = states[1] # previous carry state if 0 < self.dropout < 1.: inputs_i = inputs * dp_mask[0] inputs_f = inputs * dp_mask[1] inputs_c = inputs * dp_mask[2] inputs_o = inputs * dp_mask[3] else: inputs_i = inputs inputs_f = inputs inputs_c = inputs inputs_o = inputs if 0 < self.recurrent_dropout < 1.: h_tm1_i = h_tm1 * rec_dp_mask[0] h_tm1_f = h_tm1 * rec_dp_mask[1] h_tm1_c = h_tm1 * rec_dp_mask[2] h_tm1_o = h_tm1 * rec_dp_mask[3] else: h_tm1_i = h_tm1 h_tm1_f = h_tm1 h_tm1_c = h_tm1 h_tm1_o = h_tm1 x_i = self.input_conv(inputs_i, self.kernel_i, self.bias_i, padding=self.padding) x_f = self.input_conv(inputs_f, self.kernel_f, self.bias_f, padding=self.padding) x_c = self.input_conv(inputs_c, self.kernel_c, self.bias_c, padding=self.padding) x_o = self.input_conv(inputs_o, self.kernel_o, self.bias_o, padding=self.padding) h_i = self.recurrent_conv(h_tm1_i, self.recurrent_kernel_i) h_f = 
self.recurrent_conv(h_tm1_f, self.recurrent_kernel_f) h_c = self.recurrent_conv(h_tm1_c, self.recurrent_kernel_c) h_o = self.recurrent_conv(h_tm1_o, self.recurrent_kernel_o) i = self.recurrent_activation(x_i + h_i) f = self.recurrent_activation(x_f + h_f) c = f * c_tm1 + i * self.activation(x_c + h_c) o = self.recurrent_activation(x_o + h_o) h = o * self.activation(c) if 0 < self.dropout + self.recurrent_dropout: if training is None: h._uses_learning_phase = True return h, [h, c] def input_conv(self, x, w, b=None, padding='valid'): conv_out = K.conv2d(x, w, strides=self.strides, padding=padding, data_format=self.data_format, dilation_rate=self.dilation_rate) if b is not None: conv_out = K.bias_add(conv_out, b, data_format=self.data_format) return conv_out def recurrent_conv(self, x, w): conv_out = K.conv2d(x, w, strides=(1, 1), padding='same', data_format=self.data_format) return conv_out def get_config(self): config = {'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout} base_config = super(ConvLSTM2DCell, self).get_config() return dict(list(base_config.items()) + list(config.items())) class ConvLSTM2D(ConvRNN2D): """Convolutional LSTM. It is similar to an LSTM layer, but the input transformations and recurrent transformations are both convolutional. # Arguments filters: Integer, the dimensionality of the output space (i.e. the number output of filters in the convolution). kernel_size: An integer or tuple/list of n integers, specifying the dimensions of the convolution window. strides: An integer or tuple/list of n integers, specifying the strides of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `"valid"` or `"same"` (case-insensitive). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, time, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, time, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". dilation_rate: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function to use (see [activations](../activations.md)). 
If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step (see [activations](../activations.md)). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. (see [initializers](../initializers.md)). recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. (see [initializers](../initializers.md)). bias_initializer: Initializer for the bias vector (see [initializers](../initializers.md)). unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Use in combination with `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)). recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix (see [regularizer](../regularizers.md)). bias_regularizer: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)). activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). (see [regularizer](../regularizers.md)). kernel_constraint: Constraint function applied to the `kernel` weights matrix (see [constraints](../constraints.md)). recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix (see [constraints](../constraints.md)). bias_constraint: Constraint function applied to the bias vector (see [constraints](../constraints.md)). return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. go_backwards: Boolean (default False). If True, process the input sequence backwards. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. # Input shape - if data_format='channels_first' 5D tensor with shape: `(samples, time, channels, rows, cols)` - if data_format='channels_last' 5D tensor with shape: `(samples, time, rows, cols, channels)` # Output shape - if `return_sequences` - if data_format='channels_first' 5D tensor with shape: `(samples, time, filters, output_row, output_col)` - if data_format='channels_last' 5D tensor with shape: `(samples, time, output_row, output_col, filters)` - else - if data_format ='channels_first' 4D tensor with shape: `(samples, filters, output_row, output_col)` - if data_format='channels_last' 4D tensor with shape: `(samples, output_row, output_col, filters)` where o_row and o_col depend on the shape of the filter and the padding # Raises ValueError: in case of invalid constructor arguments. 
# References - [Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1) The current implementation does not include the feedback loop on the cells output """ @interfaces.legacy_convlstm2d_support def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, go_backwards=False, stateful=False, dropout=0., recurrent_dropout=0., **kwargs): cell = ConvLSTM2DCell(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout) super(ConvLSTM2D, self).__init__(cell, return_sequences=return_sequences, go_backwards=go_backwards, stateful=stateful, **kwargs) self.activity_regularizer = regularizers.get(activity_regularizer) def call(self, inputs, mask=None, training=None, initial_state=None): return super(ConvLSTM2D, self).call(inputs, mask=mask, training=training, initial_state=initial_state) @property def filters(self): return self.cell.filters @property def kernel_size(self): return self.cell.kernel_size @property def strides(self): return self.cell.strides @property def padding(self): return self.cell.padding @property def data_format(self): return self.cell.data_format @property def dilation_rate(self): return self.cell.dilation_rate @property def activation(self): return self.cell.activation @property def recurrent_activation(self): return self.cell.recurrent_activation @property def use_bias(self): return self.cell.use_bias @property def kernel_initializer(self): return self.cell.kernel_initializer @property def recurrent_initializer(self): return self.cell.recurrent_initializer @property def bias_initializer(self): return self.cell.bias_initializer @property def unit_forget_bias(self): return self.cell.unit_forget_bias @property def kernel_regularizer(self): return self.cell.kernel_regularizer @property def recurrent_regularizer(self): return self.cell.recurrent_regularizer @property def bias_regularizer(self): return self.cell.bias_regularizer @property def kernel_constraint(self): return self.cell.kernel_constraint @property def recurrent_constraint(self): return self.cell.recurrent_constraint @property def bias_constraint(self): return self.cell.bias_constraint @property def dropout(self): return self.cell.dropout @property def recurrent_dropout(self): return self.cell.recurrent_dropout def get_config(self): config = {'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': 
activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout} base_config = super(ConvLSTM2D, self).get_config() del base_config['cell'] return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): return cls(**config)
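# A minimal usage sketch for the ConvLSTM2D layer defined above, wrapped in a
# helper so nothing runs at import time. The helper name, shapes, filter
# counts, loss and optimizer are arbitrary illustrations (assuming the default
# image_data_format is 'channels_last'), not values taken from this module:
def _convlstm2d_usage_example():
    from keras.models import Sequential
    from keras.layers import BatchNormalization, Conv2D

    model = Sequential()
    # Input: (batch, time, rows, cols, channels); time may be variable (None).
    model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                         return_sequences=True, input_shape=(None, 40, 40, 1)))
    model.add(BatchNormalization())
    # With return_sequences=False the layer emits one 4D feature map per sample.
    model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                         return_sequences=False))
    model.add(Conv2D(filters=1, kernel_size=(3, 3), padding='same',
                     activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model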
py
1a43844d2f17170cfd6622f33e3a887653007e7b
import asyncio import json import logging.config import os from types import SimpleNamespace from aiohttp import web from utils.middleware import ( app_info_factory, auth_factory, data_factory, logger_factory, response_factory, ) import blog.app, homepage.app # import blog.handler, blog.api # import homepage.handler, homepage.api import utils.orm as orm from utils import coroweb from utils.jinja_filter import * from utils.utils import init_jinja2 def init_logging( # 初始化日志配置 default_path="conf/logging.json", default_level=logging.INFO ): path = default_path if os.path.exists(path): with open(path, "r") as f: config = json.load(f) logging.config.dictConfig(config) else: logging.basicConfig(level=default_level) async def init(loop): # 初始化服务器 init_logging() with open("conf/conf.json", "r") as f: configs = json.load(f, object_hook=lambda d: SimpleNamespace(**d)) await orm.create_pool(loop=loop, **configs.db.__dict__) # app = web.Application(middlewares=[logger_factory, auth_factory, response_factory]) # app.COOKIE_NAME = "Mume" # app._COOKIE_KEY = configs.session.secret # app._INVITATION_KEY = configs.session.key # init_jinja2( # app, # os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates"), # filters={"datetime": datetime_filter}, # ) # app.add_routes(homepage.handler.routes) # # app.add_routes(homepage.api.routes) # blog_app = web.Application() # blog_app.add_routes(blog.handler.routes) # blog_app.add_routes(blog.api.routes) # app.router.add_static( # "/static/", # os.path.join(os.path.dirname(os.path.abspath(__file__)), "../static"), # ) # app.add_subapp("/blog", blog_app) # runner = web.AppRunner(app) # await runner.setup() # site = web.TCPSite(runner, "localhost", 9000) # logging.info("server started at http://localhost:9000") # await site.start() homepage_app = homepage.app.init() runner = web.AppRunner(homepage_app) await runner.setup() site = web.TCPSite(runner, "localhost", 9000) # site = web.UnixSite(runner, "/tmp/Mume_blog.sock") logging.info("server started at http://localhost:9000") await site.start() blog_app = blog.app.init() runner = web.AppRunner(blog_app) await runner.setup() site = web.TCPSite(runner, "localhost", 9001) # site = web.UnixSite(runner, "/tmp/Mume_blog.sock") logging.info("server started at http://localhost:9001") await site.start() loop = asyncio.get_event_loop() loop.run_until_complete(init(loop)) loop.run_forever()
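# The script keeps both sites alive with loop.run_forever() and has no
# shutdown path. A hedged sketch of how the runners could be released on
# exit, assuming init() were changed to return its AppRunner instances
# (illustrative only, not a drop-in change):
#
#   runners = loop.run_until_complete(init(loop))
#   try:
#       loop.run_forever()
#   except KeyboardInterrupt:
#       pass
#   finally:
#       for runner in runners:
#           loop.run_until_complete(runner.cleanup())
#       loop.close()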
py
1a4384724974756b935197e31cbbd89c1b5f3bee
from ..style import use

use("alba")

py
1a43855b6e6867183a6e5b3d176a5c6cfea71cab
import yaml
import torch
import torch.nn as nn
import argparse
import pprint
from typing import List, Dict
from pathlib import Path
from tqdm import tqdm
from torch.utils.data import DataLoader

from model import Generator, Discriminator, Vgg19
from dataset import BuildDataset, noise_generate
from visualize import Visualizer
from loss import SPADELossCalculator
from utils import session


class Trainer:
    def __init__(self, config, outdir, outdir_fix, modeldir, data_path, sketch_path):
        self.train_config = config["train"]
        self.data_config = config["dataset"]
        model_config = config["model"]
        self.loss_config = config["loss"]

        self.outdir = outdir
        self.outdir_fix = outdir_fix
        self.modeldir = modeldir

        self.dataset = BuildDataset(
            data_path,
            sketch_path,
            self.data_config["line_method"],
            self.data_config["extension"],
            self.data_config["train_size"],
            self.data_config["valid_size"],
            self.data_config["color_space"],
            self.data_config["line_space"],
        )
        print(self.dataset)

        gen = Generator(model_config["generator"]["in_ch"], self.train_config["latent_dim"])
        self.gen, self.gen_opt = self._setting_model_optim(gen, model_config["generator"])

        dis = Discriminator(multi_patterns=model_config["discriminator"]["multi"])
        self.dis, self.dis_opt = self._setting_model_optim(dis, model_config["discriminator"])

        self.vgg = Vgg19(requires_grad=False)
        self.vgg.cuda()
        self.vgg.eval()

        self.lossfunc = SPADELossCalculator()
        self.visualizer = Visualizer()

        self.l_dim = self.train_config["latent_dim"]

    @staticmethod
    def _setting_model_optim(model: nn.Module, config: Dict):
        model.cuda()
        if config["mode"] == "train":
            model.train()
        elif config["mode"] == "eval":
            model.eval()

        optimizer = torch.optim.Adam(
            model.parameters(), lr=config["lr"], betas=(config["b1"], config["b2"])
        )

        return model, optimizer

    @staticmethod
    def _build_dict(loss_dict: Dict[str, float], epoch: int, num_epochs: int) -> Dict[str, str]:
        report_dict = {}
        report_dict["epoch"] = f"{epoch}/{num_epochs}"
        for k, v in loss_dict.items():
            report_dict[k] = f"{v:.6f}"

        return report_dict

    @staticmethod
    def _valid_prepare(dataset, validsize: int, l_dim: int) -> List[torch.Tensor]:
        c_val, l_val, m_val, c_fix, l_fix, m_fix = dataset.valid(validsize)
        x_val = torch.cat([l_val, m_val], dim=1)
        x_fix = torch.cat([l_fix, m_fix], dim=1)
        z_fix = noise_generate(validsize, l_dim)

        return [x_val, l_val, m_val, c_val], [x_fix, l_fix, m_fix, c_fix], z_fix

    def _eval(self, l_dim: int, z_fix: torch.Tensor, iteration: int, validsize: int,
              v_list: List[torch.Tensor], fix_list: List[torch.Tensor]):
        torch.save(self.gen.state_dict(), f"{self.modeldir}/generator_{iteration}.pt")
        torch.save(self.dis.state_dict(), f"{self.modeldir}/discriminator_{iteration}.pt")

        with torch.no_grad():
            y_fix = self.gen(z_fix, fix_list[0])
            z = noise_generate(validsize, l_dim)
            y = self.gen(z, v_list[0])

        self.visualizer(fix_list[1:], y_fix, self.outdir_fix, iteration, validsize)
        self.visualizer(v_list[1:], y, self.outdir, iteration, validsize)

    def _iter(self, data):
        color, line, mask = data
        color = color.cuda()
        line = line.cuda()
        mask = mask.cuda()

        loss = {}

        x = torch.cat([line, mask], dim=1)
        batchsize = x.size(0)
        z = noise_generate(batchsize, self.l_dim)

        # Discriminator update
        y = self.gen(z, x)
        dis_loss = self.loss_config["adv"] * self.lossfunc.adversarial_disloss(self.dis, y.detach(), color)

        self.dis_opt.zero_grad()
        dis_loss.backward()
        self.dis_opt.step()

        # Generator update
        y = self.gen(z, x)
        gen_adv_loss = self.loss_config["adv"] * self.lossfunc.adversarial_genloss(self.dis, y)
        content_loss = self.loss_config["content"] * self.lossfunc.content_loss(y, color)
        pef_loss = self.loss_config["pe"] * self.lossfunc.positive_enforcing_loss(y)
        gen_loss = gen_adv_loss + content_loss + pef_loss

        self.gen_opt.zero_grad()
        gen_loss.backward()
        self.gen_opt.step()

        loss["loss_adv_dis"] = dis_loss.item()
        loss["loss_adv_gen"] = gen_adv_loss.item()
        loss["loss_content"] = content_loss.item()
        loss["loss_pef"] = pef_loss.item()

        return loss

    def __call__(self):
        iteration = 0
        v_list, fix_list, z_fix = self._valid_prepare(
            self.dataset, self.train_config["validsize"], self.l_dim
        )

        for epoch in range(self.train_config["epoch"]):
            dataloader = DataLoader(
                self.dataset,
                batch_size=self.train_config["batchsize"],
                shuffle=True,
                drop_last=True,
            )

            with tqdm(total=len(self.dataset)) as pbar:
                for index, data in enumerate(dataloader):
                    iteration += 1
                    loss_dict = self._iter(data)

                    report_dict = self._build_dict(loss_dict, epoch, self.train_config["epoch"])
                    pbar.update(self.train_config["batchsize"])
                    pbar.set_postfix(**report_dict)

                    if iteration % self.train_config["snapshot_interval"] == 1:
                        self._eval(
                            self.l_dim,
                            z_fix,
                            iteration,
                            self.train_config["validsize"],
                            v_list,
                            fix_list,
                        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="SPADE colorization")
    parser.add_argument('--session', type=str, default='spade', help="session name")
    parser.add_argument('--data_path', type=Path, help="path containing color images")
    parser.add_argument('--sketch_path', type=Path, help="path containing sketch images")
    args = parser.parse_args()

    outdir, outdir_fix, modeldir = session(args.session)

    with open("param.yaml", "r") as f:
        config = yaml.safe_load(f)
    pprint.pprint(config)

    trainer = Trainer(config, outdir, outdir_fix, modeldir, args.data_path, args.sketch_path)
    trainer()
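The entry point above loads every hyperparameter from param.yaml via yaml.safe_load. A minimal sketch of the structure the Trainer expects, reconstructed only from the keys it reads; every concrete value below is an illustrative assumption, not a setting from the original project:

# Hypothetical param.yaml contents, shown as the dict yaml.safe_load would return.
# All numeric values and strings are placeholders chosen for illustration only.
config = {
    "train": {
        "latent_dim": 256,
        "validsize": 4,
        "epoch": 100,
        "batchsize": 16,
        "snapshot_interval": 1000,
    },
    "dataset": {
        "line_method": "xdog",
        "extension": ".jpg",
        "train_size": 224,
        "valid_size": 256,
        "color_space": "rgb",
        "line_space": "rgb",
    },
    "model": {
        "generator": {"in_ch": 4, "mode": "train", "lr": 0.0002, "b1": 0.5, "b2": 0.999},
        "discriminator": {"multi": 3, "mode": "train", "lr": 0.0002, "b1": 0.5, "b2": 0.999},
    },
    "loss": {"adv": 1.0, "content": 10.0, "pe": 1.0},
}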
py
1a438593ba7283fd407c88be3ac3b84784e63aa0
from django.db import models


# Create your models here.
class Catalog(models.Model):
    title = models.CharField(max_length=200, unique=True)
    image = models.CharField(max_length=1000)
    background_image = models.CharField(max_length=1000)
    genre = models.CharField(max_length=1000)
    bio = models.TextField(max_length=2500)
    release_date = models.DateField('date released')
    review = models.IntegerField(default=0)

    def __str__(self):
        return self.title
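A hedged usage sketch of the model above from a Django shell; the app label "catalog" and all field values are assumptions made purely for illustration:

# Run inside `python manage.py shell`; values are invented for illustration.
from datetime import date

from catalog.models import Catalog  # assumes the app is named "catalog"

album = Catalog.objects.create(
    title="Example Album",
    image="https://example.com/cover.jpg",
    background_image="https://example.com/background.jpg",
    genre="Rock",
    bio="Placeholder biography text.",
    release_date=date(2020, 1, 1),
    review=4,
)

print(album)                                        # __str__ returns the title
recent = Catalog.objects.filter(release_date__year=2020)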
py
1a43861096f168b338c2030a5cc5d7624e15b6ed
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras import Sequential
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score


class TrainingModel:
    def __init__(self, input_shape):
        self.model = Sequential()
        self.model.add(Dense(64, activation='relu', input_shape=input_shape))
        self.model.add(Dropout(0.3))
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dropout(0.3))
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dense(1, activation='sigmoid'))
        self.model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    def fit(self, data, label):
        self.model.fit(data, label, epochs=1, batch_size=128, verbose=0)

    def predict(self, data):
        # Threshold the sigmoid output at 0.5 to obtain hard class labels.
        return (self.model.predict(data) > 0.5).astype("int32")

    def evaluate(self, X_test, y_test, print_report=True):
        y_predicted = self.predict(X_test)
        # Raw sigmoid outputs serve as the predicted class probabilities.
        y_predicted_probs = self.model.predict(X_test)

        if print_report:
            self.print_report(y_test, y_predicted, y_predicted_probs)
        else:
            accuracy = accuracy_score(y_test, y_predicted)
            report = classification_report(y_test, y_predicted, output_dict=True)
            auc_score = roc_auc_score(y_test, y_predicted_probs)
            matrix = confusion_matrix(y_test, y_predicted)

            return {
                'accuracy': accuracy,
                'auc_score': auc_score,
                **report['weighted avg'],
            }

    def print_report(self, test, predicted, predicted_probs):
        accuracy = accuracy_score(test, predicted)
        report = classification_report(test, predicted)
        matrix = confusion_matrix(test, predicted)

        print('Accuracy score: {:.5f}'.format(accuracy))
        print('-' * 20)
        print('Confusion Matrix:')
        print(matrix)
        print('-' * 20)
        print(report)
        print('-' * 20)
        print('AUC score: {:.5f}'.format(roc_auc_score(test, predicted_probs)))
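A minimal sketch of how this wrapper might be driven end to end; the data is random noise generated purely to show the call sequence, not a real dataset:

import numpy as np

# Synthetic binary-classification data (shapes are illustrative assumptions).
X = np.random.rand(1000, 20).astype("float32")
y = (X[:, 0] > 0.5).astype("int32")

model = TrainingModel(input_shape=(20,))
for _ in range(5):           # fit() runs a single epoch per call
    model.fit(X[:800], y[:800])

model.evaluate(X[800:], y[800:], print_report=True)          # prints the full report
metrics = model.evaluate(X[800:], y[800:], print_report=False)
print(metrics["accuracy"], metrics["auc_score"])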
py
1a4386456ee4a81853ff0b87695c8f486187d7a9
from .exceptions_warnings import *
from .abstraction_functions import _univ_repr
from .math_functions import _tol_fun
py
1a4386725057fbb9aa104d8e7d4fc6e92ab25501
import sys
import os
from cx_Freeze import setup, Executable

# ADD FILES
files = ['icon.ico', 'themes/']

# TARGET
target = Executable(
    script="main.py",
    base="Win32GUI",
    icon="icon.ico"
)

# SETUP CX FREEZE
setup(
    name="PyDracula",
    version="1.0",
    description="Modern GUI for Python applications",
    author="Wanderson M. Pimenta",
    options={'build_exe': {'include_files': files}},
    executables=[target]
)
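Assuming icon.ico and the themes/ folder sit next to main.py, running `python setup.py build` with this script should produce the frozen Windows executable under a build/ subdirectory; base="Win32GUI" keeps the console window from appearing alongside the GUI.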
py
1a4386aa55b0c8dcb303083129471b159d0f69a9
import torch
import torch.nn as nn
import torch.nn.functional as F


class RelNMS(nn.Module):
    def __init__(self, cfg):
        super(RelNMS, self).__init__()
        self.fg_iou_threshold = 0.7
        self.bg_iou_threshold = 0.3
        self.nms_threshold = 0.5
        self.top_k_proposals = cfg.RELPN.DPN.NUM_DURATION_PROPOSALS
        self.anchor = None

    def forward(self, relationness, duration_proposals):
        relationness
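The constructor only reads a single value from the config tree, so a hedged instantiation sketch with a stand-in config object looks like this (SimpleNamespace replaces whatever config class the project actually uses, and the proposal count is an invented value):

from types import SimpleNamespace

# Stand-in cfg exposing only the attribute RelNMS.__init__ reads.
cfg = SimpleNamespace(
    RELPN=SimpleNamespace(DPN=SimpleNamespace(NUM_DURATION_PROPOSALS=128))  # illustrative value
)

rel_nms = RelNMS(cfg)
print(rel_nms.top_k_proposals)   # 128
print(rel_nms.nms_threshold)     # 0.5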
py
1a4387d848f2b6dfe556c399737267ead03d40e8
# Copyright 2020 The HuggingFace Team, the AllenNLP library authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for working with the local dataset cache. Parts of this file is adapted from the AllenNLP library at https://github.com/allenai/allennlp. """ import copy import fnmatch import functools import importlib.util import io import json import os import re import shutil import subprocess import sys import tarfile import tempfile import types from collections import OrderedDict, UserDict from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from functools import partial, wraps from hashlib import sha256 from itertools import chain from pathlib import Path from types import ModuleType from typing import Any, BinaryIO, ContextManager, Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 from zipfile import ZipFile, is_zipfile import numpy as np from packaging import version import requests from filelock import FileLock from huggingface_hub import HfFolder, Repository, create_repo, list_repo_files, whoami from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from transformers.utils.versions import importlib_metadata from . import __version__ from .utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: _torch_available = importlib.util.find_spec("torch") is not None if _torch_available: try: _torch_version = importlib_metadata.version("torch") logger.info(f"PyTorch version {_torch_version} available.") except importlib_metadata.PackageNotFoundError: _torch_available = False else: logger.info("Disabling PyTorch because USE_TF is set") _torch_available = False if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: _tf_available = importlib.util.find_spec("tensorflow") is not None if _tf_available: candidates = ( "tensorflow", "tensorflow-cpu", "tensorflow-gpu", "tf-nightly", "tf-nightly-cpu", "tf-nightly-gpu", "intel-tensorflow", "intel-tensorflow-avx512", "tensorflow-rocm", "tensorflow-macos", ) _tf_version = None # For the metadata, we have to look for both tensorflow and tensorflow-cpu for pkg in candidates: try: _tf_version = importlib_metadata.version(pkg) break except importlib_metadata.PackageNotFoundError: pass _tf_available = _tf_version is not None if _tf_available: if version.parse(_tf_version) < version.parse("2"): logger.info(f"TensorFlow found but with version {_tf_version}. 
Transformers requires version 2 minimum.") _tf_available = False else: logger.info(f"TensorFlow version {_tf_version} available.") else: logger.info("Disabling Tensorflow because USE_TORCH is set") _tf_available = False if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: _flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None if _flax_available: try: _jax_version = importlib_metadata.version("jax") _flax_version = importlib_metadata.version("flax") logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.") except importlib_metadata.PackageNotFoundError: _flax_available = False else: _flax_available = False _datasets_available = importlib.util.find_spec("datasets") is not None try: # Check we're not importing a "datasets" directory somewhere but the actual library by trying to grab the version # AND checking it has an author field in the metadata that is HuggingFace. _ = importlib_metadata.version("datasets") _datasets_metadata = importlib_metadata.metadata("datasets") if _datasets_metadata.get("author", "") != "HuggingFace Inc.": _datasets_available = False except importlib_metadata.PackageNotFoundError: _datasets_available = False _detectron2_available = importlib.util.find_spec("detectron2") is not None try: _detectron2_version = importlib_metadata.version("detectron2") logger.debug(f"Successfully imported detectron2 version {_detectron2_version}") except importlib_metadata.PackageNotFoundError: _detectron2_available = False _faiss_available = importlib.util.find_spec("faiss") is not None try: _faiss_version = importlib_metadata.version("faiss") logger.debug(f"Successfully imported faiss version {_faiss_version}") except importlib_metadata.PackageNotFoundError: try: _faiss_version = importlib_metadata.version("faiss-cpu") logger.debug(f"Successfully imported faiss version {_faiss_version}") except importlib_metadata.PackageNotFoundError: _faiss_available = False coloredlogs = importlib.util.find_spec("coloredlogs") is not None try: _coloredlogs_available = importlib_metadata.version("coloredlogs") logger.debug(f"Successfully imported sympy version {_coloredlogs_available}") except importlib_metadata.PackageNotFoundError: _coloredlogs_available = False sympy_available = importlib.util.find_spec("sympy") is not None try: _sympy_available = importlib_metadata.version("sympy") logger.debug(f"Successfully imported sympy version {_sympy_available}") except importlib_metadata.PackageNotFoundError: _sympy_available = False _tf2onnx_available = importlib.util.find_spec("tf2onnx") is not None try: _tf2onnx_version = importlib_metadata.version("tf2onnx") logger.debug(f"Successfully imported tf2onnx version {_tf2onnx_version}") except importlib_metadata.PackageNotFoundError: _tf2onnx_available = False _onnx_available = importlib.util.find_spec("onnxruntime") is not None try: _onxx_version = importlib_metadata.version("onnx") logger.debug(f"Successfully imported onnx version {_onxx_version}") except importlib_metadata.PackageNotFoundError: _onnx_available = False _scatter_available = importlib.util.find_spec("torch_scatter") is not None try: _scatter_version = importlib_metadata.version("torch_scatter") logger.debug(f"Successfully imported torch-scatter version {_scatter_version}") except importlib_metadata.PackageNotFoundError: _scatter_available = False _pytorch_quantization_available = importlib.util.find_spec("pytorch_quantization") is not None try: _pytorch_quantization_version = 
importlib_metadata.version("pytorch_quantization") logger.debug(f"Successfully imported pytorch-quantization version {_pytorch_quantization_version}") except importlib_metadata.PackageNotFoundError: _pytorch_quantization_available = False _soundfile_available = importlib.util.find_spec("soundfile") is not None try: _soundfile_version = importlib_metadata.version("soundfile") logger.debug(f"Successfully imported soundfile version {_soundfile_version}") except importlib_metadata.PackageNotFoundError: _soundfile_available = False _tensorflow_probability_available = importlib.util.find_spec("tensorflow_probability") is not None try: _tensorflow_probability_version = importlib_metadata.version("tensorflow_probability") logger.debug(f"Successfully imported tensorflow-probability version {_tensorflow_probability_version}") except importlib_metadata.PackageNotFoundError: _tensorflow_probability_available = False _timm_available = importlib.util.find_spec("timm") is not None try: _timm_version = importlib_metadata.version("timm") logger.debug(f"Successfully imported timm version {_timm_version}") except importlib_metadata.PackageNotFoundError: _timm_available = False _torchaudio_available = importlib.util.find_spec("torchaudio") is not None try: _torchaudio_version = importlib_metadata.version("torchaudio") logger.debug(f"Successfully imported torchaudio version {_torchaudio_version}") except importlib_metadata.PackageNotFoundError: _torchaudio_available = False _phonemizer_available = importlib.util.find_spec("phonemizer") is not None try: _phonemizer_version = importlib_metadata.version("phonemizer") logger.debug(f"Successfully imported phonemizer version {_phonemizer_version}") except importlib_metadata.PackageNotFoundError: _phonemizer_available = False _pyctcdecode_available = importlib.util.find_spec("pyctcdecode") is not None try: _pyctcdecode_version = importlib_metadata.version("pyctcdecode") logger.debug(f"Successfully imported pyctcdecode version {_pyctcdecode_version}") except importlib_metadata.PackageNotFoundError: _pyctcdecode_available = False _librosa_available = importlib.util.find_spec("librosa") is not None try: _librosa_version = importlib_metadata.version("librosa") logger.debug(f"Successfully imported librosa version {_librosa_version}") except importlib_metadata.PackageNotFoundError: _librosa_available = False torch_cache_home = os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) old_default_cache_path = os.path.join(torch_cache_home, "transformers") # New default cache, shared with the Datasets library hf_cache_home = os.path.expanduser( os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) ) default_cache_path = os.path.join(hf_cache_home, "transformers") # Onetime move from the old location to the new one if no ENV variable has been set. if ( os.path.isdir(old_default_cache_path) and not os.path.isdir(default_cache_path) and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ and "TRANSFORMERS_CACHE" not in os.environ ): logger.warning( "In Transformers v4.0.0, the default path to cache downloaded models changed from " "'~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have overridden " "and '~/.cache/torch/transformers' is a directory that exists, we're moving it to " "'~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should " "only see this message once." 
) shutil.move(old_default_cache_path, default_cache_path) PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules")) TRANSFORMERS_DYNAMIC_MODULE_NAME = "transformers_modules" SESSION_ID = uuid4().hex DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", False) in ENV_VARS_TRUE_VALUES WEIGHTS_NAME = "pytorch_model.bin" TF2_WEIGHTS_NAME = "tf_model.h5" TF_WEIGHTS_NAME = "model.ckpt" FLAX_WEIGHTS_NAME = "flax_model.msgpack" CONFIG_NAME = "config.json" FEATURE_EXTRACTOR_NAME = "preprocessor_config.json" MODEL_CARD_NAME = "modelcard.json" SENTENCEPIECE_UNDERLINE = "▁" SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility MULTIPLE_CHOICE_DUMMY_INPUTS = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" _staging_mode = os.environ.get("HUGGINGFACE_CO_STAGING", "NO").upper() in ENV_VARS_TRUE_VALUES _default_endpoint = "https://moon-staging.huggingface.co" if _staging_mode else "https://huggingface.co" HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", _default_endpoint) HUGGINGFACE_CO_PREFIX = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/{model_id}/resolve/{revision}/{filename}" # This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs. TORCH_FX_REQUIRED_VERSION = version.parse("1.9") TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION = version.parse("1.8") _is_offline_mode = True if os.environ.get("TRANSFORMERS_OFFLINE", "0").upper() in ENV_VARS_TRUE_VALUES else False def is_offline_mode(): return _is_offline_mode def is_torch_available(): return _torch_available def is_pyctcdecode_available(): return _pyctcdecode_available def is_librosa_available(): return _librosa_available def is_torch_cuda_available(): if is_torch_available(): import torch return torch.cuda.is_available() else: return False def is_torch_bf16_available(): if not is_torch_available(): return False import torch # since currently no utility function is available we build our own. # some bits come from https://github.com/pytorch/pytorch/blob/2289a12f21c54da93bf5d696e3f9aea83dd9c10d/torch/testing/_internal/common_cuda.py#L51 # with additional check for torch version # to succeed: # 1. the hardware needs to support bf16 (arch >= Ampere) # 2. torch >= 1.10 (1.9 should be enough for AMP API has changed in 1.10, so using 1.10 as minimal) # 3. CUDA >= 11 # 4. 
torch.autocast exists # XXX: one problem here is that it may give invalid results on mixed gpus setup, so it's # really only correct for the 0th gpu (or currently set default device if different from 0) if not torch.cuda.is_available() or torch.version.cuda is None: return False if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: return False if int(torch.version.cuda.split(".")[0]) < 11: return False if version.parse(torch.__version__) < version.parse("1.10"): return False if not hasattr(torch, "autocast"): return False return True def is_torch_tf32_available(): if not is_torch_available(): return False import torch if not torch.cuda.is_available() or torch.version.cuda is None: return False if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: return False if int(torch.version.cuda.split(".")[0]) < 11: return False if version.parse(torch.__version__) < version.parse("1.7"): return False return True _torch_fx_available = _torch_onnx_dict_inputs_support_available = False if _torch_available: torch_version = version.parse(importlib_metadata.version("torch")) _torch_fx_available = (torch_version.major, torch_version.minor) == ( TORCH_FX_REQUIRED_VERSION.major, TORCH_FX_REQUIRED_VERSION.minor, ) _torch_onnx_dict_inputs_support_available = torch_version >= TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION def is_torch_fx_available(): return _torch_fx_available def is_torch_onnx_dict_inputs_support_available(): return _torch_onnx_dict_inputs_support_available def is_tf_available(): return _tf_available def is_coloredlogs_available(): return _coloredlogs_available def is_tf2onnx_available(): return _tf2onnx_available def is_onnx_available(): return _onnx_available def is_flax_available(): return _flax_available def is_torch_tpu_available(): if not _torch_available: return False # This test is probably enough, but just in case, we unpack a bit. 
if importlib.util.find_spec("torch_xla") is None: return False if importlib.util.find_spec("torch_xla.core") is None: return False return importlib.util.find_spec("torch_xla.core.xla_model") is not None def is_datasets_available(): return _datasets_available def is_detectron2_available(): return _detectron2_available def is_rjieba_available(): return importlib.util.find_spec("rjieba") is not None def is_psutil_available(): return importlib.util.find_spec("psutil") is not None def is_py3nvml_available(): return importlib.util.find_spec("py3nvml") is not None def is_apex_available(): return importlib.util.find_spec("apex") is not None def is_faiss_available(): return _faiss_available def is_scipy_available(): return importlib.util.find_spec("scipy") is not None def is_sklearn_available(): if importlib.util.find_spec("sklearn") is None: return False return is_scipy_available() and importlib.util.find_spec("sklearn.metrics") def is_sentencepiece_available(): return importlib.util.find_spec("sentencepiece") is not None def is_protobuf_available(): if importlib.util.find_spec("google") is None: return False return importlib.util.find_spec("google.protobuf") is not None def is_tokenizers_available(): return importlib.util.find_spec("tokenizers") is not None def is_vision_available(): return importlib.util.find_spec("PIL") is not None def is_pytesseract_available(): return importlib.util.find_spec("pytesseract") is not None def is_spacy_available(): return importlib.util.find_spec("spacy") is not None def is_ftfy_available(): return importlib.util.find_spec("ftfy") is not None def is_in_notebook(): try: # Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py get_ipython = sys.modules["IPython"].get_ipython if "IPKernelApp" not in get_ipython().config: raise ImportError("console") if "VSCODE_PID" in os.environ: raise ImportError("vscode") return importlib.util.find_spec("IPython") is not None except (AttributeError, ImportError, KeyError): return False def is_scatter_available(): return _scatter_available def is_pytorch_quantization_available(): return _pytorch_quantization_available def is_tensorflow_probability_available(): return _tensorflow_probability_available def is_pandas_available(): return importlib.util.find_spec("pandas") is not None def is_sagemaker_dp_enabled(): # Get the sagemaker specific env variable. sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". sagemaker_params = json.loads(sagemaker_params) if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None def is_sagemaker_mp_enabled(): # Get the sagemaker specific mp parameters from smp_options variable. smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") try: # Parse it and check the field "partitions" is included, it is required for model parallel. smp_options = json.loads(smp_options) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". 
mpi_options = json.loads(mpi_options) if not mpi_options.get("sagemaker_mpi_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None def is_training_run_on_sagemaker(): return "SAGEMAKER_JOB_NAME" in os.environ def is_soundfile_availble(): return _soundfile_available def is_timm_available(): return _timm_available def is_torchaudio_available(): return _torchaudio_available def is_speech_available(): # For now this depends on torchaudio but the exact dependency might evolve in the future. return _torchaudio_available def is_phonemizer_available(): return _phonemizer_available def torch_only_method(fn): def wrapper(*args, **kwargs): if not _torch_available: raise ImportError( "You need to install pytorch to use this method or class, " "or activate it with environment variables USE_TORCH=1 and USE_TF=0." ) else: return fn(*args, **kwargs) return wrapper # docstyle-ignore DATASETS_IMPORT_ERROR = """ {0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with: ``` pip install datasets ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install datasets ``` then restarting your kernel. Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current working directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or that python file if that's the case. """ # docstyle-ignore TOKENIZERS_IMPORT_ERROR = """ {0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with: ``` pip install tokenizers ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install tokenizers ``` """ # docstyle-ignore SENTENCEPIECE_IMPORT_ERROR = """ {0} requires the SentencePiece library but it was not found in your environment. Checkout the instructions on the installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones that match your environment. """ # docstyle-ignore PROTOBUF_IMPORT_ERROR = """ {0} requires the protobuf library but it was not found in your environment. Checkout the instructions on the installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones that match your environment. """ # docstyle-ignore FAISS_IMPORT_ERROR = """ {0} requires the faiss library but it was not found in your environment. Checkout the instructions on the installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones that match your environment. """ # docstyle-ignore PYTORCH_IMPORT_ERROR = """ {0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. """ # docstyle-ignore SKLEARN_IMPORT_ERROR = """ {0} requires the scikit-learn library but it was not found in your environment. You can install it with: ``` pip install -U scikit-learn ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install -U scikit-learn ``` """ # docstyle-ignore TENSORFLOW_IMPORT_ERROR = """ {0} requires the TensorFlow library but it was not found in your environment. 
Checkout the instructions on the installation page: https://www.tensorflow.org/install and follow the ones that match your environment. """ # docstyle-ignore DETECTRON2_IMPORT_ERROR = """ {0} requires the detectron2 library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones that match your environment. """ # docstyle-ignore FLAX_IMPORT_ERROR = """ {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/google/flax and follow the ones that match your environment. """ # docstyle-ignore SCATTER_IMPORT_ERROR = """ {0} requires the torch-scatter library but it was not found in your environment. You can install it with pip as explained here: https://github.com/rusty1s/pytorch_scatter. """ # docstyle-ignore PYTORCH_QUANTIZATION_IMPORT_ERROR = """ {0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip: `pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com` """ # docstyle-ignore TENSORFLOW_PROBABILITY_IMPORT_ERROR = """ {0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as explained here: https://github.com/tensorflow/probability. """ # docstyle-ignore PANDAS_IMPORT_ERROR = """ {0} requires the pandas library but it was not found in your environment. You can install it with pip as explained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html. """ # docstyle-ignore PHONEMIZER_IMPORT_ERROR = """ {0} requires the phonemizer library but it was not found in your environment. You can install it with pip: `pip install phonemizer` """ # docstyle-ignore SCIPY_IMPORT_ERROR = """ {0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install scipy` """ # docstyle-ignore SPEECH_IMPORT_ERROR = """ {0} requires the torchaudio library but it was not found in your environment. You can install it with pip: `pip install torchaudio` """ # docstyle-ignore TIMM_IMPORT_ERROR = """ {0} requires the timm library but it was not found in your environment. You can install it with pip: `pip install timm` """ # docstyle-ignore VISION_IMPORT_ERROR = """ {0} requires the PIL library but it was not found in your environment. You can install it with pip: `pip install pillow` """ # docstyle-ignore PYTESSERACT_IMPORT_ERROR = """ {0} requires the PyTesseract library but it was not found in your environment. You can install it with pip: `pip install pytesseract` """ # docstyle-ignore PYCTCDECODE_IMPORT_ERROR = """ {0} requires the pyctcdecode library but it was not found in your environment. 
You can install it with pip: `pip install pyctcdecode` """ BACKENDS_MAPPING = OrderedDict( [ ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)), ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)), ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)), ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)), ("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)), ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)), ("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)), ("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)), ("scatter", (is_scatter_available, SCATTER_IMPORT_ERROR)), ("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)), ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)), ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)), ("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)), ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), ("timm", (is_timm_available, TIMM_IMPORT_ERROR)), ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)), ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), ("vision", (is_vision_available, VISION_IMPORT_ERROR)), ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), ] ) def requires_backends(obj, backends): if not isinstance(backends, (list, tuple)): backends = [backends] name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ if not all(BACKENDS_MAPPING[backend][0]() for backend in backends): raise ImportError("".join([BACKENDS_MAPPING[backend][1].format(name) for backend in backends])) class DummyObject(type): """ Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by `requires_backend` each time a user tries to access any method of that class. """ def __getattr__(cls, key): if key.startswith("_"): return super().__getattr__(cls, key) requires_backends(cls, cls._backends) def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_start_docstrings_to_model_forward(*docstr): def docstring_decorator(fn): docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") class_name = f"[`{fn.__qualname__.split('.')[0]}`]" intro = f" The {class_name} forward method, overrides the `__call__` special method." note = r""" <Tip> Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`] instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them. </Tip> """ fn.__doc__ = intro + note + docstring return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr) return fn return docstring_decorator PT_RETURN_INTRODUCTION = r""" Returns: [`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of `torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([`{config_class}`]) and inputs. 
""" TF_RETURN_INTRODUCTION = r""" Returns: [`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([`{config_class}`]) and inputs. """ def _get_indent(t): """Returns the indentation in the first line of t""" search = re.search(r"^(\s*)\S", t) return "" if search is None else search.groups()[0] def _convert_output_args_doc(output_args_doc): """Convert output_args_doc to display properly.""" # Split output_arg_doc in blocks argument/description indent = _get_indent(output_args_doc) blocks = [] current_block = "" for line in output_args_doc.split("\n"): # If the indent is the same as the beginning, the line is the name of new arg. if _get_indent(line) == indent: if len(current_block) > 0: blocks.append(current_block[:-1]) current_block = f"{line}\n" else: # Otherwise it's part of the description of the current arg. # We need to remove 2 spaces to the indentation. current_block += f"{line[2:]}\n" blocks.append(current_block[:-1]) # Format each block for proper rendering for i in range(len(blocks)): blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i]) blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i]) return "\n".join(blocks) def _prepare_output_docstrings(output_type, config_class, min_indent=None): """ Prepares the return part of the docstring using `output_type`. """ output_docstring = output_type.__doc__ # Remove the head of the docstring to keep the list of args only lines = output_docstring.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): params_docstring = "\n".join(lines[(i + 1) :]) params_docstring = _convert_output_args_doc(params_docstring) # Add the return introduction full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) result = intro + params_docstring # Apply minimum indent if necessary if min_indent is not None: lines = result.split("\n") # Find the indent of the first nonempty line i = 0 while len(lines[i]) == 0: i += 1 indent = len(_get_indent(lines[i])) # If too small, add indentation to all nonempty lines if indent < min_indent: to_add = " " * (min_indent - indent) lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines] result = "\n".join(lines) return result PT_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import torch >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import torch >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="pt") >>> start_positions = 
torch.tensor([1]) >>> end_positions = torch.tensor([3]) >>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits ``` """ PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example of single-label classification: ```python >>> from transformers import {processor_class}, {model_class} >>> import torch >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ``` Example of multi-label classification: ```python >>> from transformers import {processor_class}, {model_class} >>> import torch >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import torch >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt") >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import torch >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ PT_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import torch >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1 >>> # the linear classifier still needs to be trained >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> import torch >>> from transformers import {processor_class}, {model_class} >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs, labels=inputs["input_ids"]) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_SPEECH_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ PT_SPEECH_CTC_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_ids = torch.argmax(logits, dim=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] {expected_output} ``` ```python >>> with processor.as_target_processor(): ... inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SPEECH_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1).item() >>> predicted_label = model.config.id2label[predicted_class_ids] >>> predicted_label {expected_output} ``` ```python >>> # compute loss - target_label is e.g. "down" >>> target_label = model.config.id2label[0] >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) >>> loss = model(**inputs).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SPEECH_FRAME_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate) >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> probabilities = torch.sigmoid(logits[0]) >>> # labels is a one-hot array of shape (num_frames, num_speakers) >>> labels = (probabilities > 0.5).long() >>> labels[0].tolist() {expected_output} ``` """ PT_SPEECH_XVECTOR_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor( ... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True ... ) >>> with torch.no_grad(): ... embeddings = model(**inputs).embeddings >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() >>> # the resulting embeddings can be used for cosine similarity-based retrieval >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1) >>> similarity = cosine_sim(embeddings[0], embeddings[1]) >>> threshold = 0.7 # the optimal threshold is dataset-dependent >>> if similarity < threshold: ... print("Speakers are not the same!") >>> round(similarity.item(), 2) {expected_output} ``` """ PT_VISION_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = feature_extractor(image, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ PT_VISION_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = feature_extractor(image, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) {expected_output} ``` """ PT_SAMPLE_DOCSTRINGS = { "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE, "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": PT_MASKED_LM_SAMPLE, "LMHead": PT_CAUSAL_LM_SAMPLE, "BaseModel": PT_BASE_MODEL_SAMPLE, "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE, "CTC": PT_SPEECH_CTC_SAMPLE, "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE, "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE, "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE, "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE, "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE, } TF_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> input_ids = inputs["input_ids"] >>> inputs["labels"] = tf.reshape( ... 
tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids)) >>> ) # Batch size 1 >>> outputs = model(inputs) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ TF_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> input_dict = tokenizer(question, text, return_tensors="tf") >>> outputs = model(input_dict) >>> start_logits = outputs.start_logits >>> end_logits = outputs.end_logits >>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) >>> answer = " ".join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0] + 1]) ``` """ TF_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 >>> outputs = model(inputs) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ TF_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf") >>> inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] >>> outputs = model(inputs) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ TF_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ TF_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True) >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}} >>> outputs = model(inputs) # batch size is 1 >>> # the linear classifier still needs to be trained >>> logits = outputs.logits ``` """ TF_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> logits = outputs.logits ``` """ TF_SAMPLE_DOCSTRINGS = { "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE, "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": TF_MASKED_LM_SAMPLE, "LMHead": TF_CAUSAL_LM_SAMPLE, "BaseModel": TF_BASE_MODEL_SAMPLE, } FLAX_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="jax") >>> outputs = model(**inputs) >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits ``` """ FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ FLAX_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True) >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}}) >>> logits = outputs.logits ``` """ FLAX_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import {processor_class}, {model_class} >>> tokenizer = {processor_class}.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np") >>> outputs = model(**inputs) >>> # retrieve logts for next token >>> next_token_logits = outputs.logits[:, -1] ``` """ FLAX_SAMPLE_DOCSTRINGS = { "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE, "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": FLAX_MASKED_LM_SAMPLE, "BaseModel": FLAX_BASE_MODEL_SAMPLE, "LMHead": FLAX_CAUSAL_LM_SAMPLE, } def add_code_sample_docstrings( *docstr, processor_class=None, checkpoint=None, output_type=None, config_class=None, mask="[MASK]", model_cls=None, modality=None, expected_output="", expected_loss="", ): def docstring_decorator(fn): # model_class defaults to function's class if not specified otherwise model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls if model_class[:2] == "TF": sample_docstrings = TF_SAMPLE_DOCSTRINGS elif model_class[:4] == "Flax": sample_docstrings = FLAX_SAMPLE_DOCSTRINGS else: sample_docstrings = PT_SAMPLE_DOCSTRINGS # putting all kwargs for docstrings in a dict to be used # with the `.format(**doc_kwargs)`. Note that string might # be formatted with non-existing keys, which is fine. doc_kwargs = dict( model_class=model_class, processor_class=processor_class, checkpoint=checkpoint, mask=mask, expected_output=expected_output, expected_loss=expected_loss, ) if "SequenceClassification" in model_class and modality == "audio": code_sample = sample_docstrings["AudioClassification"] elif "SequenceClassification" in model_class: code_sample = sample_docstrings["SequenceClassification"] elif "QuestionAnswering" in model_class: code_sample = sample_docstrings["QuestionAnswering"] elif "TokenClassification" in model_class: code_sample = sample_docstrings["TokenClassification"] elif "MultipleChoice" in model_class: code_sample = sample_docstrings["MultipleChoice"] elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]: code_sample = sample_docstrings["MaskedLM"] elif "LMHead" in model_class or "CausalLM" in model_class: code_sample = sample_docstrings["LMHead"] elif "CTC" in model_class: code_sample = sample_docstrings["CTC"] elif "AudioFrameClassification" in model_class: code_sample = sample_docstrings["AudioFrameClassification"] elif "XVector" in model_class and modality == "audio": code_sample = sample_docstrings["AudioXVector"] elif "Model" in model_class and modality == "audio": code_sample = sample_docstrings["SpeechBaseModel"] elif "Model" in model_class and modality == "vision": code_sample = sample_docstrings["VisionBaseModel"] elif "Model" in model_class or "Encoder" in model_class: code_sample = sample_docstrings["BaseModel"] elif "ImageClassification" in model_class: code_sample = sample_docstrings["ImageClassification"] else: raise ValueError(f"Docstring can't be built for model {model_class}") func_doc = (fn.__doc__ or "") + "".join(docstr) output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, 
config_class) built_doc = code_sample.format(**doc_kwargs) fn.__doc__ = func_doc + output_doc + built_doc return fn return docstring_decorator def replace_return_docstrings(output_type=None, config_class=None): def docstring_decorator(fn): func_doc = fn.__doc__ lines = func_doc.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None: i += 1 if i < len(lines): indent = len(_get_indent(lines[i])) lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent) func_doc = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, " f"current docstring is:\n{func_doc}" ) fn.__doc__ = func_doc return fn return docstring_decorator def is_remote_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def hf_bucket_url( model_id: str, filename: str, subfolder: Optional[str] = None, revision: Optional[str] = None, mirror=None ) -> str: """ Resolve a model identifier, a file name, and an optional revision id, to a huggingface.co-hosted url, redirecting to Cloudfront (a Content Delivery Network, or CDN) for large files. Cloudfront is replicated over the globe so downloads are way faster for the end user (and it also lowers our bandwidth costs). Cloudfront aggressively caches files by default (default TTL is 24 hours), however this is not an issue here because we migrated to a git-based versioning system on huggingface.co, so we now store the files on S3/Cloudfront in a content-addressable way (i.e., the file name is its hash). Using content-addressable filenames means cache can't ever be stale. In terms of client-side caching from this library, we base our caching on the objects' ETag. An object' ETag is: its sha1 if stored in git, or its sha256 if stored in git-lfs. Files cached locally from transformers before v3.5.0 are not shared with those new files, because the cached file's name contains a hash of the url (which changed). """ if subfolder is not None: filename = f"{subfolder}/{filename}" if mirror: if mirror in ["tuna", "bfsu"]: raise ValueError("The Tuna and BFSU mirrors are no longer available. Try removing the mirror argument.") legacy_format = "/" not in model_id if legacy_format: return f"{mirror}/{model_id}-{filename}" else: return f"{mirror}/{model_id}/{filename}" if revision is None: revision = "main" return HUGGINGFACE_CO_PREFIX.format(model_id=model_id, revision=revision, filename=filename) def url_to_filename(url: str, etag: Optional[str] = None) -> str: """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name so that TF 2.0 can identify it as a HDF5 file (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) """ url_bytes = url.encode("utf-8") filename = sha256(url_bytes).hexdigest() if etag: etag_bytes = etag.encode("utf-8") filename += "." + sha256(etag_bytes).hexdigest() if url.endswith(".h5"): filename += ".h5" return filename def filename_to_url(filename, cache_dir=None): """ Return the url and etag (which may be `None`) stored for *filename*. Raise `EnvironmentError` if *filename* or its stored metadata do not exist. 
""" if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError(f"file {cache_path} not found") meta_path = cache_path + ".json" if not os.path.exists(meta_path): raise EnvironmentError(f"file {meta_path} not found") with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] return url, etag def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]: """ Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url, etag, size_MB)`. Filenames in `cache_dir` are use to get the metadata for each model, only urls ending with *.bin* are added. Args: cache_dir (`Union[str, Path]`, *optional*): The cache directory to search for models within. Will default to the transformers cache if unset. Returns: List[Tuple]: List of tuples each with shape `(model_url, etag, size_MB)` """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE elif isinstance(cache_dir, Path): cache_dir = str(cache_dir) cached_models = [] for file in os.listdir(cache_dir): if file.endswith(".json"): meta_path = os.path.join(cache_dir, file) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] if url.endswith(".bin"): size_MB = os.path.getsize(meta_path.strip(".json")) / 1e6 cached_models.append((url, etag, size_MB)) return cached_models def cached_path( url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent: Union[Dict, str, None] = None, extract_compressed_file=False, force_extract=False, use_auth_token: Union[bool, str, None] = None, local_files_only=False, ) -> Optional[str]: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path Args: cache_dir: specify a cache directory to save the file to (overwrite the default cache dir). force_download: if True, re-download the file even if it's already cached in the cache dir. resume_download: if True, resume the download if incompletely received file is found. user_agent: Optional string or dict that will be appended to the user-agent on remote requests. use_auth_token: Optional string or boolean to use as Bearer token for remote files. If True, will get token from ~/.huggingface. extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed file in a folder along the archive. force_extract: if True when extract_compressed_file is True and the archive was already extracted, re-extract the archive and override the folder where it was extracted. Return: Local path (string) of file or if networking is off, last version of file cached on disk. Raises: In case of non-recoverable file (non-existent or inaccessible url + no cache on disk). 
""" if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, use_auth_token=use_auth_token, local_files_only=local_files_only, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif urlparse(url_or_filename).scheme == "": # File, but it doesn't exist. raise EnvironmentError(f"file {url_or_filename} not found") else: # Something unknown raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") if extract_compressed_file: if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" output_dir, output_file = os.path.split(output_path) output_extract_dir_name = output_file.replace(".", "-") + "-extracted" output_path_extracted = os.path.join(output_dir, output_extract_dir_name) if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract: return output_path_extracted # Prevent parallel extractions lock_path = output_path + ".lock" with FileLock(lock_path): shutil.rmtree(output_path_extracted, ignore_errors=True) os.makedirs(output_path_extracted) if is_zipfile(output_path): with ZipFile(output_path, "r") as zip_file: zip_file.extractall(output_path_extracted) zip_file.close() elif tarfile.is_tarfile(output_path): tar_file = tarfile.open(output_path) tar_file.extractall(output_path_extracted) tar_file.close() else: raise EnvironmentError(f"Archive format of {output_path} could not be identified") return output_path_extracted return output_path def define_sagemaker_information(): try: instance_data = requests.get(os.environ["ECS_CONTAINER_METADATA_URI"]).json() dlc_container_used = instance_data["Image"] dlc_tag = instance_data["Image"].split(":")[1] except Exception: dlc_container_used = None dlc_tag = None sagemaker_params = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}")) runs_distributed_training = True if "sagemaker_distributed_dataparallel_enabled" in sagemaker_params else False account_id = os.getenv("TRAINING_JOB_ARN").split(":")[4] if "TRAINING_JOB_ARN" in os.environ else None sagemaker_object = { "sm_framework": os.getenv("SM_FRAMEWORK_MODULE", None), "sm_region": os.getenv("AWS_REGION", None), "sm_number_gpu": os.getenv("SM_NUM_GPUS", 0), "sm_number_cpu": os.getenv("SM_NUM_CPUS", 0), "sm_distributed_training": runs_distributed_training, "sm_deep_learning_container": dlc_container_used, "sm_deep_learning_container_tag": dlc_tag, "sm_account_id": account_id, } return sagemaker_object def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: """ Formats a user-agent string with basic info about a request. 
""" ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_tf_available(): ua += f"; tensorflow/{_tf_version}" if DISABLE_TELEMETRY: return ua + "; telemetry/off" if is_training_run_on_sagemaker(): ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items()) # CI will set this value to True if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(user_agent, dict): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent return ua class RepositoryNotFoundError(HTTPError): """ Raised when trying to access a hf.co URL with an invalid repository name, or with a private repo name the user does not have access to. """ class EntryNotFoundError(HTTPError): """Raised when trying to access a hf.co URL with a valid repository and revision but an invalid filename.""" class RevisionNotFoundError(HTTPError): """Raised when trying to access a hf.co URL with a valid repository but an invalid revision.""" def _raise_for_status(request): """ Internal version of `request.raise_for_status()` that will refine a potential HTTPError. """ if "X-Error-Code" in request.headers: error_code = request.headers["X-Error-Code"] if error_code == "RepoNotFound": raise RepositoryNotFoundError(f"404 Client Error: Repository Not Found for url: {request.url}") elif error_code == "EntryNotFound": raise EntryNotFoundError(f"404 Client Error: Entry Not Found for url: {request.url}") elif error_code == "RevisionNotFound": raise RevisionNotFoundError((f"404 Client Error: Revision Not Found for url: {request.url}")) request.raise_for_status() def http_get(url: str, temp_file: BinaryIO, proxies=None, resume_size=0, headers: Optional[Dict[str, str]] = None): """ Download remote file. Do not gobble up errors. """ headers = copy.deepcopy(headers) if resume_size > 0: headers["Range"] = f"bytes={resume_size}-" r = requests.get(url, stream=True, proxies=proxies, headers=headers) _raise_for_status(r) content_length = r.headers.get("Content-Length") total = resume_size + int(content_length) if content_length is not None else None # `tqdm` behavior is determined by `utils.logging.is_progress_bar_enabled()` # and can be set using `utils.logging.enable/disable_progress_bar()` progress = tqdm( unit="B", unit_scale=True, unit_divisor=1024, total=total, initial=resume_size, desc="Downloading", ) for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache( url: str, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent: Union[Dict, str, None] = None, use_auth_token: Union[bool, str, None] = None, local_files_only=False, ) -> Optional[str]: """ Given a URL, look for the corresponding file in the local cache. If it's not there, download it. Then return the path to the cached file. Return: Local path (string) of file or if networking is off, last version of file cached on disk. Raises: In case of non-recoverable file (non-existent or inaccessible url + no cache on disk). 
""" if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) headers = {"user-agent": http_user_agent(user_agent)} if isinstance(use_auth_token, str): headers["authorization"] = f"Bearer {use_auth_token}" elif use_auth_token: token = HfFolder.get_token() if token is None: raise EnvironmentError("You specified use_auth_token=True, but a huggingface token was not found.") headers["authorization"] = f"Bearer {token}" url_to_download = url etag = None if not local_files_only: try: r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=etag_timeout) _raise_for_status(r) etag = r.headers.get("X-Linked-Etag") or r.headers.get("ETag") # We favor a custom header indicating the etag of the linked resource, and # we fallback to the regular etag header. # If we don't have any of those, raise an error. if etag is None: raise OSError( "Distant resource does not have an ETag, we won't be able to reliably ensure reproducibility." ) # In case of a redirect, # save an extra redirect on the request.get call, # and ensure we download the exact atomic version even if it changed # between the HEAD and the GET (unlikely, but hey). if 300 <= r.status_code <= 399: url_to_download = r.headers["Location"] except (requests.exceptions.SSLError, requests.exceptions.ProxyError): # Actually raise for those subclasses of ConnectionError raise except (requests.exceptions.ConnectionError, requests.exceptions.Timeout): # Otherwise, our Internet connection is down. # etag is None pass filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # etag is None == we don't have a connection or we passed local_files_only. # try to get the last downloaded one if etag is None: if os.path.exists(cache_path): return cache_path else: matching_files = [ file for file in fnmatch.filter(os.listdir(cache_dir), filename.split(".")[0] + ".*") if not file.endswith(".json") and not file.endswith(".lock") ] if len(matching_files) > 0: return os.path.join(cache_dir, matching_files[-1]) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise FileNotFoundError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) else: raise ValueError( "Connection error, and we cannot find the requested files in the cached path." " Please try again or make sure your Internet connection is on." ) # From now on, etag is not None. if os.path.exists(cache_path) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. lock_path = cache_path + ".lock" with FileLock(lock_path): # If the download just completed while the lock was activated. if os.path.exists(cache_path) and not force_download: # Even if returning early like here, the lock will be released. 
return cache_path if resume_download: incomplete_path = cache_path + ".incomplete" @contextmanager def _resumable_file_manager() -> "io.BufferedWriter": with open(incomplete_path, "ab") as f: yield f temp_file_manager = _resumable_file_manager if os.path.exists(incomplete_path): resume_size = os.stat(incomplete_path).st_size else: resume_size = 0 else: temp_file_manager = partial(tempfile.NamedTemporaryFile, mode="wb", dir=cache_dir, delete=False) resume_size = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}") http_get(url_to_download, temp_file, proxies=proxies, resume_size=resume_size, headers=headers) logger.info(f"storing {url} in cache at {cache_path}") os.replace(temp_file.name, cache_path) # NamedTemporaryFile creates a file with hardwired 0600 perms (ignoring umask), so fixing it. umask = os.umask(0o666) os.umask(umask) os.chmod(cache_path, 0o666 & ~umask) logger.info(f"creating metadata file for {cache_path}") meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w") as meta_file: json.dump(meta, meta_file) return cache_path def get_file_from_repo( path_or_repo: Union[str, os.PathLike], filename: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, ): """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). revision(`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. 
</Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the file does not exist. Examples: ```python # Download a tokenizer configuration from huggingface.co and cache. tokenizer_config = get_file_from_repo("bert-base-uncased", "tokenizer_config.json") # This model does not have a tokenizer config so the result will be None. tokenizer_config = get_file_from_repo("xlm-roberta-base", "tokenizer_config.json") ```""" if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True path_or_repo = str(path_or_repo) if os.path.isdir(path_or_repo): resolved_file = os.path.join(path_or_repo, filename) return resolved_file if os.path.isfile(resolved_file) else None else: resolved_file = hf_bucket_url(path_or_repo, filename=filename, revision=revision, mirror=None) try: # Load from URL or cache if already cached resolved_file = cached_path( resolved_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, ) except RepositoryNotFoundError as err: logger.error(err) raise EnvironmentError( f"{path_or_repo} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to " "pass a token having permission to this repo with `use_auth_token` or log in with " "`huggingface-cli login` and pass `use_auth_token=True`." ) except RevisionNotFoundError as err: logger.error(err) raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists " "for this model name. Check the model page at " f"'https://huggingface.co/{path_or_repo}' for available revisions." ) except EnvironmentError: # The repo and revision exist, but the file does not or there was a connection error fetching it. return None return resolved_file def has_file( path_or_repo: Union[str, os.PathLike], filename: str, revision: Optional[str] = None, mirror: Optional[str] = None, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, ): """ Checks if a repo contains a given file wihtout downloading it. Works for remote repos and local folders. <Tip warning={false}> This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for this repo, but will return False for regular connection errors. 
</Tip> """ if os.path.isdir(path_or_repo): return os.path.isfile(os.path.join(path_or_repo, filename)) url = hf_bucket_url(path_or_repo, filename=filename, revision=revision, mirror=mirror) headers = {"user-agent": http_user_agent()} if isinstance(use_auth_token, str): headers["authorization"] = f"Bearer {use_auth_token}" elif use_auth_token: token = HfFolder.get_token() if token is None: raise EnvironmentError("You specified use_auth_token=True, but a huggingface token was not found.") headers["authorization"] = f"Bearer {token}" r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=10) try: _raise_for_status(r) return True except RepositoryNotFoundError as e: logger.error(e) raise EnvironmentError(f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.") except RevisionNotFoundError as e: logger.error(e) raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this " "model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions." ) except requests.HTTPError: # We return false for EntryNotFoundError (logical) as well as any connection error. return False def get_list_of_files( path_or_repo: Union[str, os.PathLike], revision: Optional[str] = None, use_auth_token: Optional[Union[bool, str]] = None, local_files_only: bool = False, ) -> List[str]: """ Gets the list of files inside `path_or_repo`. Args: path_or_repo (`str` or `os.PathLike`): Can be either the id of a repo on huggingface.co or a path to a *directory*. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). local_files_only (`bool`, *optional*, defaults to `False`): Whether or not to only rely on local files and not to attempt to download any files. <Tip warning={true}> This API is not optimized, so calling it a lot may result in connection errors. </Tip> Returns: `List[str]`: The list of files available in `path_or_repo`. """ path_or_repo = str(path_or_repo) # If path_or_repo is a folder, we just return what is inside (subdirectories included). if os.path.isdir(path_or_repo): list_of_files = [] for path, dir_names, file_names in os.walk(path_or_repo): list_of_files.extend([os.path.join(path, f) for f in file_names]) return list_of_files # Can't grab the files if we are on offline mode. if is_offline_mode() or local_files_only: return [] # Otherwise we grab the token and use the list_repo_files method. if isinstance(use_auth_token, str): token = use_auth_token elif use_auth_token is True: token = HfFolder.get_token() else: token = None try: return list_repo_files(path_or_repo, revision=revision, token=token) except HTTPError as e: raise ValueError( f"{path_or_repo} is not a local path or a model identifier on the model Hub. Did you make a typo?" ) from e class cached_property(property): """ Descriptor that mimics @property but caches output in member variable. From tensorflow_datasets Built-in in functools from Python 3.8. 
""" def __get__(self, obj, objtype=None): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute") attr = "__cached_" + self.fget.__name__ cached = getattr(obj, attr, None) if cached is None: cached = self.fget(obj) setattr(obj, attr, cached) return cached def torch_required(func): # Chose a different decorator name than in tests so it's clear they are not the same. @wraps(func) def wrapper(*args, **kwargs): if is_torch_available(): return func(*args, **kwargs) else: raise ImportError(f"Method `{func.__name__}` requires PyTorch.") return wrapper def tf_required(func): # Chose a different decorator name than in tests so it's clear they are not the same. @wraps(func) def wrapper(*args, **kwargs): if is_tf_available(): return func(*args, **kwargs) else: raise ImportError(f"Method `{func.__name__}` requires TF.") return wrapper def is_torch_fx_proxy(x): if is_torch_fx_available(): import torch.fx return isinstance(x, torch.fx.Proxy) return False def is_tensor(x): """ Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`. """ if is_torch_fx_proxy(x): return True if is_torch_available(): import torch if isinstance(x, torch.Tensor): return True if is_tf_available(): import tensorflow as tf if isinstance(x, tf.Tensor): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(x, (jnp.ndarray, Tracer)): return True return isinstance(x, np.ndarray) def _is_numpy(x): return isinstance(x, np.ndarray) def _is_torch(x): import torch return isinstance(x, torch.Tensor) def _is_torch_device(x): import torch return isinstance(x, torch.device) def _is_tensorflow(x): import tensorflow as tf return isinstance(x, tf.Tensor) def _is_jax(x): import jax.numpy as jnp # noqa: F811 return isinstance(x, jnp.ndarray) def to_py_obj(obj): """ Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list. """ if isinstance(obj, (dict, UserDict)): return {k: to_py_obj(v) for k, v in obj.items()} elif isinstance(obj, (list, tuple)): return [to_py_obj(o) for o in obj] elif is_tf_available() and _is_tensorflow(obj): return obj.numpy().tolist() elif is_torch_available() and _is_torch(obj): return obj.detach().cpu().tolist() elif is_flax_available() and _is_jax(obj): return np.asarray(obj).tolist() elif isinstance(obj, (np.ndarray, np.number)): # tolist also works on 0d np arrays return obj.tolist() else: return obj def to_numpy(obj): """ Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array. """ if isinstance(obj, (dict, UserDict)): return {k: to_numpy(v) for k, v in obj.items()} elif isinstance(obj, (list, tuple)): return np.array(obj) elif is_tf_available() and _is_tensorflow(obj): return obj.numpy() elif is_torch_available() and _is_torch(obj): return obj.detach().cpu().numpy() elif is_flax_available() and _is_jax(obj): return np.asarray(obj) else: return obj class ModelOutput(OrderedDict): """ Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular python dictionary. <Tip warning={true}> You can't unpack a `ModelOutput` directly. Use the [`~file_utils.ModelOutput.to_tuple`] method to convert it to a tuple before. 
</Tip> """ def __post_init__(self): class_fields = fields(self) # Safety and consistency checks if not len(class_fields): raise ValueError(f"{self.__class__.__name__} has no fields.") if not all(field.default is None for field in class_fields[1:]): raise ValueError(f"{self.__class__.__name__} should not have more than one required field.") first_field = getattr(self, class_fields[0].name) other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(first_field): if isinstance(first_field, dict): iterator = first_field.items() first_field_iterator = True else: try: iterator = iter(first_field) first_field_iterator = True except TypeError: first_field_iterator = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for element in iterator: if ( not isinstance(element, (list, tuple)) or not len(element) == 2 or not isinstance(element[0], str) ): break setattr(self, element[0], element[1]) if element[1] is not None: self[element[0]] = element[1] elif first_field is not None: self[class_fields[0].name] = first_field else: for field in class_fields: v = getattr(self, field.name) if v is not None: self[field.name] = v def __delitem__(self, *args, **kwargs): raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") def setdefault(self, *args, **kwargs): raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") def pop(self, *args, **kwargs): raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") def update(self, *args, **kwargs): raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") def __getitem__(self, k): if isinstance(k, str): inner_dict = {k: v for (k, v) in self.items()} return inner_dict[k] else: return self.to_tuple()[k] def __setattr__(self, name, value): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(name, value) super().__setattr__(name, value) def __setitem__(self, key, value): # Will raise a KeyException if needed super().__setitem__(key, value) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(key, value) def to_tuple(self) -> Tuple[Any]: """ Convert self to a tuple containing all the attributes/keys that are not `None`. """ return tuple(self[k] for k in self.keys()) class ExplicitEnum(Enum): """ Enum with more explicit error message for missing values. """ @classmethod def _missing_(cls, value): raise ValueError( f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}" ) class PaddingStrategy(ExplicitEnum): """ Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an IDE. """ LONGEST = "longest" MAX_LENGTH = "max_length" DO_NOT_PAD = "do_not_pad" class TensorType(ExplicitEnum): """ Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an IDE. """ PYTORCH = "pt" TENSORFLOW = "tf" NUMPY = "np" JAX = "jax" class _LazyModule(ModuleType): """ Module class that surfaces all objects but only performs associated imports when the objects are requested. 
""" # Very heavily inspired by optuna.integration._IntegrationModule # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): super().__init__(name) self._modules = set(import_structure.keys()) self._class_to_module = {} for key, values in import_structure.items(): for value in values: self._class_to_module[value] = key # Needed for autocompletion in an IDE self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) self.__file__ = module_file self.__spec__ = module_spec self.__path__ = [os.path.dirname(module_file)] self._objects = {} if extra_objects is None else extra_objects self._name = name self._import_structure = import_structure # Needed for autocompletion in an IDE def __dir__(self): result = super().__dir__() # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir. for attr in self.__all__: if attr not in result: result.append(attr) return result def __getattr__(self, name: str) -> Any: if name in self._objects: return self._objects[name] if name in self._modules: value = self._get_module(name) elif name in self._class_to_module.keys(): module = self._get_module(self._class_to_module[name]) value = getattr(module, name) else: raise AttributeError(f"module {self.__name__} has no attribute {name}") setattr(self, name, value) return value def _get_module(self, module_name: str): try: return importlib.import_module("." + module_name, self.__name__) except Exception as e: raise RuntimeError( f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its traceback):\n{e}" ) from e def __reduce__(self): return (self.__class__, (self._name, self.__file__, self._import_structure)) def copy_func(f): """Returns a copy of a function f.""" # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard) g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ return g def is_local_clone(repo_path, repo_url): """ Checks if the folder in `repo_path` is a local clone of `repo_url`. """ # First double-check that `repo_path` is a git repo if not os.path.exists(os.path.join(repo_path, ".git")): return False test_git = subprocess.run("git branch".split(), cwd=repo_path) if test_git.returncode != 0: return False # Then look at its remotes remotes = subprocess.run( "git remote -v".split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE, check=True, encoding="utf-8", cwd=repo_path, ).stdout return repo_url in remotes.split() class PushToHubMixin: """ A Mixin containing the functionality to push a model or tokenizer to the hub. """ def push_to_hub( self, repo_path_or_name: Optional[str] = None, repo_url: Optional[str] = None, use_temp_dir: bool = False, commit_message: Optional[str] = None, organization: Optional[str] = None, private: Optional[bool] = None, use_auth_token: Optional[Union[bool, str]] = None, **model_card_kwargs ) -> str: """ Upload the {object_files} to the 🤗 Model Hub while synchronizing a local clone of the repo in `repo_path_or_name`. 
Parameters: repo_path_or_name (`str`, *optional*): Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case the repository will have the name of that local folder). If not specified, will default to the name given by `repo_url` and a local directory with that name will be created. repo_url (`str`, *optional*): Specify this in case you want to push to an existing repository in the hub. If unspecified, a new repository will be created in your namespace (unless you specify an `organization`) with `repo_name`. use_temp_dir (`bool`, *optional*, defaults to `False`): Whether or not to clone the distant repo in a temporary directory or in `repo_path_or_name` inside the current working directory. This will slow things down if you are making changes in an existing repo since you will need to clone the repo before every push. commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"add {object}"`. organization (`str`, *optional*): Organization in which you want to push your {object} (you must be a member of this organization). private (`bool`, *optional*): Whether or not the repository created should be private (requires a paying subscription). use_auth_token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified. Returns: `str`: The url of the commit of your {object} in the given repository. Examples: ```python from transformers import {object_class} {object} = {object_class}.from_pretrained("bert-base-cased") # Push the {object} to your namespace with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. {object}.push_to_hub("my-finetuned-bert") # Push the {object} to your namespace with the name "my-finetuned-bert" with no local clone. {object}.push_to_hub("my-finetuned-bert", use_temp_dir=True) # Push the {object} to an organization with the name "my-finetuned-bert" and have a local clone in the # *my-finetuned-bert* folder. {object}.push_to_hub("my-finetuned-bert", organization="huggingface") # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*. {object}.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert") ``` """ if use_temp_dir: # Make sure we use the right `repo_name` for the `repo_url` before replacing it. if repo_url is None: if use_auth_token is None: use_auth_token = True repo_name = Path(repo_path_or_name).name repo_url = self._get_repo_url_from_name( repo_name, organization=organization, private=private, use_auth_token=use_auth_token ) repo_path_or_name = tempfile.mkdtemp() # Create or clone the repo. If the repo is already cloned, this just retrieves the path to the repo. repo = self._create_or_get_repo( repo_path_or_name=repo_path_or_name, repo_url=repo_url, organization=organization, private=private, use_auth_token=use_auth_token, ) # Save the files in the cloned repo self.save_pretrained(repo_path_or_name) if hasattr(self, "history") and hasattr(self, "create_model_card"): # This is a Keras model and we might be able to fish out its History and make a model card out of it base_model_card_args = { "output_dir": repo_path_or_name, "model_name": Path(repo_path_or_name).name, } base_model_card_args.update(model_card_kwargs) self.create_model_card(**base_model_card_args) # Commit and push! 
url = self._push_to_hub(repo, commit_message=commit_message) # Clean up! Clean up! Everybody everywhere! if use_temp_dir: shutil.rmtree(repo_path_or_name) return url @staticmethod def _get_repo_url_from_name( repo_name: str, organization: Optional[str] = None, private: bool = None, use_auth_token: Optional[Union[bool, str]] = None, ) -> str: if isinstance(use_auth_token, str): token = use_auth_token elif use_auth_token: token = HfFolder.get_token() if token is None: raise ValueError( "You must login to the Hugging Face hub on this computer by typing `transformers-cli login` and " "entering your credentials to use `use_auth_token=True`. Alternatively, you can pass your own " "token as the `use_auth_token` argument." ) else: token = None # Special provision for the test endpoint (CI) return create_repo( token, repo_name, organization=organization, private=private, repo_type=None, exist_ok=True, ) @classmethod def _create_or_get_repo( cls, repo_path_or_name: Optional[str] = None, repo_url: Optional[str] = None, organization: Optional[str] = None, private: bool = None, use_auth_token: Optional[Union[bool, str]] = None, ) -> Repository: if repo_path_or_name is None and repo_url is None: raise ValueError("You need to specify a `repo_path_or_name` or a `repo_url`.") if use_auth_token is None and repo_url is None: use_auth_token = True if repo_path_or_name is None: repo_path_or_name = repo_url.split("/")[-1] if repo_url is None and not os.path.exists(repo_path_or_name): repo_name = Path(repo_path_or_name).name repo_url = cls._get_repo_url_from_name( repo_name, organization=organization, private=private, use_auth_token=use_auth_token ) # Create a working directory if it does not exist. if not os.path.exists(repo_path_or_name): os.makedirs(repo_path_or_name) repo = Repository(repo_path_or_name, clone_from=repo_url, use_auth_token=use_auth_token) repo.git_pull() return repo @classmethod def _push_to_hub(cls, repo: Repository, commit_message: Optional[str] = None) -> str: if commit_message is None: if "Tokenizer" in cls.__name__: commit_message = "add tokenizer" elif "Config" in cls.__name__: commit_message = "add config" else: commit_message = "add model" return repo.push_to_hub(commit_message=commit_message) def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): if token is None: token = HfFolder.get_token() if organization is None: username = whoami(token)["name"] return f"{username}/{model_id}" else: return f"{organization}/{model_id}" class ContextManagers: """ Wrapper for `contextlib.ExitStack` which enters a collection of context managers. Adaptation of `ContextManagers` in the `fastcore` library. """ def __init__(self, context_managers: List[ContextManager]): self.context_managers = context_managers self.stack = ExitStack() def __enter__(self): for context_manager in self.context_managers: self.stack.enter_context(context_manager) def __exit__(self, *args, **kwargs): self.stack.__exit__(*args, **kwargs)
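The module above chains `hf_bucket_url`, `url_to_filename`, and `cached_path` into a single resolve-and-cache pipeline, and `ModelOutput` is the dict-plus-dataclass container that model classes return. The sketch below shows how those pieces fit together; it is only an illustration and assumes the module is importable as `transformers.file_utils` (as in the 4.x series this file belongs to), that network access is available for the `cached_path` call, and that the repo id, filename, and ETag are arbitrary example values.

```python
from dataclasses import dataclass
from typing import Optional

import numpy as np

# Assumption: this module ships as transformers.file_utils (transformers 4.x).
from transformers.file_utils import ModelOutput, cached_path, hf_bucket_url, url_to_filename

# 1) Build the hub URL for a file in a model repo; with the default HUGGINGFACE_CO_PREFIX
#    this resolves to something like https://huggingface.co/bert-base-uncased/resolve/main/config.json
url = hf_bucket_url("bert-base-uncased", filename="config.json", revision="main")

# 2) The local cache name is sha256(url), with ".{sha256(etag)}" appended when an ETag is known.
local_name = url_to_filename(url, etag='"example-etag"')

# 3) Download the file into TRANSFORMERS_CACHE (or reuse the cached copy) and return its local path.
#    Needs network access unless the file is already cached.
config_path = cached_path(url)
print(local_name, config_path)


# 4) ModelOutput subclasses are dataclasses whose non-None fields double as ordered dict entries.
@dataclass
class ToyOutput(ModelOutput):
    logits: Optional[np.ndarray] = None
    hidden_states: Optional[np.ndarray] = None


out = ToyOutput(logits=np.array([0.1, 0.9]))
assert out["logits"] is out.logits   # key access and attribute access return the same object
assert len(out.to_tuple()) == 1      # fields left as None (hidden_states) are dropped
```

The first half of the sketch mirrors what `get_file_from_repo` does internally for a remote repo, since that helper composes `hf_bucket_url` and `cached_path` in the same way.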
py
1a43881335ec703441aa55748a9410a7f25adad3
from pathlib import Path import moderngl_window from moderngl_window import geometry class Gradient(moderngl_window.WindowConfig): title = "Gradient" resource_dir = (Path(__file__) / "../resources").absolute() aspect_ratio = None window_size = 720, 720 resizable = False samples = 16 def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.quad_fs = geometry.quad_fs() self.gradient_program = self.load_program("gradient_shader.glsl") self.gradient_program["wnd_size"].value = self.wnd.buffer_size # change this if you want it to go faster/slower self.gradient_program["speed"].value = 7.5 def render(self, time: float, frame_time: float) -> None: self.gradient_program["time"].value = time self.quad_fs.render(self.gradient_program) if __name__ == "__main__": Gradient.run()
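The Gradient config above expects a `resources/gradient_shader.glsl` program exposing the `wnd_size`, `speed`, and `time` uniforms it writes to, and its `__main__` guard starts it via `Gradient.run()`. For completeness, a minimal alternative launcher is sketched below; it assumes the class lives in a hypothetical `gradient.py`, and uses `moderngl_window.run_window_config`, the entry point that `run()` effectively wraps.

```python
# Hypothetical launcher for the Gradient config defined above (assumed importable from gradient.py).
import moderngl_window

from gradient import Gradient

if __name__ == "__main__":
    # Window/backend options are parsed from sys.argv (for example `--window headless`
    # for an offscreen run), then the render loop calls Gradient.render() once per frame.
    moderngl_window.run_window_config(Gradient)
```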
py
1a43884e8d88b3ee2325e60b4d887fdae0d30698
from __future__ import unicode_literals import re import sys import six from botocore.awsrequest import AWSPreparedRequest from moto.core.utils import ( str_to_rfc_1123_datetime, py2_strip_unicode_keys, unix_time_millis, ) from six.moves.urllib.parse import parse_qs, urlparse, unquote, parse_qsl import xmltodict from moto.packages.httpretty.core import HTTPrettyRequest from moto.core.responses import _TemplateEnvironmentMixin, ActionAuthenticatorMixin from moto.core.utils import path_url from moto.core import ACCOUNT_ID from moto.s3bucket_path.utils import ( bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys, ) from .exceptions import ( BucketAlreadyExists, DuplicateTagKeys, InvalidContinuationToken, S3ClientError, MissingBucket, MissingKey, MissingVersion, InvalidPartOrder, MalformedXML, MalformedACLError, IllegalLocationConstraintException, InvalidNotificationARN, InvalidNotificationEvent, ObjectNotInActiveTierError, NoSystemTags, PreconditionFailed, InvalidRange, ) from .models import ( s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, ) from .utils import ( bucket_name_from_url, clean_key_name, undo_clean_key_name, metadata_from_headers, parse_region_from_url, ) from xml.dom import minidom DEFAULT_REGION_NAME = "us-east-1" ACTION_MAP = { "BUCKET": { "GET": { "uploads": "ListBucketMultipartUploads", "location": "GetBucketLocation", "lifecycle": "GetLifecycleConfiguration", "versioning": "GetBucketVersioning", "policy": "GetBucketPolicy", "website": "GetBucketWebsite", "acl": "GetBucketAcl", "tagging": "GetBucketTagging", "logging": "GetBucketLogging", "cors": "GetBucketCORS", "notification": "GetBucketNotification", "accelerate": "GetAccelerateConfiguration", "versions": "ListBucketVersions", "public_access_block": "GetPublicAccessBlock", "DEFAULT": "ListBucket", }, "PUT": { "lifecycle": "PutLifecycleConfiguration", "versioning": "PutBucketVersioning", "policy": "PutBucketPolicy", "website": "PutBucketWebsite", "acl": "PutBucketAcl", "tagging": "PutBucketTagging", "logging": "PutBucketLogging", "cors": "PutBucketCORS", "notification": "PutBucketNotification", "accelerate": "PutAccelerateConfiguration", "public_access_block": "PutPublicAccessBlock", "DEFAULT": "CreateBucket", }, "DELETE": { "lifecycle": "PutLifecycleConfiguration", "policy": "DeleteBucketPolicy", "tagging": "PutBucketTagging", "cors": "PutBucketCORS", "public_access_block": "DeletePublicAccessBlock", "DEFAULT": "DeleteBucket", }, }, "KEY": { "GET": { "uploadId": "ListMultipartUploadParts", "acl": "GetObjectAcl", "tagging": "GetObjectTagging", "versionId": "GetObjectVersion", "DEFAULT": "GetObject", }, "PUT": { "acl": "PutObjectAcl", "tagging": "PutObjectTagging", "DEFAULT": "PutObject", }, "DELETE": { "uploadId": "AbortMultipartUpload", "versionId": "DeleteObjectVersion", "DEFAULT": " DeleteObject", }, "POST": { "uploads": "PutObject", "restore": "RestoreObject", "uploadId": "PutObject", }, }, "CONTROL": { "GET": {"publicAccessBlock": "GetPublicAccessBlock"}, "PUT": {"publicAccessBlock": "PutPublicAccessBlock"}, "DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"}, }, } def parse_key_name(pth): # strip the first '/' left by urlparse return pth[1:] if pth.startswith("/") else pth def is_delete_keys(request, path, bucket_name): # GOlang sends a request as url/?delete= (treating it as a normal key=value, even if the value is empty) # Python sends a request as url/?delete (treating it as a flag) # 
https://github.com/spulec/moto/issues/2937 return ( path == "/?delete" or path == "/?delete=" or (path == "/" and getattr(request, "query_string", "") == "delete") ) class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): def __init__(self, backend): super(ResponseObject, self).__init__() self.backend = backend self.method = "" self.path = "" self.data = {} self.headers = {} @property def should_autoescape(self): return True def all_buckets(self): self.data["Action"] = "ListAllMyBuckets" self._authenticate_and_authorize_s3_action() # No bucket specified. Listing all buckets all_buckets = self.backend.get_all_buckets() template = self.response_template(S3_ALL_BUCKETS) return template.render(buckets=all_buckets) def subdomain_based_buckets(self, request): host = request.headers.get("host", request.headers.get("Host")) if not host: host = urlparse(request.url).netloc if ( not host or host.startswith("localhost") or host.startswith("localstack") or re.match(r"^[^.]+$", host) or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host) ): # Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev), # (3) local host names that do not contain a "." (e.g., Docker container host names), or # (4) kubernetes host names return False match = re.match(r"^([^\[\]:]+)(:\d+)?$", host) if match: match = re.match( r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}", match.groups()[0] ) if match: return False match = re.match(r"^\[(.+)\](:\d+)?$", host) if match: match = re.match( r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z", match.groups()[0], re.IGNORECASE, ) if match: return False path_based = host == "s3.amazonaws.com" or re.match( r"s3[\.\-]([^.]*)\.amazonaws\.com", host ) return not path_based def is_delete_keys(self, request, path, bucket_name): if self.subdomain_based_buckets(request): return is_delete_keys(request, path, bucket_name) else: return bucketpath_is_delete_keys(request, path, bucket_name) def parse_bucket_name_from_url(self, request, url): if self.subdomain_based_buckets(request): return bucket_name_from_url(url) else: return bucketpath_bucket_name_from_url(url) def parse_key_name(self, request, url): if self.subdomain_based_buckets(request): return parse_key_name(url) else: return bucketpath_parse_key_name(url) def ambiguous_response(self, request, full_url, headers): # Depending on which calling format the client is using, we don't know # if this is a bucket or key request so we have to check if self.subdomain_based_buckets(request): return self.key_or_control_response(request, full_url, headers) else: # Using path-based buckets return self.bucket_response(request, full_url, headers) def bucket_response(self, request, full_url, headers): self.method = request.method self.path = self._get_path(request) self.headers = request.headers if "host" not in self.headers: self.headers["host"] = urlparse(full_url).netloc try: response = self._bucket_response(request, full_url, headers) except S3ClientError as s3error: response = s3error.code, {}, s3error.description return self._send_response(response) @staticmethod def _send_response(response): if isinstance(response, six.string_types): return 200, {}, response.encode("utf-8") else: status_code, headers, response_content = response if not isinstance(response_content, six.binary_type): response_content = response_content.encode("utf-8") return status_code, headers, response_content def 
_bucket_response(self, request, full_url, headers): querystring = self._get_querystring(full_url) method = request.method region_name = parse_region_from_url(full_url) bucket_name = self.parse_bucket_name_from_url(request, full_url) if not bucket_name: # If no bucket specified, list all buckets return self.all_buckets() self.data["BucketName"] = bucket_name if hasattr(request, "body"): # Boto body = request.body else: # Flask server body = request.data if body is None: body = b"" if isinstance(body, six.binary_type): body = body.decode("utf-8") body = "{0}".format(body).encode("utf-8") if method == "HEAD": return self._bucket_response_head(bucket_name) elif method == "GET": return self._bucket_response_get(bucket_name, querystring) elif method == "PUT": return self._bucket_response_put( request, body, region_name, bucket_name, querystring ) elif method == "DELETE": return self._bucket_response_delete(body, bucket_name, querystring) elif method == "POST": return self._bucket_response_post(request, body, bucket_name) else: raise NotImplementedError( "Method {0} has not been implemented in the S3 backend yet".format( method ) ) @staticmethod def _get_querystring(full_url): parsed_url = urlparse(full_url) querystring = parse_qs(parsed_url.query, keep_blank_values=True) return querystring def _bucket_response_head(self, bucket_name): try: self.backend.get_bucket(bucket_name) except MissingBucket: # Unless we do this, boto3 does not raise ClientError on # HEAD (which the real API responds with), and instead # raises NoSuchBucket, leading to inconsistency in # error response between real and mocked responses. return 404, {}, "" return 200, {}, "" def _bucket_response_get(self, bucket_name, querystring): self._set_action("BUCKET", "GET", querystring) self._authenticate_and_authorize_s3_action() if "uploads" in querystring: for unsup in ("delimiter", "max-uploads"): if unsup in querystring: raise NotImplementedError( "Listing multipart uploads with {} has not been implemented yet.".format( unsup ) ) multiparts = list(self.backend.get_all_multiparts(bucket_name).values()) if "prefix" in querystring: prefix = querystring.get("prefix", [None])[0] multiparts = [ upload for upload in multiparts if upload.key_name.startswith(prefix) ] template = self.response_template(S3_ALL_MULTIPARTS) return template.render(bucket_name=bucket_name, uploads=multiparts) elif "location" in querystring: bucket = self.backend.get_bucket(bucket_name) template = self.response_template(S3_BUCKET_LOCATION) location = bucket.location # us-east-1 is different - returns a None location if location == DEFAULT_REGION_NAME: location = None return template.render(location=location) elif "lifecycle" in querystring: bucket = self.backend.get_bucket(bucket_name) if not bucket.rules: template = self.response_template(S3_NO_LIFECYCLE) return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION) return template.render(rules=bucket.rules) elif "versioning" in querystring: versioning = self.backend.get_bucket_versioning(bucket_name) template = self.response_template(S3_BUCKET_GET_VERSIONING) return template.render(status=versioning) elif "policy" in querystring: policy = self.backend.get_bucket_policy(bucket_name) if not policy: template = self.response_template(S3_NO_POLICY) return 404, {}, template.render(bucket_name=bucket_name) return 200, {}, policy elif "website" in querystring: website_configuration = self.backend.get_bucket_website_configuration( bucket_name ) if not 
website_configuration: template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG) return 404, {}, template.render(bucket_name=bucket_name) return 200, {}, website_configuration elif "acl" in querystring: bucket = self.backend.get_bucket(bucket_name) template = self.response_template(S3_OBJECT_ACL_RESPONSE) return template.render(obj=bucket) elif "tagging" in querystring: tags = self.backend.get_bucket_tagging(bucket_name)["Tags"] # "Special Error" if no tags: if len(tags) == 0: template = self.response_template(S3_NO_BUCKET_TAGGING) return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_OBJECT_TAGGING_RESPONSE) return template.render(tags=tags) elif "logging" in querystring: logging = self.backend.get_bucket_logging(bucket_name) if not logging: template = self.response_template(S3_NO_LOGGING_CONFIG) return 200, {}, template.render() template = self.response_template(S3_LOGGING_CONFIG) return 200, {}, template.render(logging=logging) elif "cors" in querystring: cors = self.backend.get_bucket_cors(bucket_name) if len(cors) == 0: template = self.response_template(S3_NO_CORS_CONFIG) return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_CORS_RESPONSE) return template.render(cors=cors) elif "notification" in querystring: notification_configuration = self.backend.get_bucket_notification_configuration( bucket_name ) if not notification_configuration: return 200, {}, "" template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG) return template.render(config=notification_configuration) elif "accelerate" in querystring: bucket = self.backend.get_bucket(bucket_name) if bucket.accelerate_configuration is None: template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET) return 200, {}, template.render() template = self.response_template(S3_BUCKET_ACCELERATE) return template.render(bucket=bucket) elif "publicAccessBlock" in querystring: public_block_config = self.backend.get_bucket_public_access_block( bucket_name ) template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION) return template.render(public_block_config=public_block_config) elif "versions" in querystring: delimiter = querystring.get("delimiter", [None])[0] encoding_type = querystring.get("encoding-type", [None])[0] key_marker = querystring.get("key-marker", [None])[0] max_keys = querystring.get("max-keys", [None])[0] prefix = querystring.get("prefix", [""])[0] version_id_marker = querystring.get("version-id-marker", [None])[0] bucket = self.backend.get_bucket(bucket_name) versions = self.backend.get_bucket_versions( bucket_name, delimiter=delimiter, encoding_type=encoding_type, key_marker=key_marker, max_keys=max_keys, version_id_marker=version_id_marker, prefix=prefix, ) latest_versions = self.backend.get_bucket_latest_versions( bucket_name=bucket_name ) key_list = [] delete_marker_list = [] for version in versions: if isinstance(version, FakeKey): key_list.append(version) else: delete_marker_list.append(version) template = self.response_template(S3_BUCKET_GET_VERSIONS) key_list.sort(key=lambda r: (r.name, -unix_time_millis(r.last_modified))) return ( 200, {}, template.render( key_list=key_list, delete_marker_list=delete_marker_list, latest_versions=latest_versions, bucket=bucket, prefix="", max_keys=1000, delimiter="", is_truncated="false", ), ) elif "encryption" in querystring: encryption = self.backend.get_bucket_encryption(bucket_name) if not encryption: template = self.response_template(S3_NO_ENCRYPTION) return 404, {}, 
template.render(bucket_name=bucket_name) template = self.response_template(S3_ENCRYPTION_CONFIG) return 200, {}, template.render(encryption=encryption) elif querystring.get("list-type", [None])[0] == "2": return 200, {}, self._handle_list_objects_v2(bucket_name, querystring) bucket = self.backend.get_bucket(bucket_name) prefix = querystring.get("prefix", [None])[0] if prefix and isinstance(prefix, six.binary_type): prefix = prefix.decode("utf-8") delimiter = querystring.get("delimiter", [None])[0] max_keys = int(querystring.get("max-keys", [1000])[0]) marker = querystring.get("marker", [None])[0] result_keys, result_folders = self.backend.prefix_query( bucket, prefix, delimiter ) if marker: result_keys = self._get_results_from_token(result_keys, marker) result_keys, is_truncated, next_marker = self._truncate_result( result_keys, max_keys ) template = self.response_template(S3_BUCKET_GET_RESPONSE) return ( 200, {}, template.render( bucket=bucket, prefix=prefix, delimiter=delimiter, result_keys=result_keys, result_folders=result_folders, is_truncated=is_truncated, next_marker=next_marker, max_keys=max_keys, ), ) def _set_action(self, action_resource_type, method, querystring): action_set = False for action_in_querystring, action in ACTION_MAP[action_resource_type][ method ].items(): if action_in_querystring in querystring: self.data["Action"] = action action_set = True if not action_set: self.data["Action"] = ACTION_MAP[action_resource_type][method]["DEFAULT"] def _handle_list_objects_v2(self, bucket_name, querystring): template = self.response_template(S3_BUCKET_GET_RESPONSE_V2) bucket = self.backend.get_bucket(bucket_name) continuation_token = querystring.get("continuation-token", [None])[0] if continuation_token is not None and continuation_token == "": raise InvalidContinuationToken() prefix = querystring.get("prefix", [None])[0] if prefix and isinstance(prefix, six.binary_type): prefix = prefix.decode("utf-8") delimiter = querystring.get("delimiter", [None])[0] result_keys, result_folders = self.backend.prefix_query( bucket, prefix, delimiter ) fetch_owner = querystring.get("fetch-owner", [False])[0] max_keys = int(querystring.get("max-keys", [1000])[0]) start_after = querystring.get("start-after", [None])[0] # sort the combination of folders and keys into lexicographical order all_keys = result_keys + result_folders all_keys.sort(key=self._get_name) if continuation_token or start_after: limit = continuation_token or start_after all_keys = self._get_results_from_token(all_keys, limit) truncated_keys, is_truncated, next_continuation_token = self._truncate_result( all_keys, max_keys ) result_keys, result_folders = self._split_truncated_keys(truncated_keys) key_count = len(result_keys) + len(result_folders) return template.render( bucket=bucket, prefix=prefix or "", delimiter=delimiter, key_count=key_count, result_keys=result_keys, result_folders=result_folders, fetch_owner=fetch_owner, max_keys=max_keys, is_truncated=is_truncated, next_continuation_token=next_continuation_token, start_after=None if continuation_token else start_after, ) @staticmethod def _get_name(key): if isinstance(key, FakeKey): return key.name else: return key @staticmethod def _split_truncated_keys(truncated_keys): result_keys = [] result_folders = [] for key in truncated_keys: if isinstance(key, FakeKey): result_keys.append(key) else: result_folders.append(key) return result_keys, result_folders def _get_results_from_token(self, result_keys, token): continuation_index = 0 for key in result_keys: if (key.name if 
isinstance(key, FakeKey) else key) > token: break continuation_index += 1 return result_keys[continuation_index:] def _truncate_result(self, result_keys, max_keys): if len(result_keys) > max_keys: is_truncated = "true" result_keys = result_keys[:max_keys] item = result_keys[-1] next_continuation_token = item.name if isinstance(item, FakeKey) else item else: is_truncated = "false" next_continuation_token = None return result_keys, is_truncated, next_continuation_token def _body_contains_location_constraint(self, body): if body: try: xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"] return True except KeyError: pass return False def _create_bucket_configuration_is_empty(self, body): if body: try: create_bucket_configuration = xmltodict.parse(body)[ "CreateBucketConfiguration" ] del create_bucket_configuration["@xmlns"] if len(create_bucket_configuration) == 0: return True except KeyError: pass return False def _parse_pab_config(self, body): parsed_xml = xmltodict.parse(body) parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None) # If Python 2, fix the unicode strings: if sys.version_info[0] < 3: parsed_xml = { "PublicAccessBlockConfiguration": py2_strip_unicode_keys( dict(parsed_xml["PublicAccessBlockConfiguration"]) ) } return parsed_xml def _bucket_response_put( self, request, body, region_name, bucket_name, querystring ): if not request.headers.get("Content-Length"): return 411, {}, "Content-Length required" self._set_action("BUCKET", "PUT", querystring) self._authenticate_and_authorize_s3_action() if "versioning" in querystring: ver = re.search("<Status>([A-Za-z]+)</Status>", body.decode()) if ver: self.backend.set_bucket_versioning(bucket_name, ver.group(1)) template = self.response_template(S3_BUCKET_VERSIONING) return template.render(bucket_versioning_status=ver.group(1)) else: return 404, {}, "" elif "lifecycle" in querystring: rules = xmltodict.parse(body)["LifecycleConfiguration"]["Rule"] if not isinstance(rules, list): # If there is only one rule, xmldict returns just the item rules = [rules] self.backend.set_bucket_lifecycle(bucket_name, rules) return "" elif "policy" in querystring: self.backend.set_bucket_policy(bucket_name, body) return "True" elif "acl" in querystring: # Headers are first. 
If not set, then look at the body (consistent with the documentation): acls = self._acl_from_headers(request.headers) if not acls: acls = self._acl_from_xml(body) self.backend.set_bucket_acl(bucket_name, acls) return "" elif "tagging" in querystring: tagging = self._bucket_tagging_from_xml(body) self.backend.put_bucket_tagging(bucket_name, tagging) return "" elif "website" in querystring: self.backend.set_bucket_website_configuration(bucket_name, body) return "" elif "cors" in querystring: try: self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body)) return "" except KeyError: raise MalformedXML() elif "logging" in querystring: try: self.backend.put_bucket_logging( bucket_name, self._logging_from_xml(body) ) return "" except KeyError: raise MalformedXML() elif "notification" in querystring: try: self.backend.put_bucket_notification_configuration( bucket_name, self._notification_config_from_xml(body) ) return "" except KeyError: raise MalformedXML() except Exception as e: raise e elif "accelerate" in querystring: try: accelerate_status = self._accelerate_config_from_xml(body) self.backend.put_bucket_accelerate_configuration( bucket_name, accelerate_status ) return "" except KeyError: raise MalformedXML() except Exception as e: raise e elif "publicAccessBlock" in querystring: pab_config = self._parse_pab_config(body) self.backend.put_bucket_public_access_block( bucket_name, pab_config["PublicAccessBlockConfiguration"] ) return "" elif "encryption" in querystring: try: self.backend.put_bucket_encryption( bucket_name, self._encryption_config_from_xml(body) ) return "" except KeyError: raise MalformedXML() except Exception as e: raise e else: # us-east-1, the default AWS region behaves a bit differently # - you should not use it as a location constraint --> it fails # - querying the location constraint returns None # - LocationConstraint has to be specified if outside us-east-1 if ( region_name != DEFAULT_REGION_NAME and not self._body_contains_location_constraint(body) ): raise IllegalLocationConstraintException() if body: if self._create_bucket_configuration_is_empty(body): raise MalformedXML() try: forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][ "LocationConstraint" ] if forced_region == DEFAULT_REGION_NAME: raise S3ClientError( "InvalidLocationConstraint", "The specified location-constraint is not valid", ) else: region_name = forced_region except KeyError: pass try: new_bucket = self.backend.create_bucket(bucket_name, region_name) except BucketAlreadyExists: if region_name == DEFAULT_REGION_NAME: # us-east-1 has different behavior new_bucket = self.backend.get_bucket(bucket_name) else: raise if "x-amz-acl" in request.headers: # TODO: Support the XML-based ACL format self.backend.set_bucket_acl( bucket_name, self._acl_from_headers(request.headers) ) template = self.response_template(S3_BUCKET_CREATE_RESPONSE) return 200, {}, template.render(bucket=new_bucket) def _bucket_response_delete(self, body, bucket_name, querystring): self._set_action("BUCKET", "DELETE", querystring) self._authenticate_and_authorize_s3_action() if "policy" in querystring: self.backend.delete_bucket_policy(bucket_name, body) return 204, {}, "" elif "tagging" in querystring: self.backend.delete_bucket_tagging(bucket_name) return 204, {}, "" elif "cors" in querystring: self.backend.delete_bucket_cors(bucket_name) return 204, {}, "" elif "lifecycle" in querystring: bucket = self.backend.get_bucket(bucket_name) bucket.delete_lifecycle() return 204, {}, "" elif "publicAccessBlock" in 
querystring: self.backend.delete_bucket_public_access_block(bucket_name) return 204, {}, "" elif "encryption" in querystring: bucket = self.backend.delete_bucket_encryption(bucket_name) return 204, {}, "" removed_bucket = self.backend.delete_bucket(bucket_name) if removed_bucket: # Bucket exists template = self.response_template(S3_DELETE_BUCKET_SUCCESS) return 204, {}, template.render(bucket=removed_bucket) else: # Tried to delete a bucket that still has keys template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR) return 409, {}, template.render(bucket=removed_bucket) def _bucket_response_post(self, request, body, bucket_name): response_headers = {} if not request.headers.get("Content-Length"): return 411, {}, "Content-Length required" path = self._get_path(request) if self.is_delete_keys(request, path, bucket_name): self.data["Action"] = "DeleteObject" self._authenticate_and_authorize_s3_action() return self._bucket_response_delete_keys(request, body, bucket_name) self.data["Action"] = "PutObject" self._authenticate_and_authorize_s3_action() # POST to bucket-url should create file from form if hasattr(request, "form"): # Not HTTPretty form = request.form else: # HTTPretty, build new form object body = body.decode() form = dict(parse_qsl(body)) key = form["key"] if "file" in form: f = form["file"] else: f = request.files["file"].stream.read() if "success_action_redirect" in form: response_headers["Location"] = form["success_action_redirect"] if "success_action_status" in form: status_code = form["success_action_status"] elif "success_action_redirect" in form: status_code = 303 else: status_code = 204 new_key = self.backend.set_object(bucket_name, key, f) if form.get("acl"): acl = get_canned_acl(form.get("acl")) new_key.set_acl(acl) # Metadata metadata = metadata_from_headers(form) new_key.set_metadata(metadata) return status_code, response_headers, "" @staticmethod def _get_path(request): if isinstance(request, HTTPrettyRequest): path = request.path else: path = ( request.full_path if hasattr(request, "full_path") else path_url(request.url) ) return path def _bucket_response_delete_keys(self, request, body, bucket_name): template = self.response_template(S3_DELETE_KEYS_RESPONSE) body_dict = xmltodict.parse(body) objects = body_dict["Delete"].get("Object", []) if not isinstance(objects, list): # We expect a list of objects, but when there is a single <Object> node xmltodict does not # return a list. 
objects = [objects] if len(objects) == 0: raise MalformedXML() deleted_objects = [] error_names = [] for object_ in objects: key_name = object_["Key"] version_id = object_.get("VersionId", None) success, _ = self.backend.delete_object( bucket_name, undo_clean_key_name(key_name), version_id=version_id ) if success: deleted_objects.append((key_name, version_id)) else: error_names.append(key_name) return ( 200, {}, template.render(deleted=deleted_objects, delete_errors=error_names), ) def _handle_range_header(self, request, headers, response_content): response_headers = {} length = len(response_content) last = length - 1 _, rspec = request.headers.get("range").split("=") if "," in rspec: raise NotImplementedError("Multiple range specifiers not supported") def toint(i): return int(i) if i else None begin, end = map(toint, rspec.split("-")) if begin is not None: # byte range end = last if end is None else min(end, last) elif end is not None: # suffix byte range begin = length - min(end, length) end = last else: return 400, response_headers, "" if begin < 0 or end > last or begin > min(end, last): raise InvalidRange( actual_size=str(length), range_requested=request.headers.get("range") ) response_headers["content-range"] = "bytes {0}-{1}/{2}".format( begin, end, length ) content = response_content[begin : end + 1] response_headers["content-length"] = len(content) return 206, response_headers, content def key_or_control_response(self, request, full_url, headers): # Key and Control are lumped in because splitting out the regex is too much of a pain :/ self.method = request.method self.path = self._get_path(request) self.headers = request.headers if "host" not in self.headers: self.headers["host"] = urlparse(full_url).netloc response_headers = {} try: # Is this an S3 control response? if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url: response = self._control_response(request, full_url, headers) else: response = self._key_response(request, full_url, headers) except S3ClientError as s3error: response = s3error.code, {}, s3error.description if isinstance(response, six.string_types): status_code = 200 response_content = response else: status_code, response_headers, response_content = response if ( status_code == 200 and "range" in request.headers and request.headers["range"] != "" ): try: return self._handle_range_header( request, response_headers, response_content ) except S3ClientError as s3error: return s3error.code, {}, s3error.description return status_code, response_headers, response_content def _control_response(self, request, full_url, headers): parsed_url = urlparse(full_url) query = parse_qs(parsed_url.query, keep_blank_values=True) method = request.method if hasattr(request, "body"): # Boto body = request.body if hasattr(body, "read"): body = body.read() else: # Flask server body = request.data if body is None: body = b"" if method == "GET": return self._control_response_get(request, query, headers) elif method == "PUT": return self._control_response_put(request, body, query, headers) elif method == "DELETE": return self._control_response_delete(request, query, headers) else: raise NotImplementedError( "Method {0} has not been implemented in the S3 backend yet".format( method ) ) def _control_response_get(self, request, query, headers): action = self.path.split("?")[0].split("/")[ -1 ] # Gets the action out of the URL sans query params. 
self._set_action("CONTROL", "GET", action) self._authenticate_and_authorize_s3_action() response_headers = {} if "publicAccessBlock" in action: public_block_config = self.backend.get_account_public_access_block( headers["x-amz-account-id"] ) template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION) return ( 200, response_headers, template.render(public_block_config=public_block_config), ) raise NotImplementedError( "Method {0} has not been implemented in the S3 backend yet".format(action) ) def _control_response_put(self, request, body, query, headers): action = self.path.split("?")[0].split("/")[ -1 ] # Gets the action out of the URL sans query params. self._set_action("CONTROL", "PUT", action) self._authenticate_and_authorize_s3_action() response_headers = {} if "publicAccessBlock" in action: pab_config = self._parse_pab_config(body) self.backend.put_account_public_access_block( headers["x-amz-account-id"], pab_config["PublicAccessBlockConfiguration"], ) return 200, response_headers, "" raise NotImplementedError( "Method {0} has not been implemented in the S3 backend yet".format(action) ) def _control_response_delete(self, request, query, headers): action = self.path.split("?")[0].split("/")[ -1 ] # Gets the action out of the URL sans query params. self._set_action("CONTROL", "DELETE", action) self._authenticate_and_authorize_s3_action() response_headers = {} if "publicAccessBlock" in action: self.backend.delete_account_public_access_block(headers["x-amz-account-id"]) return 200, response_headers, "" raise NotImplementedError( "Method {0} has not been implemented in the S3 backend yet".format(action) ) def _key_response(self, request, full_url, headers): parsed_url = urlparse(full_url) query = parse_qs(parsed_url.query, keep_blank_values=True) method = request.method key_name = self.parse_key_name(request, parsed_url.path) bucket_name = self.parse_bucket_name_from_url(request, full_url) # Because we patch the requests library the boto/boto3 API # requests go through this method but so do # `requests.get("https://bucket-name.s3.amazonaws.com/file-name")` # Here we deny public access to private files by checking the # ACL and checking for the mere presence of an Authorization # header. 
if "Authorization" not in request.headers: if hasattr(request, "url"): signed_url = "Signature=" in request.url elif hasattr(request, "requestline"): signed_url = "Signature=" in request.path key = self.backend.get_object(bucket_name, key_name) if key: if not key.acl.public_read and not signed_url: return 403, {}, "" elif signed_url: # coming in from requests.get(s3.generate_presigned_url()) if self._invalid_headers(request.url, dict(request.headers)): return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS if hasattr(request, "body"): # Boto body = request.body if hasattr(body, "read"): body = body.read() else: # Flask server body = request.data # when the data is being passed as a file if request.files and not body: for _, value in request.files.items(): body = value.stream.read() if body is None: body = b"" if method == "GET": return self._key_response_get( bucket_name, query, key_name, headers=request.headers ) elif method == "PUT": return self._key_response_put( request, body, bucket_name, query, key_name, headers ) elif method == "HEAD": return self._key_response_head( bucket_name, query, key_name, headers=request.headers ) elif method == "DELETE": return self._key_response_delete(bucket_name, query, key_name) elif method == "POST": return self._key_response_post(request, body, bucket_name, query, key_name) else: raise NotImplementedError( "Method {0} has not been implemented in the S3 backend yet".format( method ) ) def _key_response_get(self, bucket_name, query, key_name, headers): self._set_action("KEY", "GET", query) self._authenticate_and_authorize_s3_action() response_headers = {} if query.get("uploadId"): upload_id = query["uploadId"][0] parts = self.backend.list_multipart(bucket_name, upload_id) template = self.response_template(S3_MULTIPART_LIST_RESPONSE) return ( 200, response_headers, template.render( bucket_name=bucket_name, key_name=key_name, upload_id=upload_id, count=len(parts), parts=parts, ), ) version_id = query.get("versionId", [None])[0] if_modified_since = headers.get("If-Modified-Since", None) if_match = headers.get("If-Match", None) if_none_match = headers.get("If-None-Match", None) if_unmodified_since = headers.get("If-Unmodified-Since", None) key = self.backend.get_object(bucket_name, key_name, version_id=version_id) if key is None and version_id is None: raise MissingKey(key_name) elif key is None: raise MissingVersion(version_id) if if_unmodified_since: if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since) if key.last_modified > if_unmodified_since: raise PreconditionFailed("If-Unmodified-Since") if if_match and key.etag != if_match: raise PreconditionFailed("If-Match") if if_modified_since: if_modified_since = str_to_rfc_1123_datetime(if_modified_since) if key.last_modified < if_modified_since: return 304, response_headers, "Not Modified" if if_none_match and key.etag == if_none_match: return 304, response_headers, "Not Modified" if "acl" in query: template = self.response_template(S3_OBJECT_ACL_RESPONSE) return 200, response_headers, template.render(obj=key) if "tagging" in query: tags = self.backend.get_key_tags(key)["Tags"] template = self.response_template(S3_OBJECT_TAGGING_RESPONSE) return 200, response_headers, template.render(tags=tags) response_headers.update(key.metadata) response_headers.update(key.response_dict) return 200, response_headers, key.value def _key_response_put(self, request, body, bucket_name, query, key_name, headers): self._set_action("KEY", "PUT", query) self._authenticate_and_authorize_s3_action() response_headers = {} if 
query.get("uploadId") and query.get("partNumber"): upload_id = query["uploadId"][0] part_number = int(query["partNumber"][0]) if "x-amz-copy-source" in request.headers: src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/") src_bucket, src_key = src.split("/", 1) src_key, src_version_id = ( src_key.split("?versionId=") if "?versionId=" in src_key else (src_key, None) ) src_range = request.headers.get("x-amz-copy-source-range", "").split( "bytes=" )[-1] try: start_byte, end_byte = src_range.split("-") start_byte, end_byte = int(start_byte), int(end_byte) except ValueError: start_byte, end_byte = None, None if self.backend.get_object( src_bucket, src_key, version_id=src_version_id ): key = self.backend.copy_part( bucket_name, upload_id, part_number, src_bucket, src_key, src_version_id, start_byte, end_byte, ) else: return 404, response_headers, "" template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE) response = template.render(part=key) else: key = self.backend.set_part(bucket_name, upload_id, part_number, body) response = "" response_headers.update(key.response_dict) return 200, response_headers, response storage_class = request.headers.get("x-amz-storage-class", "STANDARD") encryption = request.headers.get("x-amz-server-side-encryption", None) kms_key_id = request.headers.get( "x-amz-server-side-encryption-aws-kms-key-id", None ) bucket_key_enabled = request.headers.get( "x-amz-server-side-encryption-bucket-key-enabled", None ) if bucket_key_enabled is not None: bucket_key_enabled = str(bucket_key_enabled).lower() acl = self._acl_from_headers(request.headers) if acl is None: acl = self.backend.get_bucket(bucket_name).acl tagging = self._tagging_from_headers(request.headers) if "acl" in query: key = self.backend.get_object(bucket_name, key_name) # TODO: Support the XML-based ACL format key.set_acl(acl) return 200, response_headers, "" if "tagging" in query: if "versionId" in query: version_id = query["versionId"][0] else: version_id = None key = self.backend.get_object(bucket_name, key_name, version_id=version_id) tagging = self._tagging_from_xml(body) self.backend.set_key_tags(key, tagging, key_name) return 200, response_headers, "" if "x-amz-copy-source" in request.headers: # Copy key # you can have a quoted ?version=abc with a version Id, so work on # we need to parse the unquoted string first src_key = request.headers.get("x-amz-copy-source") if isinstance(src_key, six.binary_type): src_key = src_key.decode("utf-8") src_key_parsed = urlparse(src_key) src_bucket, src_key = ( clean_key_name(src_key_parsed.path).lstrip("/").split("/", 1) ) src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0] key = self.backend.get_object( src_bucket, src_key, version_id=src_version_id ) if key is not None: if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]: if key.response_dict.get( "x-amz-restore" ) is None or 'ongoing-request="true"' in key.response_dict.get( "x-amz-restore" ): raise ObjectNotInActiveTierError(key) self.backend.copy_key( src_bucket, src_key, bucket_name, key_name, storage=storage_class, acl=acl, src_version_id=src_version_id, ) else: return 404, response_headers, "" new_key = self.backend.get_object(bucket_name, key_name) mdirective = request.headers.get("x-amz-metadata-directive") if mdirective is not None and mdirective == "REPLACE": metadata = metadata_from_headers(request.headers) new_key.set_metadata(metadata, replace=True) tdirective = request.headers.get("x-amz-tagging-directive") if tdirective == "REPLACE": tagging = 
self._tagging_from_headers(request.headers) self.backend.set_key_tags(new_key, tagging) template = self.response_template(S3_OBJECT_COPY_RESPONSE) response_headers.update(new_key.response_dict) return 200, response_headers, template.render(key=new_key) streaming_request = hasattr(request, "streaming") and request.streaming closing_connection = headers.get("connection") == "close" if closing_connection and streaming_request: # Closing the connection of a streaming request. No more data new_key = self.backend.get_object(bucket_name, key_name) elif streaming_request: # Streaming request, more data new_key = self.backend.append_to_key(bucket_name, key_name, body) else: # Initial data new_key = self.backend.set_object( bucket_name, key_name, body, storage=storage_class, encryption=encryption, kms_key_id=kms_key_id, bucket_key_enabled=bucket_key_enabled, ) request.streaming = True metadata = metadata_from_headers(request.headers) metadata.update(metadata_from_headers(query)) new_key.set_metadata(metadata) new_key.set_acl(acl) new_key.website_redirect_location = request.headers.get( "x-amz-website-redirect-location" ) self.backend.set_key_tags(new_key, tagging) response_headers.update(new_key.response_dict) return 200, response_headers, "" def _key_response_head(self, bucket_name, query, key_name, headers): response_headers = {} version_id = query.get("versionId", [None])[0] part_number = query.get("partNumber", [None])[0] if part_number: part_number = int(part_number) if_modified_since = headers.get("If-Modified-Since", None) if_match = headers.get("If-Match", None) if_none_match = headers.get("If-None-Match", None) if_unmodified_since = headers.get("If-Unmodified-Since", None) key = self.backend.get_object( bucket_name, key_name, version_id=version_id, part_number=part_number ) if key: response_headers.update(key.metadata) response_headers.update(key.response_dict) if if_unmodified_since: if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since) if key.last_modified > if_unmodified_since: return 412, response_headers, "" if if_match and key.etag != if_match: return 412, response_headers, "" if if_modified_since: if_modified_since = str_to_rfc_1123_datetime(if_modified_since) if key.last_modified < if_modified_since: return 304, response_headers, "Not Modified" if if_none_match and key.etag == if_none_match: return 304, response_headers, "Not Modified" return 200, response_headers, "" else: return 404, response_headers, "" def _acl_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) if not parsed_xml.get("AccessControlPolicy"): raise MalformedACLError() # The owner is needed for some reason... if not parsed_xml["AccessControlPolicy"].get("Owner"): # TODO: Validate that the Owner is actually correct. 
raise MalformedACLError() # If empty, then no ACLs: if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None: return [] if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"): raise MalformedACLError() permissions = ["READ", "WRITE", "READ_ACP", "WRITE_ACP", "FULL_CONTROL"] if not isinstance( parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list ): parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = [ parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] ] grants = self._get_grants_from_xml( parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], MalformedACLError, permissions, ) return FakeAcl(grants) def _get_grants_from_xml(self, grant_list, exception_type, permissions): grants = [] for grant in grant_list: if grant.get("Permission", "") not in permissions: raise exception_type() if grant["Grantee"].get("@xsi:type", "") not in [ "CanonicalUser", "AmazonCustomerByEmail", "Group", ]: raise exception_type() # TODO: Verify that the proper grantee data is supplied based on the type. grants.append( FakeGrant( [ FakeGrantee( id=grant["Grantee"].get("ID", ""), display_name=grant["Grantee"].get("DisplayName", ""), uri=grant["Grantee"].get("URI", ""), ) ], [grant["Permission"]], ) ) return grants def _acl_from_headers(self, headers): canned_acl = headers.get("x-amz-acl", "") if canned_acl: return get_canned_acl(canned_acl) grants = [] for header, value in headers.items(): if not header.startswith("x-amz-grant-"): continue permission = { "read": "READ", "write": "WRITE", "read-acp": "READ_ACP", "write-acp": "WRITE_ACP", "full-control": "FULL_CONTROL", }[header[len("x-amz-grant-") :]] grantees = [] for key_and_value in value.split(","): key, value = re.match( '([^=]+)="([^"]+)"', key_and_value.strip() ).groups() if key.lower() == "id": grantees.append(FakeGrantee(id=value)) else: grantees.append(FakeGrantee(uri=value)) grants.append(FakeGrant(grantees, [permission])) if grants: return FakeAcl(grants) else: return None def _tagging_from_headers(self, headers): tags = {} if headers.get("x-amz-tagging"): parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True) for tag in parsed_header.items(): tags[tag[0]] = tag[1][0] return tags def _tagging_from_xml(self, xml): parsed_xml = xmltodict.parse(xml, force_list={"Tag": True}) tags = {} for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]: tags[tag["Key"]] = tag["Value"] return tags def _bucket_tagging_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) tags = {} # Optional if no tags are being sent: if parsed_xml["Tagging"].get("TagSet"): # If there is only 1 tag, then it's not a list: if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], list): tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[ "Tagging" ]["TagSet"]["Tag"]["Value"] else: for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]: if tag["Key"] in tags: raise DuplicateTagKeys() tags[tag["Key"]] = tag["Value"] # Verify that "aws:" is not in the tags. 
If so, then this is a problem: for key, _ in tags.items(): if key.startswith("aws:"): raise NoSystemTags() return tags def _cors_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) if isinstance(parsed_xml["CORSConfiguration"]["CORSRule"], list): return [cors for cors in parsed_xml["CORSConfiguration"]["CORSRule"]] return [parsed_xml["CORSConfiguration"]["CORSRule"]] def _encryption_config_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) if ( not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule") or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get( "ApplyServerSideEncryptionByDefault" ) or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][ "ApplyServerSideEncryptionByDefault" ].get("SSEAlgorithm") ): raise MalformedXML() return [parsed_xml["ServerSideEncryptionConfiguration"]] def _logging_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"): return {} if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"): raise MalformedXML() if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"): parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = "" # Get the ACLs: if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"): permissions = ["READ", "WRITE", "FULL_CONTROL"] if not isinstance( parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][ "Grant" ], list, ): target_grants = self._get_grants_from_xml( [ parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][ "TargetGrants" ]["Grant"] ], MalformedXML, permissions, ) else: target_grants = self._get_grants_from_xml( parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][ "Grant" ], MalformedXML, permissions, ) parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][ "TargetGrants" ] = target_grants return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"] def _notification_config_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) if not len(parsed_xml["NotificationConfiguration"]): return {} # The types of notifications, and their required fields (apparently lambda is categorized by the API as # "CloudFunction"): notification_fields = [ ("Topic", "sns"), ("Queue", "sqs"), ("CloudFunction", "lambda"), ] event_names = [ "s3:ReducedRedundancyLostObject", "s3:ObjectCreated:*", "s3:ObjectCreated:Put", "s3:ObjectCreated:Post", "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", ] found_notifications = ( 0 # Tripwire -- if this is not ever set, then there were no notifications ) for name, arn_string in notification_fields: # 1st verify that the proper notification configuration has been passed in (with an ARN that is close # to being correct -- nothing too complex in the ARN logic): the_notification = parsed_xml["NotificationConfiguration"].get( "{}Configuration".format(name) ) if the_notification: found_notifications += 1 if not isinstance(the_notification, list): the_notification = parsed_xml["NotificationConfiguration"][ "{}Configuration".format(name) ] = [the_notification] for n in the_notification: if not n[name].startswith("arn:aws:{}:".format(arn_string)): raise InvalidNotificationARN() # 2nd, verify that the Events list is correct: assert n["Event"] if not isinstance(n["Event"], list): n["Event"] = [n["Event"]] for event in n["Event"]: if event not in event_names: raise InvalidNotificationEvent() # Parse out the filters: if 
n.get("Filter"): # Error if S3Key is blank: if not n["Filter"]["S3Key"]: raise KeyError() if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list): n["Filter"]["S3Key"]["FilterRule"] = [ n["Filter"]["S3Key"]["FilterRule"] ] for filter_rule in n["Filter"]["S3Key"]["FilterRule"]: assert filter_rule["Name"] in ["suffix", "prefix"] assert filter_rule["Value"] if not found_notifications: return {} return parsed_xml["NotificationConfiguration"] def _accelerate_config_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) config = parsed_xml["AccelerateConfiguration"] return config["Status"] def _key_response_delete(self, bucket_name, query, key_name): self._set_action("KEY", "DELETE", query) self._authenticate_and_authorize_s3_action() if query.get("uploadId"): upload_id = query["uploadId"][0] self.backend.cancel_multipart(bucket_name, upload_id) return 204, {}, "" version_id = query.get("versionId", [None])[0] if "tagging" in query: self.backend.delete_object_tagging( bucket_name, key_name, version_id=version_id ) template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE) return 204, {}, template.render(version_id=version_id) success, response_meta = self.backend.delete_object( bucket_name, key_name, version_id=version_id ) response_headers = {} if response_meta is not None: for k in response_meta: response_headers["x-amz-{}".format(k)] = response_meta[k] return 204, response_headers, "" def _complete_multipart_body(self, body): ps = minidom.parseString(body).getElementsByTagName("Part") prev = 0 for p in ps: pn = int(p.getElementsByTagName("PartNumber")[0].firstChild.wholeText) if pn <= prev: raise InvalidPartOrder() yield (pn, p.getElementsByTagName("ETag")[0].firstChild.wholeText) def _key_response_post(self, request, body, bucket_name, query, key_name): self._set_action("KEY", "POST", query) self._authenticate_and_authorize_s3_action() if body == b"" and "uploads" in query: metadata = metadata_from_headers(request.headers) multipart = self.backend.initiate_multipart(bucket_name, key_name, metadata) template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE) response = template.render( bucket_name=bucket_name, key_name=key_name, upload_id=multipart.id ) return 200, {}, response if query.get("uploadId"): body = self._complete_multipart_body(body) upload_id = query["uploadId"][0] key = self.backend.complete_multipart(bucket_name, upload_id, body) template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE) headers = {} if key.version_id: headers["x-amz-version-id"] = key.version_id return ( 200, headers, template.render( bucket_name=bucket_name, key_name=key.name, etag=key.etag ), ) elif "restore" in query: es = minidom.parseString(body).getElementsByTagName("Days") days = es[0].childNodes[0].wholeText key = self.backend.get_object(bucket_name, key_name) r = 202 if key.expiry_date is not None: r = 200 key.restore(int(days)) return r, {}, "" else: raise NotImplementedError( "Method POST had only been implemented for multipart uploads and restore operations, so far" ) def _invalid_headers(self, url, headers): """ Verify whether the provided metadata in the URL is also present in the headers :param url: .../file.txt&content-type=app%2Fjson&Signature=.. 
:param headers: Content-Type=app/json :return: True or False """ metadata_to_check = { "content-disposition": "Content-Disposition", "content-encoding": "Content-Encoding", "content-language": "Content-Language", "content-length": "Content-Length", "content-md5": "Content-MD5", "content-type": "Content-Type", } for url_key, header_key in metadata_to_check.items(): metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url) if metadata_in_url: url_value = unquote(metadata_in_url.group(1)) if header_key not in headers or (url_value != headers[header_key]): return True return False S3ResponseInstance = ResponseObject(s3_backend) S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01"> <Owner> <ID>bcaf1ffd86f41161ca5fb16fd081034f</ID> <DisplayName>webfile</DisplayName> </Owner> <Buckets> {% for bucket in buckets %} <Bucket> <Name>{{ bucket.name }}</Name> <CreationDate>{{ bucket.creation_date_ISO8601 }}</CreationDate> </Bucket> {% endfor %} </Buckets> </ListAllMyBucketsResult>""" S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Name>{{ bucket.name }}</Name> {% if prefix != None %} <Prefix>{{ prefix }}</Prefix> {% endif %} <MaxKeys>{{ max_keys }}</MaxKeys> {% if delimiter %} <Delimiter>{{ delimiter }}</Delimiter> {% endif %} <IsTruncated>{{ is_truncated }}</IsTruncated> {% if next_marker %} <NextMarker>{{ next_marker }}</NextMarker> {% endif %} {% for key in result_keys %} <Contents> <Key>{{ key.name }}</Key> <LastModified>{{ key.last_modified_ISO8601 }}</LastModified> <ETag>{{ key.etag }}</ETag> <Size>{{ key.size }}</Size> <StorageClass>{{ key.storage_class }}</StorageClass> <Owner> <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID> <DisplayName>webfile</DisplayName> </Owner> </Contents> {% endfor %} {% if delimiter %} {% for folder in result_folders %} <CommonPrefixes> <Prefix>{{ folder }}</Prefix> </CommonPrefixes> {% endfor %} {% endif %} </ListBucketResult>""" S3_BUCKET_GET_RESPONSE_V2 = """<?xml version="1.0" encoding="UTF-8"?> <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Name>{{ bucket.name }}</Name> {% if prefix != None %} <Prefix>{{ prefix }}</Prefix> {% endif %} <MaxKeys>{{ max_keys }}</MaxKeys> <KeyCount>{{ key_count }}</KeyCount> {% if delimiter %} <Delimiter>{{ delimiter }}</Delimiter> {% endif %} <IsTruncated>{{ is_truncated }}</IsTruncated> {% if next_continuation_token %} <NextContinuationToken>{{ next_continuation_token }}</NextContinuationToken> {% endif %} {% if start_after %} <StartAfter>{{ start_after }}</StartAfter> {% endif %} {% for key in result_keys %} <Contents> <Key>{{ key.name }}</Key> <LastModified>{{ key.last_modified_ISO8601 }}</LastModified> <ETag>{{ key.etag }}</ETag> <Size>{{ key.size }}</Size> <StorageClass>{{ key.storage_class }}</StorageClass> {% if fetch_owner %} <Owner> <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID> <DisplayName>webfile</DisplayName> </Owner> {% endif %} </Contents> {% endfor %} {% if delimiter %} {% for folder in result_folders %} <CommonPrefixes> <Prefix>{{ folder }}</Prefix> </CommonPrefixes> {% endfor %} {% endif %} </ListBucketResult>""" S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01"> <CreateBucketResponse> <Bucket>{{ bucket.name }}</Bucket> </CreateBucketResponse> </CreateBucketResponse>""" S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01"> 
<DeleteBucketResponse> <Code>204</Code> <Description>No Content</Description> </DeleteBucketResponse> </DeleteBucketResponse>""" S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?> <Error><Code>BucketNotEmpty</Code> <Message>The bucket you tried to delete is not empty</Message> <BucketName>{{ bucket.name }}</BucketName> <RequestId>asdfasdfsdafds</RequestId> <HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId> </Error>""" S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?> <LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{% if location != None %}{{ location }}{% endif %}</LocationConstraint>""" S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?> <LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> {% for rule in rules %} <Rule> <ID>{{ rule.id }}</ID> {% if rule.filter %} <Filter> {% if rule.filter.prefix != None %} <Prefix>{{ rule.filter.prefix }}</Prefix> {% endif %} {% if rule.filter.tag_key %} <Tag> <Key>{{ rule.filter.tag_key }}</Key> <Value>{{ rule.filter.tag_value }}</Value> </Tag> {% endif %} {% if rule.filter.and_filter %} <And> {% if rule.filter.and_filter.prefix != None %} <Prefix>{{ rule.filter.and_filter.prefix }}</Prefix> {% endif %} {% for key, value in rule.filter.and_filter.tags.items() %} <Tag> <Key>{{ key }}</Key> <Value>{{ value }}</Value> </Tag> {% endfor %} </And> {% endif %} </Filter> {% else %} {% if rule.prefix != None %} <Prefix>{{ rule.prefix }}</Prefix> {% endif %} {% endif %} <Status>{{ rule.status }}</Status> {% if rule.storage_class %} <Transition> {% if rule.transition_days %} <Days>{{ rule.transition_days }}</Days> {% endif %} {% if rule.transition_date %} <Date>{{ rule.transition_date }}</Date> {% endif %} <StorageClass>{{ rule.storage_class }}</StorageClass> </Transition> {% endif %} {% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %} <Expiration> {% if rule.expiration_days %} <Days>{{ rule.expiration_days }}</Days> {% endif %} {% if rule.expiration_date %} <Date>{{ rule.expiration_date }}</Date> {% endif %} {% if rule.expired_object_delete_marker %} <ExpiredObjectDeleteMarker>{{ rule.expired_object_delete_marker }}</ExpiredObjectDeleteMarker> {% endif %} </Expiration> {% endif %} {% if rule.nvt_noncurrent_days and rule.nvt_storage_class %} <NoncurrentVersionTransition> <NoncurrentDays>{{ rule.nvt_noncurrent_days }}</NoncurrentDays> <StorageClass>{{ rule.nvt_storage_class }}</StorageClass> </NoncurrentVersionTransition> {% endif %} {% if rule.nve_noncurrent_days %} <NoncurrentVersionExpiration> <NoncurrentDays>{{ rule.nve_noncurrent_days }}</NoncurrentDays> </NoncurrentVersionExpiration> {% endif %} {% if rule.aimu_days %} <AbortIncompleteMultipartUpload> <DaysAfterInitiation>{{ rule.aimu_days }}</DaysAfterInitiation> </AbortIncompleteMultipartUpload> {% endif %} </Rule> {% endfor %} </LifecycleConfiguration> """ S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?> <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Status>{{ bucket_versioning_status }}</Status> </VersioningConfiguration> """ S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?> {% if status is none %} <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/> {% else %} <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Status>{{ status }}</Status> </VersioningConfiguration> {% endif %} """ S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?> <ListVersionsResult 
xmlns="http://s3.amazonaws.com/doc/2006-03-01"> <Name>{{ bucket.name }}</Name> {% if prefix != None %} <Prefix>{{ prefix }}</Prefix> {% endif %} <KeyMarker>{{ key_marker }}</KeyMarker> <MaxKeys>{{ max_keys }}</MaxKeys> <IsTruncated>{{ is_truncated }}</IsTruncated> {% for key in key_list %} <Version> <Key>{{ key.name }}</Key> <VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId> <IsLatest>{% if latest_versions[key.name] == key.version_id %}true{% else %}false{% endif %}</IsLatest> <LastModified>{{ key.last_modified_ISO8601 }}</LastModified> <ETag>{{ key.etag }}</ETag> <Size>{{ key.size }}</Size> <StorageClass>{{ key.storage_class }}</StorageClass> <Owner> <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID> <DisplayName>webfile</DisplayName> </Owner> </Version> {% endfor %} {% for marker in delete_marker_list %} <DeleteMarker> <Key>{{ marker.name }}</Key> <VersionId>{{ marker.version_id }}</VersionId> <IsLatest>{% if latest_versions[marker.name] == marker.version_id %}true{% else %}false{% endif %}</IsLatest> <LastModified>{{ marker.last_modified_ISO8601 }}</LastModified> <Owner> <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID> <DisplayName>webfile</DisplayName> </Owner> </DeleteMarker> {% endfor %} </ListVersionsResult> """ S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?> <DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01"> {% for k, v in deleted %} <Deleted> <Key>{{k}}</Key> {% if v %}<VersionId>{{v}}</VersionId>{% endif %} </Deleted> {% endfor %} {% for k in delete_errors %} <Error> <Key>{{k}}</Key> </Error> {% endfor %} </DeleteResult>""" S3_DELETE_KEY_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?> <DeleteObjectTaggingResult xmlns="http://s3.amazonaws.com/doc/2006-03-01"> <VersionId>{{version_id}}</VersionId> </DeleteObjectTaggingResult> """ S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?> <AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Owner> <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID> <DisplayName>webfile</DisplayName> </Owner> <AccessControlList> {% for grant in obj.acl.grants %} <Grant> {% for grantee in grant.grantees %} <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="{{ grantee.type }}"> {% if grantee.uri %} <URI>{{ grantee.uri }}</URI> {% endif %} {% if grantee.id %} <ID>{{ grantee.id }}</ID> {% endif %} {% if grantee.display_name %} <DisplayName>{{ grantee.display_name }}</DisplayName> {% endif %} </Grantee> {% endfor %} {% for permission in grant.permissions %} <Permission>{{ permission }}</Permission> {% endfor %} </Grant> {% endfor %} </AccessControlList> </AccessControlPolicy>""" S3_OBJECT_TAGGING_RESPONSE = """\ <?xml version="1.0" encoding="UTF-8"?> <Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <TagSet> {% for tag in tags %} <Tag> <Key>{{ tag.Key }}</Key> <Value>{{ tag.Value }}</Value> </Tag> {% endfor %} </TagSet> </Tagging>""" S3_BUCKET_CORS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?> <CORSConfiguration> {% for cors in cors %} <CORSRule> {% for origin in cors.allowed_origins %} <AllowedOrigin>{{ origin }}</AllowedOrigin> {% endfor %} {% for method in cors.allowed_methods %} <AllowedMethod>{{ method }}</AllowedMethod> {% endfor %} {% if cors.allowed_headers is not none %} {% for header in cors.allowed_headers %} <AllowedHeader>{{ header }}</AllowedHeader> {% endfor %} {% endif %} {% if cors.exposed_headers is not none %} {% 
for header in cors.exposed_headers %} <ExposedHeader>{{ header }}</ExposedHeader> {% endfor %} {% endif %} {% if cors.max_age_seconds is not none %} <MaxAgeSeconds>{{ cors.max_age_seconds }}</MaxAgeSeconds> {% endif %} </CORSRule> {% endfor %} </CORSConfiguration> """ S3_OBJECT_COPY_RESPONSE = """\ <CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01"> <ETag>{{ key.etag }}</ETag> <LastModified>{{ key.last_modified_ISO8601 }}</LastModified> </CopyObjectResult>""" S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?> <InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Bucket>{{ bucket_name }}</Bucket> <Key>{{ key_name }}</Key> <UploadId>{{ upload_id }}</UploadId> </InitiateMultipartUploadResult>""" S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?> <CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <LastModified>{{ part.last_modified_ISO8601 }}</LastModified> <ETag>{{ part.etag }}</ETag> </CopyPartResult>""" S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?> <ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Bucket>{{ bucket_name }}</Bucket> <Key>{{ key_name }}</Key> <UploadId>{{ upload_id }}</UploadId> <StorageClass>STANDARD</StorageClass> <Initiator> <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID> <DisplayName>webfile</DisplayName> </Initiator> <Owner> <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID> <DisplayName>webfile</DisplayName> </Owner> <PartNumberMarker>1</PartNumberMarker> <NextPartNumberMarker>{{ count }}</NextPartNumberMarker> <MaxParts>{{ count }}</MaxParts> <IsTruncated>false</IsTruncated> {% for part in parts %} <Part> <PartNumber>{{ part.name }}</PartNumber> <LastModified>{{ part.last_modified_ISO8601 }}</LastModified> <ETag>{{ part.etag }}</ETag> <Size>{{ part.size }}</Size> </Part> {% endfor %} </ListPartsResult>""" S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?> <CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location> <Bucket>{{ bucket_name }}</Bucket> <Key>{{ key_name }}</Key> <ETag>{{ etag }}</ETag> </CompleteMultipartUploadResult> """ S3_ALL_MULTIPARTS = ( """<?xml version="1.0" encoding="UTF-8"?> <ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Bucket>{{ bucket_name }}</Bucket> <KeyMarker></KeyMarker> <UploadIdMarker></UploadIdMarker> <MaxUploads>1000</MaxUploads> <IsTruncated>False</IsTruncated> {% for upload in uploads %} <Upload> <Key>{{ upload.key_name }}</Key> <UploadId>{{ upload.id }}</UploadId> <Initiator> <ID>arn:aws:iam::""" + ACCOUNT_ID + """:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID> <DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName> </Initiator> <Owner> <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID> <DisplayName>webfile</DisplayName> </Owner> <StorageClass>STANDARD</StorageClass> <Initiated>2010-11-10T20:48:33.000Z</Initiated> </Upload> {% endfor %} </ListMultipartUploadsResult> """ ) S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?> <Error> <Code>NoSuchBucketPolicy</Code> <Message>The bucket policy does not exist</Message> <BucketName>{{ bucket_name }}</BucketName> <RequestId>0D68A23BB2E2215B</RequestId> <HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId> </Error> """ S3_NO_LIFECYCLE = """<?xml version="1.0" 
encoding="UTF-8"?> <Error> <Code>NoSuchLifecycleConfiguration</Code> <Message>The lifecycle configuration does not exist</Message> <BucketName>{{ bucket_name }}</BucketName> <RequestId>44425877V1D0A2F9</RequestId> <HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId> </Error> """ S3_NO_BUCKET_TAGGING = """<?xml version="1.0" encoding="UTF-8"?> <Error> <Code>NoSuchTagSet</Code> <Message>The TagSet does not exist</Message> <BucketName>{{ bucket_name }}</BucketName> <RequestId>44425877V1D0A2F9</RequestId> <HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId> </Error> """ S3_NO_BUCKET_WEBSITE_CONFIG = """<?xml version="1.0" encoding="UTF-8"?> <Error> <Code>NoSuchWebsiteConfiguration</Code> <Message>The specified bucket does not have a website configuration</Message> <BucketName>{{ bucket_name }}</BucketName> <RequestId>44425877V1D0A2F9</RequestId> <HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId> </Error> """ S3_INVALID_CORS_REQUEST = """<?xml version="1.0" encoding="UTF-8"?> <Error> <Code>NoSuchWebsiteConfiguration</Code> <Message>The specified bucket does not have a website configuration</Message> <BucketName>{{ bucket_name }}</BucketName> <RequestId>44425877V1D0A2F9</RequestId> <HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId> </Error> """ S3_NO_CORS_CONFIG = """<?xml version="1.0" encoding="UTF-8"?> <Error> <Code>NoSuchCORSConfiguration</Code> <Message>The CORS configuration does not exist</Message> <BucketName>{{ bucket_name }}</BucketName> <RequestId>44425877V1D0A2F9</RequestId> <HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId> </Error> """ S3_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?> <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01"> <LoggingEnabled> <TargetBucket>{{ logging["TargetBucket"] }}</TargetBucket> <TargetPrefix>{{ logging["TargetPrefix"] }}</TargetPrefix> {% if logging.get("TargetGrants") %} <TargetGrants> {% for grant in logging["TargetGrants"] %} <Grant> <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="{{ grant.grantees[0].type }}"> {% if grant.grantees[0].uri %} <URI>{{ grant.grantees[0].uri }}</URI> {% endif %} {% if grant.grantees[0].id %} <ID>{{ grant.grantees[0].id }}</ID> {% endif %} {% if grant.grantees[0].display_name %} <DisplayName>{{ grant.grantees[0].display_name }}</DisplayName> {% endif %} </Grantee> <Permission>{{ grant.permissions[0] }}</Permission> </Grant> {% endfor %} </TargetGrants> {% endif %} </LoggingEnabled> </BucketLoggingStatus> """ S3_NO_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?> <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" /> """ S3_ENCRYPTION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?> <BucketEncryptionStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01"> {% for entry in encryption %} <Rule> <ApplyServerSideEncryptionByDefault> <SSEAlgorithm>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }}</SSEAlgorithm> {% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %} <KMSMasterKeyID>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }}</KMSMasterKeyID> {% endif %} </ApplyServerSideEncryptionByDefault> </Rule> {% endfor %} </BucketEncryptionStatus> """ S3_INVALID_PRESIGNED_PARAMETERS = """<?xml version="1.0" encoding="UTF-8"?> <Error> <Code>SignatureDoesNotMatch</Code> 
<Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message> <RequestId>0D68A23BB2E2215B</RequestId> <HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId> </Error> """ S3_NO_ENCRYPTION = """<?xml version="1.0" encoding="UTF-8"?> <Error> <Code>ServerSideEncryptionConfigurationNotFoundError</Code> <Message>The server side encryption configuration was not found</Message> <BucketName>{{ bucket_name }}</BucketName> <RequestId>0D68A23BB2E2215B</RequestId> <HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId> </Error> """ S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?> <NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> {% for topic in config.topic %} <TopicConfiguration> <Id>{{ topic.id }}</Id> <Topic>{{ topic.arn }}</Topic> {% for event in topic.events %} <Event>{{ event }}</Event> {% endfor %} {% if topic.filters %} <Filter> <S3Key> {% for rule in topic.filters["S3Key"]["FilterRule"] %} <FilterRule> <Name>{{ rule["Name"] }}</Name> <Value>{{ rule["Value"] }}</Value> </FilterRule> {% endfor %} </S3Key> </Filter> {% endif %} </TopicConfiguration> {% endfor %} {% for queue in config.queue %} <QueueConfiguration> <Id>{{ queue.id }}</Id> <Queue>{{ queue.arn }}</Queue> {% for event in queue.events %} <Event>{{ event }}</Event> {% endfor %} {% if queue.filters %} <Filter> <S3Key> {% for rule in queue.filters["S3Key"]["FilterRule"] %} <FilterRule> <Name>{{ rule["Name"] }}</Name> <Value>{{ rule["Value"] }}</Value> </FilterRule> {% endfor %} </S3Key> </Filter> {% endif %} </QueueConfiguration> {% endfor %} {% for cf in config.cloud_function %} <CloudFunctionConfiguration> <Id>{{ cf.id }}</Id> <CloudFunction>{{ cf.arn }}</CloudFunction> {% for event in cf.events %} <Event>{{ event }}</Event> {% endfor %} {% if cf.filters %} <Filter> <S3Key> {% for rule in cf.filters["S3Key"]["FilterRule"] %} <FilterRule> <Name>{{ rule["Name"] }}</Name> <Value>{{ rule["Value"] }}</Value> </FilterRule> {% endfor %} </S3Key> </Filter> {% endif %} </CloudFunctionConfiguration> {% endfor %} </NotificationConfiguration> """ S3_BUCKET_ACCELERATE = """ <AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Status>{{ bucket.accelerate_configuration }}</Status> </AccelerateConfiguration> """ S3_BUCKET_ACCELERATE_NOT_SET = """ <AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/> """ S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION = """ <PublicAccessBlockConfiguration> <BlockPublicAcls>{{public_block_config.block_public_acls}}</BlockPublicAcls> <IgnorePublicAcls>{{public_block_config.ignore_public_acls}}</IgnorePublicAcls> <BlockPublicPolicy>{{public_block_config.block_public_policy}}</BlockPublicPolicy> <RestrictPublicBuckets>{{public_block_config.restrict_public_buckets}}</RestrictPublicBuckets> </PublicAccessBlockConfiguration> """
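# A minimal, illustrative usage sketch (not part of this module; the helper name `_demo_bucket_and_range_get`
# and the bucket/key names are made up for the example). Assuming moto's `mock_s3` decorator and a boto3
# client are available, it exercises a few of the handlers defined above: bucket tagging
# (S3_OBJECT_TAGGING_RESPONSE) and a ranged GET served by `_handle_range_header`.
def _demo_bucket_and_range_get():
    import boto3
    from moto import mock_s3

    @mock_s3
    def run():
        s3 = boto3.client("s3", region_name="us-east-1")
        s3.create_bucket(Bucket="example-bucket")
        # PUT/GET bucket tagging goes through _bucket_response_put / _bucket_response_get above.
        s3.put_bucket_tagging(
            Bucket="example-bucket",
            Tagging={"TagSet": [{"Key": "team", "Value": "data"}]},
        )
        assert s3.get_bucket_tagging(Bucket="example-bucket")["TagSet"][0]["Key"] == "team"
        # A ranged GET is answered with HTTP 206 and a Content-Range header by _handle_range_header.
        s3.put_object(Bucket="example-bucket", Key="hello.txt", Body=b"hello world")
        partial = s3.get_object(Bucket="example-bucket", Key="hello.txt", Range="bytes=0-4")
        assert partial["Body"].read() == b"hello"

    run()


if __name__ == "__main__":
    _demo_bucket_and_range_get()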
py
1a4388b98291b04e3ff319c396fd49eb1b091fd9
import json import os from convlab2.util.multiwoz.state import default_state from convlab2.dst.rule.multiwoz.dst_util import normalize_value from convlab2.dst.dst import DST from convlab2.util.multiwoz.multiwoz_slot_trans import REF_SYS_DA class RuleDST(DST): """Rule based DST which trivially updates new values from NLU result to states. Attributes: state(dict): Dialog state. Function ``tatk.util.multiwoz.state.default_state`` returns a default state. value_dict(dict): It helps check whether ``user_act`` has correct content. """ def __init__(self): DST.__init__(self) self.state = default_state() path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))) path = os.path.join(path, 'data/multiwoz/value_dict.json') self.value_dict = json.load(open(path)) def update(self, user_act=None): """ update belief_state, request_state :param user_act: :return: """ self.state['user_action'] = user_act for intent, domain, slot, value in user_act: domain = domain.lower() intent = intent.lower() if domain in ['unk', 'general', 'booking']: continue if intent == 'inform': k = REF_SYS_DA[domain.capitalize()].get(slot, slot) if k is None: continue try: assert domain in self.state['belief_state'] except: raise Exception('Error: domain <{}> not in new belief state'.format(domain)) domain_dic = self.state['belief_state'][domain] assert 'semi' in domain_dic assert 'book' in domain_dic if k in domain_dic['semi']: nvalue = normalize_value(self.value_dict, domain, k, value) self.state['belief_state'][domain]['semi'][k] = nvalue elif k in domain_dic['book']: self.state['belief_state'][domain]['book'][k] = value elif k.lower() in domain_dic['book']: self.state['belief_state'][domain]['book'][k.lower()] = value elif k == 'trainID' and domain == 'train': self.state['belief_state'][domain]['book'][k] = normalize_value(self.value_dict, domain, k, value) else: # raise Exception('unknown slot name <{}> of domain <{}>'.format(k, domain)) with open('unknown_slot.log', 'a+') as f: f.write('unknown slot name <{}> of domain <{}>\n'.format(k, domain)) elif intent == 'request': k = REF_SYS_DA[domain.capitalize()].get(slot, slot) if domain not in self.state['request_state']: self.state['request_state'][domain] = {} if k not in self.state['request_state'][domain]: self.state['request_state'][domain][k] = 0 return self.state def init_session(self): """Initialize ``self.state`` with a default state, which ``tatk.util.multiwoz.state.default_state`` returns.""" self.state = default_state() if __name__ == '__main__': # from tatk.dst.rule.multiwoz import RuleDST dst = RuleDST() # Action is a dict. Its keys are strings(domain-type pairs, both uppercase and lowercase is OK) and its values are list of lists. # The domain may be one of ('Attraction', 'Hospital', 'Booking', 'Hotel', 'Restaurant', 'Taxi', 'Train', 'Police'). # The type may be "inform" or "request". # For example, the action below has a key "Hotel-Inform", in which "Hotel" is domain and "Inform" is action type. # Each list in the value of "Hotel-Inform" is a slot-value pair. "Area" is slot and "east" is value. "Star" is slot and "4" is value. action = [ ["Inform", "Hotel", "Area", "east"], ["Inform", "Hotel", "Stars", "4"] ] # method `update` updates the attribute `state` of tracker, and returns it. 
state = dst.update(action) assert state == dst.state assert state == {'user_action': [["Inform", "Hotel", "Area", "east"], ["Inform", "Hotel", "Stars", "4"]], 'system_action': [], 'belief_state': {'police': {'book': {'booked': []}, 'semi': {}}, 'hotel': {'book': {'booked': [], 'people': '', 'day': '', 'stay': ''}, 'semi': {'name': '', 'area': 'east', 'parking': '', 'pricerange': '', 'stars': '4', 'internet': '', 'type': ''}}, 'attraction': {'book': {'booked': []}, 'semi': {'type': '', 'name': '', 'area': ''}}, 'restaurant': {'book': {'booked': [], 'people': '', 'day': '', 'time': ''}, 'semi': {'food': '', 'pricerange': '', 'name': '', 'area': ''}}, 'hospital': {'book': {'booked': []}, 'semi': {'department': ''}}, 'taxi': {'book': {'booked': []}, 'semi': {'leaveAt': '', 'destination': '', 'departure': '', 'arriveBy': ''}}, 'train': {'book': {'booked': [], 'people': ''}, 'semi': {'leaveAt': '', 'destination': '', 'day': '', 'arriveBy': '', 'departure': ''}}}, 'request_state': {}, 'terminated': False, 'history': []} # Please call `init_session` before a new dialog. This initializes the attribute `state` of tracker with a default state, which `tatk.util.multiwoz.state.default_state` returns. But You needn't call it before the first dialog, because tracker gets a default state in its constructor. dst.init_session() action = [["Inform", "Train", "Arrive", "19:45"]] state = dst.update(action) assert state == {'user_action': [["Inform", "Train", "Arrive", "19:45"]], 'system_action': [], 'belief_state': {'police': {'book': {'booked': []}, 'semi': {}}, 'hotel': {'book': {'booked': [], 'people': '', 'day': '', 'stay': ''}, 'semi': {'name': '', 'area': '', 'parking': '', 'pricerange': '', 'stars': '', 'internet': '', 'type': ''}}, 'attraction': {'book': {'booked': []}, 'semi': {'type': '', 'name': '', 'area': ''}}, 'restaurant': {'book': {'booked': [], 'people': '', 'day': '', 'time': ''}, 'semi': {'food': '', 'pricerange': '', 'name': '', 'area': ''}}, 'hospital': {'book': {'booked': []}, 'semi': {'department': ''}}, 'taxi': {'book': {'booked': []}, 'semi': {'leaveAt': '', 'destination': '', 'departure': '', 'arriveBy': ''}}, 'train': {'book': {'booked': [], 'people': ''}, 'semi': {'leaveAt': '', 'destination': '', 'day': '', 'arriveBy': '19:45', 'departure': ''}}}, 'request_state': {}, 'terminated': False, 'history': []}
py
1a43896511ed90d3a93080331a77293090f3fc8f
"""Utility functions to handle downloaded files.""" import glob import os import pathlib from hashlib import md5 def get_next_name(file_path: str) -> str: """ Get next available name to download file. Parameters ---------- file_path: str Absolute path of the file for which next available name to be generated. Returns ------- str Absolute path of the next available name for the file. """ posix_path = pathlib.Path(file_path) counter: int = 1 new_file_name: str = os.path.join("{0}", "{1}-copy{2}{3}") while os.path.isfile( new_file_name.format( posix_path.parent, posix_path.stem, counter, "".join(posix_path.suffixes), ) ): counter += 1 return new_file_name.format( posix_path.parent, posix_path.stem, counter, "".join(posix_path.suffixes), ) def manage_duplicate_file(file_path: str): """ Check if a file is duplicate. Compare the md5 of files with copy name pattern and remove if the md5 hash is same. Parameters ---------- file_path: str Absolute path of the file for which duplicates needs to be managed. Returns ------- str Absolute path of the duplicate managed file. """ # pylint: disable = R1732 posix_path = pathlib.Path(file_path) file_base_name: str = "".join(posix_path.stem.split("-copy")[0]) name_pattern: str = f"{posix_path.parent}/{file_base_name}*" # Reason for using `str.translate()` # https://stackoverflow.com/q/22055500/6730439 old_files: list = glob.glob( name_pattern.translate({ord("["): "[[]", ord("]"): "[]]"}) ) if file_path in old_files: old_files.remove(file_path) current_file_md5: str = md5(open(file_path, "rb").read()).hexdigest() for old_file_path in old_files: old_file_md5: str = md5(open(old_file_path, "rb").read()).hexdigest() if current_file_md5 == old_file_md5: os.remove(file_path) return old_file_path return file_path
py
1a4389a25fcffcd3dd1a6b4e396045502d8146dd
import pytest from nyr.interpreter.interpreter import Interpreter from nyr.parser.parser import Parser def testUninitializedVariable(): ast = Parser().parse("let x;") env = Interpreter().interpret(ast) assert env == {'x': None} @pytest.mark.parametrize( ("code"), ( pytest.param("let x; let y;", id="seperate"), pytest.param("let x, y;", id="merged"), ), ) def testMultipleUninitializedVariables(code: str): ast = Parser().parse(code) env = Interpreter().interpret(ast) assert env == {"x": None, "y": None} @pytest.mark.parametrize( ("code", "expected"), ( pytest.param( 'let string = "I am a string!";', {"string": "I am a string!"}, id="string", ), pytest.param( "let int = 42;", {"int": 42}, id="int", ), pytest.param( "let float = 3.14159;", {"float": 3.14159}, id="float", ), pytest.param( "let bool = false;", {"bool": False}, id="bool", ), pytest.param( "let none = null;", {"none": None}, id="none", ), ), ) def testTypeAssignments(code: str, expected): ast = Parser().parse(code) env = Interpreter().interpret(ast) assert env == expected def testMixedInitialize(): ast = Parser().parse("let x, y = 7, z;") env = Interpreter().interpret(ast) assert env == { 'x': None, 'y': 7, 'z': None, } def testAssignWithBinaryExpr(): ast = Parser().parse(""" let x = 4; let y = 7; let z = x + y; """) env = Interpreter().interpret(ast) assert env == { "x": 4, "y": 7, "z": 11, } @pytest.mark.parametrize( ("code"), ( pytest.param("let x; let x;", id="seperate"), pytest.param("let x, x;", id="merged"), ), ) def testVarExists(code: str): ast = Parser().parse(code) # FIXME: Wrong error returned from code with pytest.raises(Exception, match='Unknown variable "None"'): Interpreter().interpret(ast) def testVarNotExists(): ast = Parser().parse("x = 4;") with pytest.raises(Exception, match='Variable "x" does not exist in available scope'): Interpreter().interpret(ast)
py
1a438a4c50a4aebe34666c3c43f459d4d9b40e91
import abc import glob import os from typing import (Any, Dict, List, Optional) import importlib import redis from pkg_resources import resource_filename from gtmcore.logging import LMLogger logger = LMLogger.get_logger() class DevEnvMonitor(abc.ABC): """Class to monitor a development environments for the need to start Activity Monitor Instances""" @staticmethod def get_dev_env_name() -> List[str]: """Method to return a list of name(s) of the development environment that this class interfaces with. Should be the value used in the `name` attribute of the Dev Env Environment Component""" raise NotImplemented @abc.abstractmethod def run(self, dev_env_monitor_key: str) -> None: """Method called in a periodically scheduled async worker that should check the dev env and manage Activity Monitor Instances as needed Args: dev_env_monitor_key(str): The unique string used as the key in redis to track this DevEnvMonitor instance """ raise NotImplemented class DevEnvMonitorManager(object): """Class to manage creating DevEnvMonitor instances""" def __init__(self, database=1) -> None: # Get available monitor classes from redis or register available classes # Redis is used to store this information to reduce the overhead of re-registering all classes every time # DevEnvMonitorManager is instantiated, which happens often, both in the LabManager API and in async workers redis_conn = redis.Redis(db=database) data = redis_conn.hgetall('##AVAILABLE_DEV_ENV_MONITOR_CLASSES##') if data: # Load the class info from redis # TODO: verify if loading from imports is actually faster than using this redis cache implementation result_dict = {} for key in data: # Decode from bytes to strings if needed value = data[key] if type(key) == bytes: key = key.decode('utf-8') if type(value) == bytes: value = value.decode('utf-8') module_name, class_name = value.rsplit('.', 1) # load the module m = importlib.import_module(module_name) # get the class and store result_dict[key] = getattr(m, class_name) self.available_monitors = result_dict else: self.available_monitors = self._register_monitor_classes() for key in self.available_monitors: logger.info("Registering DevEnvMonitor Class: {} for {}".format(self.available_monitors[key], key)) redis_conn.hset('##AVAILABLE_DEV_ENV_MONITOR_CLASSES##', key, "{}.{}".format(self.available_monitors[key].__module__, self.available_monitors[key].__name__)) def _register_monitor_classes(self) -> Dict[str, Any]: """Private method to register all available Dev Env Monitor classes Creates a dictionary of the form {development environment name: monitor class name, ...} Returns: dict """ # Dynamically find files to import that start with monitor_* monitor_dir = os.path.join(resource_filename('gtmcore', 'activity'), 'monitors') for module_name in glob.glob('{}{}monitor_*'.format(monitor_dir, os.path.sep)): filename = os.path.basename(module_name) importlib.import_module("gtmcore.activity.monitors.{}".format(filename.split(".py")[0])) all_monitor_classes = [cls for cls in DevEnvMonitor.__subclasses__()] register_data: Dict[str, Any] = {} for cls in all_monitor_classes: dev_env_name = cls.get_dev_env_name() if any([(name in register_data) for name in dev_env_name]): msg = "Two Development Environment Monitors attempting to register for a single Dev Env:" msg = "{}\n Dev Env: {}".format(msg, dev_env_name) msg = "{}\n Class 1: {}".format(msg, [register_data[n] for n in dev_env_name]) msg = "{}\n Class 2: {}".format(msg, cls) raise ValueError(msg) # New Dev Env. 
Register it for all supported dev envs for name in dev_env_name: register_data[name] = cls return register_data def is_available(self, dev_env_name: str) -> bool: """Method to test if a dev env monitor is available for a given development environment name Args: dev_env_name(str): Name of a development environment to monitor Returns: bool """ return dev_env_name in self.available_monitors def get_monitor_instance(self, dev_env_name: str) -> Optional[DevEnvMonitor]: """Method to get a Dev Env Monitor instance based on the Dev Env name Args: dev_env_name(str): Name of a development environment to monitor Returns: DevEnvMonitor """ if self.is_available(dev_env_name): return self.available_monitors[dev_env_name]() else: return None
py
1a438aa475120dd8c92ff6bf320f569e68ec735e
"""Module for encoding and decoding length delimited fields""" # Copyright (c) 2018-2022 NCC Group Plc # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import binascii import copy import sys import six import logging from google.protobuf.internal import wire_format, encoder, decoder import blackboxprotobuf.lib from blackboxprotobuf.lib.types import varint from blackboxprotobuf.lib.exceptions import ( EncoderException, DecoderException, TypedefException, ) def encode_string(value): """Encode a string as a length delimited byte array""" try: value = six.ensure_text(value) except TypeError as exc: six.raise_from( EncoderException("Error encoding string to message: %r" % value), exc ) return encode_bytes(value) def encode_bytes(value): """Encode a length delimited byte array""" if isinstance(value, bytearray): value = bytes(value) try: value = six.ensure_binary(value) except TypeError as exc: six.raise_from( EncoderException("Error encoding bytes to message: %r" % value), exc ) encoded_length = varint.encode_varint(len(value)) return encoded_length + value def decode_bytes(buf, pos): """Decode a length delimited bytes array from buf""" length, pos = varint.decode_varint(buf, pos) end = pos + length try: return buf[pos:end], end except IndexError as exc: six.raise_from( DecoderException( ( "Error decoding bytes. 
Decoded length %d is longer than bytes" " available %d" ) % (length, len(buf) - pos) ), exc, ) def encode_bytes_hex(value): """Encode a length delimited byte array represented by a hex string""" try: return encode_bytes(binascii.unhexlify(value)) except (TypeError, binascii.Error) as exc: six.raise_from( EncoderException("Error encoding hex bytestring %s" % value), exc ) def decode_bytes_hex(buf, pos): """Decode a length delimited byte array from buf and return a hex encoded string""" value, pos = decode_bytes(buf, pos) return binascii.hexlify(value), pos def decode_string(value, pos): """Decode a length delimited byte array as a string""" length, pos = varint.decode_varint(value, pos) end = pos + length try: # backslash escaping isn't reversible easily return value[pos:end].decode("utf-8"), end except (TypeError, UnicodeDecodeError) as exc: six.raise_from( DecoderException("Error decoding UTF-8 string %s" % value[pos:end]), exc ) def encode_message(data, config, typedef, path=None, field_order=None): """Encode a Python dictionary to a binary protobuf message""" output = bytearray() if path is None: path = [] skiplist = set() if field_order is not None: for field_number, index in field_order: if field_number in data: value = data[field_number] # This will probably fail in some weird cases, and will get a weird # encoding for packed numbers but our main conern when ordering # fields is that it's a default decoding which won't be a packed try: new_output = _encode_message_field( config, typedef, path, field_number, value, selected_index=index ) output += new_output skiplist.add((field_number, index)) except EncoderException as exc: logging.warn( "Error encoding priority field: %s %s %r %r", field_number, index, path, exc, ) for field_number, value in data.items(): new_output = _encode_message_field( config, typedef, path, field_number, value, skiplist=skiplist ) output += new_output return output def _encode_message_field( config, typedef, path, field_number, value, selected_index=None, skiplist=None ): # Encodes a single field of a message to the byte array # If selected_index is passed, it will only encode a single element if value is a list # If skiplist is passed, it should be in the form of (field_number,index) # and this will skip encoding those elements # Get the field number convert it as necessary alt_field_number = None if six.PY2: string_types = (str, unicode) else: string_types = str if isinstance(field_number, string_types): if "-" in field_number: field_number, alt_field_number = field_number.split("-") for number, info in typedef.items(): if info.get("name", "") != "" and info["name"] == field_number and field_number != "": field_number = number break else: field_number = str(field_number) field_path = path[:] field_path.append(field_number) if field_number not in typedef: raise EncoderException( "Provided field name/number %s is not valid" % (field_number), field_path, ) field_typedef = typedef[field_number] # Get encoder if "type" not in field_typedef: raise TypedefException( "Field %s does not have a defined type." 
% field_number, field_path ) field_type = field_typedef["type"] field_order = field_typedef.get("field_order", None) field_encoder = None if alt_field_number is not None: if alt_field_number not in field_typedef["alt_typedefs"]: raise EncoderException( "Provided alt field name/number %s is not valid for field_number %s" % (alt_field_number, field_number), field_path, ) if isinstance(field_typedef["alt_typedefs"][alt_field_number], dict): innertypedef = field_typedef["alt_typedefs"][alt_field_number] field_encoder = lambda data: encode_lendelim_message( data, config, innertypedef, path=field_path, field_order=field_order ) else: # just let the field field_type = field_typedef["alt_typedefs"][alt_field_number] if field_encoder is None: if field_type == "message": innertypedef = None if "message_typedef" in field_typedef: innertypedef = field_typedef["message_typedef"] elif "message_type_name" in field_typedef: message_type_name = field_typedef["message_type_name"] if message_type_name not in config.known_types: raise TypedefException( "Message type (%s) has not been defined" % field_typedef["message_type_name"], field_path, ) innertypedef = config.known_types[message_type_name] else: raise TypedefException( "Could not find message typedef for %s" % field_number, field_path, ) field_encoder = lambda data: encode_lendelim_message( data, config, innertypedef, path=field_path, field_order=field_order ) else: if field_type not in blackboxprotobuf.lib.types.ENCODERS: raise TypedefException("Unknown type: %s" % field_type) field_encoder = blackboxprotobuf.lib.types.ENCODERS[field_type] if field_encoder is None: raise TypedefException( "Encoder not implemented for %s" % field_type, field_path ) # Encode the tag tag = encoder.TagBytes( int(field_number), blackboxprotobuf.lib.types.WIRETYPES[field_type] ) output = bytearray() try: # Handle repeated values if isinstance(value, list) and not field_type.startswith("packed_"): if selected_index is not None: if selected_index >= len(value): raise EncoderException( "Selected index is greater than the length of values: %r %r" % (selected_index, len(value)), path, ) output += tag output += field_encoder(value[selected_index]) else: for index, repeated in enumerate(value): if skiplist is None or (field_number, index) not in skiplist: output += tag output += field_encoder(repeated) else: if skiplist is None or (field_number, 0) not in skiplist: output += tag output += field_encoder(value) except EncoderException as exc: exc.set_path(field_path) six.reraise(*sys.exc_info()) return output def decode_message(buf, config, typedef=None, pos=0, end=None, depth=0, path=None): """Decode a protobuf message with no length prefix""" if end is None: end = len(buf) if typedef is None: typedef = {} else: # Don't want to accidentally modify the original typedef = copy.deepcopy(typedef) if path is None: path = [] output = {} grouped_fields, field_order, pos = _group_by_number(buf, pos, end, path) for (field_number, (wire_type, buffers)) in grouped_fields.items(): # wire_type should already be validated by _group_by_number path = path[:] + [field_number] field_outputs = None field_typedef = typedef.get(field_number, {}) field_key = _get_field_key(field_number, typedef, path) # Easy cases. 
Fixed size or bytes/string if ( wire_type in [ wire_format.WIRETYPE_FIXED32, wire_format.WIRETYPE_FIXED64, wire_format.WIRETYPE_VARINT, ] or ("type" in field_typedef and field_typedef["type"] != "message") ): if "type" not in field_typedef: field_typedef["type"] = config.get_default_type(wire_type) else: # have a type, but make sure it matches the wiretype if ( blackboxprotobuf.lib.types.WIRETYPES[field_typedef["type"]] != wire_type ): raise DecoderException( "Type %s from typedef did not match wiretype %s for " "field %s" % (field_typedef["type"], wire_type, field_key), path=path, ) # we already have a type, just map the decoder if field_typedef["type"] not in blackboxprotobuf.lib.types.DECODERS: raise TypedefException( "Got unkown type %s for field_number %s" % (field_typedef["type"], field_number), path=path, ) decoder = blackboxprotobuf.lib.types.DECODERS[field_typedef["type"]] field_outputs = [decoder(buf, 0) for buf in buffers] # this shouldn't happen, but let's check just in case for buf, _pos in zip(buffers, [y for _, y in field_outputs]): assert len(buf) == _pos field_outputs = [value for (value, _) in field_outputs] if len(field_outputs) == 1: output[field_key] = field_outputs[0] else: output[field_key] = field_outputs elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED: _try_decode_lendelim_fields( buffers, field_key, field_typedef, output, config ) # Save the field typedef/type back to the typedef typedef[field_number] = field_typedef return output, typedef, field_order, pos def _group_by_number(buf, pos, end, path): # Parse through the whole message and split into buffers based on wire # type and organized by field number. This forces us to parse the whole # message at once, but I think we're doing that anyway. This catches size # errors early as well, which is usually the best indicator of if it's a # protobuf message or not. # Returns a dictionary like: # { # "2": (<wiretype>, [<data>]) # } output_map = {} field_order = [] while pos < end: # Read in a field tag, pos = varint.decode_uvarint(buf, pos) field_number, wire_type = wire_format.UnpackTag(tag) # We want field numbers as strings everywhere field_number = str(field_number) path = path[:] + [field_number] if field_number in output_map and output_map[field_number][0] != wire_type: # This should never happen raise DecoderException( "Field %s has mistmatched wiretypes. 
Previous: %s Now: %s" % (field_number, output_map[field_number][0], wire_type), path=path, ) length = None if wire_type == wire_format.WIRETYPE_VARINT: # We actually have to read in the whole varint to figure out it's size _, new_pos = varint.decode_varint(buf, pos) length = new_pos - pos elif wire_type == wire_format.WIRETYPE_FIXED32: length = 4 elif wire_type == wire_format.WIRETYPE_FIXED64: length = 8 elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED: # Read the length from the start of the message # add on the length of the length tag as well bytes_length, new_pos = varint.decode_varint(buf, pos) length = bytes_length + (new_pos - pos) elif wire_type in [ wire_format.WIRETYPE_START_GROUP, wire_format.WIRETYPE_END_GROUP, ]: raise DecoderException("GROUP wire types not supported", path=path) else: raise DecoderException("Got unkown wire type: %d" % wire_type, path=path) if pos + length > end: raise DecoderException( "Decoded length for field %s goes over end: %d > %d" % (field_number, pos + length, end), path=path, ) field_buf = buf[pos : pos + length] if field_number in output_map: output_map[field_number][1].append(field_buf) else: output_map[field_number] = (wire_type, [field_buf]) field_order.append((field_number, len(output_map[field_number][1]) - 1)) pos += length return output_map, field_order, pos def _get_field_key(field_number, typedef, path): # Translate a field_number into a name if one is available in the typedef if not isinstance(field_number, (int, str)): raise EncoderException("Field key in message must be a str or int", path=path) if isinstance(field_number, int): field_number = str(field_number) # handle an alt_typedef by transforming 1-1 to name-1 # I don't think should actually be used with the current uses of # _get_field_key alt_field_number = None if "-" in field_number: field_number, alt_field_number = field_number.split("-") if field_number in typedef and typedef[field_number].get("name", "") != "": field_key = typedef[field_number]["name"] else: field_key = field_number # Return the new field_name + alt_field_number return field_key + ("" if alt_field_number is None else "-" + alt_field_number) def _try_decode_lendelim_fields( buffers, field_key, field_typedef, message_output, config ): # This is where things get weird # To start, since we want to decode messages and not treat every # embedded message as bytes, we have to guess if it's a message or # not. # Unlike other types, we can't assume our message types are # consistent across the tree or even within the same message. # A field could be a bytes type that that decodes to multiple different # messages that don't have the same type definition. This is where # 'alt_typedefs' let us say that these are the different message types # we've seen for this one field. # In general, if something decodes as a message once, the rest should too # and we can enforce that across a single message, but not multiple # messages. 
# This is going to change the definition of "alt_typedefs" a bit from just # alternate message type definitions to also allowing downgrading to # 'bytes' or string with an 'alt_type' if it doesn't parse try: outputs_map = {} field_order = [] # grab all dictonary alt_typedefs all_typedefs = { # we don't want this to modify in-place if it fails key: copy.deepcopy(value) for key, value in field_typedef.get("alt_typedefs", {}).items() if isinstance(value, dict) } all_typedefs["1"] = copy.deepcopy(field_typedef.get("message_typedef", {})) for buf in buffers: output = None output_typedef = None output_typedef_num = None new_field_order = [] for alt_typedef_num, alt_typedef in sorted( all_typedefs.items(), key=lambda x: int(x[0]) ): try: ( output, output_typedef, new_field_order, _, ) = decode_lendelim_message(buf, config, alt_typedef) except: continue output_typedef_num = alt_typedef_num break # try an anonymous type # let the error propogate up if we fail this if output is None: output, output_typedef, new_field_order, _ = decode_lendelim_message( buf, config, {} ) output_typedef_num = str( max([int(i) for i in ["0"] + list(all_typedefs.keys())]) + 1 ) # save the output or typedef we found all_typedefs[output_typedef_num] = output_typedef output_list = outputs_map.get(output_typedef_num, []) output_list.append(output) outputs_map[output_typedef_num] = output_list # we should technically have a different field order for each instance of the data # but that would require a very messy JSON which we're trying to avoid if len(new_field_order) > len(field_order): field_order = new_field_order # was able to decode everything as a message field_typedef["type"] = "message" field_typedef["message_typedef"] = all_typedefs["1"] field_typedef["field_order"] = field_order if len(all_typedefs.keys()) > 1: del all_typedefs["1"] field_typedef.setdefault("alt_typedefs", {}).update(all_typedefs) # messages get set as "key-alt_number" for output_typedef_num, outputs in outputs_map.items(): output_field_key = field_key if output_typedef_num != "1": output_field_key += "-" + output_typedef_num message_output[output_field_key] = ( outputs if len(outputs) > 1 else outputs[0] ) # success, return return except DecoderException as exc: # this should be pretty common, don't be noisy or throw an exception logging.debug( "Could not decode a buffer for field number %s as a message: %s", field_key, exc, ) # Decoding as a message did not work, try strings and then bytes # The bytes decoding should never fail for target_type in ["string", config.default_binary_type]: try: outputs = [] decoder = blackboxprotobuf.lib.types.DECODERS[target_type] for buf in buffers: output, _ = decoder(buf, 0) outputs.append(output) # all outputs worked, this is our type # check if there is a message type already in the typedef if "type" in field_typedef and "message" == field_typedef["type"]: # we already had a message type. 
save it as an alt_typedef # check if we already have this type as an alt_typedef output_typedef_nums = { key: value for key, value in field_typedef.setdefault( "alt_typedefs", {} ).items() if value == target_type }.keys() output_typedef_num = None if len(output_typedef_nums) == 0: # find the next largest alt typedef number to put this type as output_typedef_num = str( max([int(i) for i in ["0"] + all_typedefs.keys()]) + 1 ) field_typedef.setdefault("alt_typedefs", {})[ output_typedef_num ] = target_type else: # we already have an alt typedef with this number output_typedef_num = output_typedef_nums[0] message_output[field_key + "-" + output_typedef_num] = ( outputs if len(outputs) > 1 else outputs[0] ) else: field_typedef["type"] = target_type message_output[field_key] = outputs if len(outputs) > 1 else outputs[0] return except DecoderException: continue def encode_lendelim_message(data, config, typedef, path=None, field_order=None): """Encode data as a length delimited protobuf message""" message_out = encode_message( data, config, typedef, path=path, field_order=field_order ) length = varint.encode_varint(len(message_out)) logging.debug("Message length encoded: %d", len(length) + len(message_out)) return length + message_out def decode_lendelim_message(buf, config, typedef=None, pos=0, depth=0, path=None): """Deocde a length delimited protobuf message from buf""" length, pos = varint.decode_varint(buf, pos) ret = decode_message( buf, config, typedef, pos, pos + length, depth=depth, path=path ) return ret def generate_packed_encoder(wrapped_encoder): """Generate an encoder for a packed type based on a base type encoder""" def length_wrapper(values): # Encode repeat values and prefix with the length output = bytearray() for value in values: output += wrapped_encoder(value) length = varint.encode_varint(len(output)) return length + output return length_wrapper def generate_packed_decoder(wrapped_decoder): """Generate an decoder for a packed type based on a base type decoder""" def length_wrapper(buf, pos): # Decode repeat values prefixed with the length length, pos = varint.decode_varint(buf, pos) end = pos + length output = [] while pos < end: value, pos = wrapped_decoder(buf, pos) output.append(value) if pos > end: raise DecoderException( ( "Error decoding packed field. Packed length larger than" " buffer: decoded = %d, left = %d" ) % (length, len(buf) - pos) ) return output, pos return length_wrapper
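A round-trip sketch for the byte and string helpers defined above. The import path is inferred from the module's own imports and may differ in practice.

from blackboxprotobuf.lib.types import length_delim  # assumed module name for this file

encoded = length_delim.encode_bytes(b"hello")         # varint length prefix + payload
value, pos = length_delim.decode_bytes(encoded, 0)
assert value == b"hello" and pos == len(encoded)

encoded_str = length_delim.encode_string("héllo")     # strings use the same wire format
decoded_str, _ = length_delim.decode_string(encoded_str, 0)
assert decoded_str == "héllo"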
py
1a438b89c1bdd364f49fa6b621a7326c3dc701ed
#!/usr/bin/python import os import struct from collections import OrderedDict FILE_NAME = "DR.SG0" #change name to extract other file def showData(f): f.seek(1) data = f.read(1) driverId = struct.unpack("<B",(data))[0] print("DriverId: {0}".format(driverId)) data = f.read(1) useWeapons = struct.unpack("<B",(data))[0] print("useWeapons: {0}".format(useWeapons)) data = f.read(1) difficulty = struct.unpack("<B",(data))[0] print("Difficulty: {0}".format(difficulty)) data = f.read(15) print("SaveGame Name: {0}".format(data)) drivers=20 for index in range(drivers): f.seek(19+108*index) name = f.read(12) print("#{0}".format(index)) print(" Name: {0}".format(name)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Damage: {0}".format(data)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Engine: {0}".format(data%256)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Tire: {0}".format(data%255 )) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Armour: {0}".format(data%255)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Car Type: {0}".format(data%255)) if index ==driverId: data = f.read(4) data = struct.unpack("I",(data))[0] print(" ?: {0}".format(data%255)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" ?: {0}".format(data%255)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" ?: {0}".format(data%255)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Color: {0}".format(data%255)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Money: {0}".format(data%(255*255))) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Loan Type: {0}".format(data%255)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Loan Races Left: {0}".format(data%255)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Actual car value: {0}".format(data%(255*255))) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Face: {0}".format(data%255)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Points: {0}".format(data)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Rank: {0}".format(data%255)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Races won: {0}".format(data)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Total races: {0}".format(data%255)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" ?: {0}".format(data)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Total income: {0}".format(data)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Mines: {0}".format(data)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Spikes: {0}".format(data)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Rocket: {0}".format(data)) data = f.read(4) data = struct.unpack("I",(data))[0] print(" Sabotage: {0}".format(data)) else: f.read(19*4) print("Reading: {0}".format(FILE_NAME)) f = open(FILE_NAME, "rb") fDest = open(FILE_NAME + "_decrypted","w+") try: positions = 2179 initialPosition = 0 for index in range(positions): f.seek(index) data = f.read(1) if index==0: initialPosition =struct.unpack("<B",(data))[0] data =initialPosition else: tmpData = struct.unpack("<B",(data))[0] << index%6 tmpData=tmpData %256 data = tmpData|struct.unpack("<B",(data))[0] >> (8-index%6) data=data %256 data +=-17* index data=data %256 data=data+initialPosition data=data %256 fDest.seek(index) fDest.write(chr(data)) showData(fDest) print("File readed: {0}".format(FILE_NAME)) finally: fDest.close() 
f.close()
py
1a438bc8f16d1dfdf0b0792ff643f0598029f57a
#!/usr/bin/python
proto = ["ssh", "http", "https"]
protoa = ["ssh", "http", "https"]
print(proto)
proto.append('dns')  # adds 'dns' to the end of the list
protoa.append('dns')  # adds 'dns' to the end of the list
print(proto)
proto2 = [22, 80, 443, 53]  # list of common ports
proto.extend(proto2)  # pass proto2 as argument to the extend method
print(proto)
protoa.append(proto2)  # pass proto2 as argument to the append method
print(protoa)
# .insert added to lists
proto.insert(2, proto2)  # inserts list proto2 at index 2
print(proto)
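The comments above turn on the difference between extend and append; here is a minimal self-contained illustration.

base = ["ssh", "http", "https"]
ports = [22, 80, 443]

extended = base.copy()
extended.extend(ports)    # items are added one by one
# ['ssh', 'http', 'https', 22, 80, 443]

appended = base.copy()
appended.append(ports)    # the whole list becomes a single nested element
# ['ssh', 'http', 'https', [22, 80, 443]]

print(extended)
print(appended)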
py
1a438c9ec9be1715f256a4adb55e94238d4049d9
import os

from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings

if not settings.configured:
    # set the default Django settings module for the 'celery' program.
    os.environ.setdefault(
        "DJANGO_SETTINGS_MODULE", "config.settings.local"
    )  # pragma: no cover

app = Celery("dear_petition")

# Using a string here means the worker will not have to
# pickle the object when using Windows.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")


class CeleryAppConfig(AppConfig):
    name = "dear_petition.taskapp"
    verbose_name = "Celery Config"

    def ready(self):
        installed_apps = [app_config.name for app_config in apps.get_app_configs()]
        app.autodiscover_tasks(lambda: installed_apps, force=True)


@app.task(bind=True)
def debug_task(self):
    print(f"Request: {self.request!r}")  # pragma: no cover
py
1a438d918ffa27d61d2494e05645b57aa64efdc1
from .job import run_jobs
py
1a438e0754376d16814f7ba8f62e0e74d67051a3
""" WHAT: A class which which manages the interface with MySQL WHY: Need to read and write data to MySQL ASSUMES: MySQL is running per the connection parameters FUTURE IMPROVEMENTS: Add table upload functions, and DDL creation as required WHO: SL 2020-08-13 """ import mysql.connector as mysql import pandas as pd from tqdm import tqdm import modConfig import logging import os class clsDatabase: """ See header """ def __init__(self): """ Constructor """ self.connection = None def connect(self): """ Connect to MySQL :return: None """ self.connection = mysql.connect(host='localhost', port=12345, database='xxx', user='root', password='xxx', autocommit=False, option_files="my.cnf") def disconnect(self): """ Disconnect from MySQL :return: None """ if self.connection is not None: self.connection.close() def reconnect(self): """ Reconnect to MySQL :return: None """ self.disconnect() self.connect() def execute(self, sql, expectingReturn=False): """ Execute a sql query, and optional return results as a pandas DataFrame :param sql: Any sql statement :param expectingReturn: True meaning return a pandas DataFrame, False meaning no return :return: pandas DataFrame or None """ if expectingReturn: return pd.read_sql(sql=sql, con=self.connection) else: cursor = self.connection.cursor() cursor.execute(sql) cursor.close() def uploadTableViaDataFrame(self, df, tableName, clearTable=False, shouldCrashOnBadRow=True): """ Uploads a pandas DataFrame to a given MySQL table via insert statements :param df: A pandas DataFrame with column names which match the target table column names :param tableName: A MySQL table name :param clearTable: Boolean whether to clear the table before uploading :return: None """ sql = "insert into `%s`\n(`" % tableName + "`,`".join(df.columns) + "`)\nvalues\n(" + ",".join(["%s"]*len(df.columns)) + ")" cursor = self.connection.cursor() if clearTable: cursor.execute("delete from `%s`" % tableName) rowCounter = 0 for row in tqdm(df.values.tolist(), desc="Uploading table %s" % tableName, unit="row"): try: cursor.execute(sql, row) except Exception as e: if shouldCrashOnBadRow: raise else: msg = "Row failure at row %s with error: %s" % (str(rowCounter), str(e)) print(msg) logging.error(msg) rowCounter += 1 cursor.close() self.connection.commit() def uploadTableViaCsvFile(self, fileName, tableName, columnNames=None, clearTable=False): """ Uploads a pandas DataFrame to a given MySQL table via local csv file. 
NOTE Server must have local_infile turned on set @@global.local_infile = 1 NOTE Client must have local_infile enabled in its .cnf file [client] allow_local_infile=ON :param fileName: A comma separated text file with equivalent of pd.to_csv( , index=False, line_terminator='\n') :param tableName: A MySQL table name :param clearTable: Boolean whether to clear the table before uploading :return: None """ sql = \ """ LOAD DATA LOCAL INFILE '%s' INTO TABLE `%s` FIELDS TERMINATED BY ',' ENCLOSED BY '"' LINES TERMINATED BY '\n' IGNORE 1 LINES """ % (fileName.replace(os.sep, '/'), tableName) # os.sep is to handle windows file paths if columnNames is not None: sql += "\n(`" + "`,`".join(columnNames) + "`)" sql += ";" cursor = self.connection.cursor() if clearTable: cursor.execute("delete from `%s`" % tableName) cursor.execute(sql) cursor.close() self.connection.commit() if __name__ == '__main__': print("Connecting to database") db = clsDatabase() db.connect() print("Querying database") df = db.execute(sql="select current_timestamp", expectingReturn=True) print(df) print("Disconnecting from database") db.disconnect() print("Done")
py
1a438e15403e7dbaa226139e336b8342199a330e
import json import logging import sys import unittest from handlers.proj_schedule_initializer import lambda_handler from handlers.proj_schedule_initializer import logger logging.basicConfig(format='%(asctime)s %(filename)s [line:%(lineno)d] [PID:%(process)d] %(levelname)s: %(message)s', stream=sys.stdout) class ScheduleInitializerTestCase(unittest.TestCase): def test_get_schedule_initializer(self): logger.info("test get schedule initializer") get_test_event = { "resource": "/schedule/", "path": "/schedule/", "httpMethod": "GET", "queryStringParameters": { "pageSize": "20", "pageNo": "0", "userId": "test-editor" }, "multiValueQueryStringParameters": { "pageSize": [ "20" ], "pageNo": [ "0" ], "userId": [ "test-editor" ] }, "pathParameters": {} } handler_response = lambda_handler(get_test_event, None) logger.debug(json.dumps(handler_response, indent=2)) logger.debug(json.dumps(json.loads(handler_response["body"]), indent=2)) self.assertEqual(handler_response["statusCode"], 200) logger.info("Completed!") def test_post_schedule_initializer(self): logger.info("test post schedule initializer") post_test_event = { "resource": "/schedule/", "path": "/schedule/", "httpMethod": "POST", "queryStringParameters": { "targetArea": "New York", "userId": "test-editor" }, "multiValueQueryStringParameters": { "targetArea": [ "New York" ], "userId": [ "test-editor" ], }, "pathParameters": {} } handler_response = lambda_handler(post_test_event, None) print(json.dumps(handler_response, indent=2)) self.assertEqual(handler_response["statusCode"], 200) logger.debug(json.dumps(json.loads(handler_response["body"]), indent=2)) logger.info("Completed!") if __name__ == '__main__': unittest.main()
py
1a438f964dfafffdc5007df5ccea8fa2a723afbb
# coding: utf-8 """Example / benchmark for building a PTB LSTM model. Trains the model described in: (Zaremba, et. al.) Recurrent Neural Network Regularization http://arxiv.org/abs/1409.2329 There are 3 supported model configurations: =========================================== | config | epochs | datasets | valid | test =========================================== | small | 13 | 37.99 | 121.39 | 115.91 | medium | 39 | 48.45 | 86.16 | 82.07 | large | 55 | 37.87 | 82.62 | 78.29 The exact results may vary depending on the random initialization. The hyperparameters used in the model: - init_scale - the initial scale of the weights - learning_rate - the initial value of the learning rate - max_grad_norm - the maximum permissible norm of the gradient - num_layers - the number of LSTM layers - num_steps - the number of unrolled steps of LSTM - hidden_size - the number of LSTM units - max_epoch - the number of epochs trained with the initial learning rate - max_max_epoch - the total number of epochs for training - keep_prob - the probability of keeping weights in the dropout layer - lr_decay - the decay of the learning rate for each epoch after "max_epoch" - batch_size - the batch size The data required for this example is in the data/ dir of the PTB dataset from Tomas Mikolov's webpage: $ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz $ tar xvf simple-examples.tgz To run: $ python ptb_word_lm.py --data_path=simple-examples/data/ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import inspect import time import numpy as np import tensorflow as tf from test import reader flags = tf.flags logging = tf.logging flags.DEFINE_string( "model", "small", "A type of model. Possible options are: small, medium, large.") flags.DEFINE_string("data_path", None, "Where the training/test data is stored.") flags.DEFINE_string("save_path", None, "Model output directory.") flags.DEFINE_bool("use_fp16", False, "Train using 16-bit floats instead of 32bit floats") FLAGS = flags.FLAGS def data_type(): return tf.float16 if FLAGS.use_fp16 else tf.float32 class PTBInput(object): """input 데이터""" def __init__(self, config, data, name=None): self.batch_size = batch_size = config.batch_size self.num_steps = num_steps = config.num_steps self.epoch_size = ((len(data) // batch_size) - 1) // num_steps self.input_data, self.targets = reader.ptb_producer( data, batch_size, num_steps, name=name) class PTBModel(object): """PTB 모델""" def __init__(self, is_training, config, input_): self._input = input_ batch_size = input_.batch_size num_steps = input_.num_steps size = config.hidden_size vocab_size = config.vocab_size # Slightly better results can be obtained with forget gate biases # initialized to 1 but the hyperparameters of the model would need to be # different than reported in the paper. def lstm_cell(): # With the latest TensorFlow source code (as of Mar 27, 2017), # the BasicLSTMCell will need a reuse checkpoint which is unfortunately not # defined in TensorFlow 1.0. 
To maintain backwards compatibility, we add # an argument check here: if 'reuse' in inspect.getargspec( tf.contrib.rnn.BasicLSTMCell.__init__).args: return tf.contrib.rnn.BasicLSTMCell( size, forget_bias=0.0, state_is_tuple=True, reuse=tf.get_variable_scope().reuse) else: return tf.contrib.rnn.BasicLSTMCell( size, forget_bias=0.0, state_is_tuple=True) attn_cell = lstm_cell if is_training and config.keep_prob < 1: def attn_cell(): return tf.contrib.rnn.DropoutWrapper( lstm_cell(), output_keep_prob=config.keep_prob) cell = tf.contrib.rnn.MultiRNNCell( [attn_cell() for _ in range(config.num_layers)], state_is_tuple=True) self._initial_state = cell.zero_state(batch_size, data_type()) with tf.device("/cpu:0"): embedding = tf.get_variable( "embedding", [vocab_size, size], dtype=data_type()) inputs = tf.nn.embedding_lookup(embedding, input_.input_data) if is_training and config.keep_prob < 1: inputs = tf.nn.dropout(inputs, config.keep_prob) # Simplified version of models/tutorials/rnn/rnn.py's rnn(). # This builds an unrolled LSTM for tutorial purposes only. # In general, use the rnn() or state_saving_rnn() from rnn.py. # # The alternative version of the code below is: # # inputs = tf.unstack(inputs, num=num_steps, axis=1) # outputs, state = tf.contrib.rnn.static_rnn( # cell, inputs, initial_state=self._initial_state) outputs = [] state = self._initial_state with tf.variable_scope("RNN"): for time_step in range(num_steps): if time_step > 0: tf.get_variable_scope().reuse_variables() (cell_output, state) = cell(inputs[:, time_step, :], state) outputs.append(cell_output) output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size]) softmax_w = tf.get_variable( "softmax_w", [size, vocab_size], dtype=data_type()) softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type()) logits = tf.matmul(output, softmax_w) + softmax_b # Reshape logits to be 3-D tensor for sequence loss logits = tf.reshape(logits, [batch_size, num_steps, vocab_size]) # use the contrib sequence loss and average over the batches loss = tf.contrib.seq2seq.sequence_loss( logits, input_.targets, tf.ones([batch_size, num_steps], dtype=data_type()), average_across_timesteps=False, average_across_batch=True ) # update the cost variables self._cost = cost = tf.reduce_sum(loss) self._final_state = state if not is_training: return self._lr = tf.Variable(0.0, trainable=False) tvars = tf.trainable_variables() grads = tf.gradients(cost,tvars) grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), config.max_grad_norm) optimizer = tf.train.GradientDescentOptimizer(self._lr) self._train_op = optimizer.apply_gradients( zip(grads, tvars), global_step=tf.contrib.framework.get_or_create_global_step()) self._new_lr = tf.placeholder( tf.float32, shape=[], name="new_learning_rate") self._lr_update = tf.assign(self._lr, self._new_lr) def assign_lr(self, session, lr_value): session.run(self._lr_update, feed_dict={self._new_lr: lr_value}) @property def input(self): return self._input @property def initial_state(self): return self._initial_state @property def cost(self): return self._cost @property def final_state(self): return self._final_state @property def lr(self): return self._lr @property def train_op(self): return self._train_op class SmallConfig(object): """--model flag가 small일때의 설정값들""" init_scale = 0.1 learning_rate = 1.0 max_grad_norm = 5 num_layers = 2 num_steps = 20 hidden_size = 200 max_epoch = 4 max_max_epoch = 13 keep_prob = 1.0 lr_decay = 0.5 batch_size = 20 vocab_size = 10000 class MediumConfig(object): """--model 
flag가 medium일때의 설정값들""" init_scale = 0.05 learning_rate = 1.0 max_grad_norm = 5 num_layers = 2 num_steps = 35 hidden_size = 650 max_epoch = 6 max_max_epoch = 39 keep_prob = 0.5 lr_decay = 0.8 batch_size = 20 vocab_size = 10000 class LargeConfig(object): """--model flag가 large일때의 설정값들""" init_scale = 0.04 learning_rate = 1.0 max_grad_norm = 10 num_layers = 2 num_steps = 35 hidden_size = 1500 max_epoch = 14 max_max_epoch = 55 keep_prob = 0.35 lr_decay = 1 / 1.15 batch_size = 20 vocab_size = 10000 class TestConfig(object): """Tiny config, for testing.""" init_scale = 0.1 learning_rate = 1.0 max_grad_norm = 1 num_layers = 1 num_steps = 2 hidden_size = 2 max_epoch = 1 max_max_epoch = 1 keep_prob = 1.0 lr_decay = 0.5 batch_size = 20 vocab_size = 10000 def run_epoch(session, model, eval_op=None, verbose=False): """Runs the model on the given data.""" start_time = time.time() costs = 0.0 iters = 0 state = session.run(model.initial_state) fetches = { "cost": model.cost, "final_state": model.final_state, } if eval_op is not None: fetches["eval_op"] = eval_op for step in range(model.input.epoch_size): feed_dict = {} for i, (c, h) in enumerate(model.initial_state): feed_dict[c] = state[i].c feed_dict[h] = state[i].h vals = session.run(fetches, feed_dict) cost = vals["cost"] state = vals["final_state"] costs += cost print(cost) iters += model.input.num_steps print(iters) if verbose and step % (model.input.epoch_size // 10) == 10: print("%.3f perplexity: %.3f speed: %.0f wps" % (step * 1.0 / model.input.epoch_size, np.exp(costs / iters), iters * model.input.batch_size / (time.time() - start_time))) return np.exp(costs / iters) def get_config(): if FLAGS.model == "small": return SmallConfig() elif FLAGS.model == "medium": return MediumConfig() elif FLAGS.model == "large": return LargeConfig() elif FLAGS.model == "test": return TestConfig() else: raise ValueError("Invalid model: %s", FLAGS.model) def main(_): if not FLAGS.data_path: raise ValueError("Must set --data_path to PTB data directory") raw_data = reader.ptb_raw_data(FLAGS.data_path) train_data, valid_data, test_data, _ = raw_data config = get_config() eval_config = get_config() eval_config.batch_size = 1 eval_config.num_steps = 1 with tf.Graph().as_default(): initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale) with tf.name_scope("Train"): train_input = PTBInput(config=config, data=train_data, name="TrainInput") with tf.variable_scope("Model", reuse=None, initializer=initializer): m = PTBModel(is_training=True, config=config, input_=train_input) tf.summary.scalar("Training Loss", m.cost) tf.summary.scalar("Learning Rate", m.lr) with tf.name_scope("Valid"): valid_input = PTBInput(config=config, data=valid_data, name="ValidInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mvalid = PTBModel(is_training=False, config=config, input_=valid_input) tf.summary.scalar("Validation Loss", mvalid.cost) with tf.name_scope("Test"): test_input = PTBInput(config=eval_config, data=test_data, name="TestInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mtest = PTBModel(is_training=False, config=eval_config, input_=test_input) sv = tf.train.Supervisor(logdir=FLAGS.save_path) with sv.managed_session() as session: for i in range(config.max_max_epoch): lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0) m.assign_lr(session, config.learning_rate * lr_decay) print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr))) train_perplexity = run_epoch(session, m, 
eval_op=m.train_op, verbose=True) print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity)) valid_perplexity = run_epoch(session, mvalid) print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity)) test_perplexity = run_epoch(session, mtest) print("Test Perplexity: %.3f" % test_perplexity) if FLAGS.save_path: print("Saving model to %s." % FLAGS.save_path) sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step) if __name__ == "__main__": tf.app.run()
py
1a4391db709e24b32054ba6ea5d1df60e94ea24e
# -*- coding: utf-8 -*- from itertools import chain import os from os import path from unittest.case import TestCase import math import sys project_dir = path.dirname(__file__) project_dir = path.join('..') sys.path.append(project_dir) from placa_grafica_tkinter import rodar_fase project_dir = os.path.join(os.path.dirname(__file__), '..') project_dir = os.path.normpath(project_dir) sys.path.append(project_dir) from atores import Obstaculo, Porco, PassaroVermelho, PassaroAmarelo, DESTRUIDO, ATIVO, \ Ator, Passaro from fase import Fase, Ponto, EM_ANDAMENTO, VITORIA, DERROTA class FaseTestes(TestCase): def teste_acabou_com_porcos_e_passaros(self): fase = Fase() porcos = [Porco(1, 1) for i in range(2)] # criando 2 porcos passaros = [PassaroAmarelo(1, 1) for i in range(2)] # criando 2 pássaros fase.adicionar_porco(*porcos) fase.adicionar_passaro(*passaros) self.assertEqual(EM_ANDAMENTO, fase.status()) # colidindo cada passaro com um porco no tempo 3 for passaro, porco in zip(passaros, porcos): passaro.colidir(porco, 3) self.assertEqual(VITORIA, fase.status()) fase.adicionar_obstaculo(Obstaculo()) self.assertEqual(VITORIA, fase.status(), 'Obstáculo não interfere no fim do jogo') fase.adicionar_porco(Porco()) self.assertEqual(DERROTA, fase.status(), 'Com Porco ativo e sem pássaro para lançar, o jogo deveria acabar') fase.adicionar_passaro(PassaroAmarelo()) self.assertEqual(EM_ANDAMENTO, fase.status(), 'Com Porco ativo e com pássaro para lançar, o jogo não deveria acabar') def teste_status(self): fase = Fase() porcos = [Porco(1, 1) for i in range(2)] passaros = [PassaroAmarelo(1, 1) for i in range(2)] fase.adicionar_porco(*porcos) fase.adicionar_passaro(*passaros) self.assertEqual(EM_ANDAMENTO, fase.status()) for passaro, porco in zip(passaros, porcos): passaro.colidir(porco, 3) self.assertEqual(VITORIA, fase.status(), 'Sem porcos ativos o jogo deveria terminar com vitória') fase.adicionar_obstaculo(Obstaculo()) self.assertEqual(VITORIA, fase.status(), 'Obstáculo não interfere para definir vitória') porco = Porco() fase.adicionar_porco(porco) self.assertEqual(DERROTA, fase.status(), 'Com Porco ativo e sem pássaro para lançar, o jogo deveria acabar em derrota') fase.adicionar_passaro(PassaroAmarelo()) self.assertEqual(EM_ANDAMENTO, fase.status(), 'Com Porco ativo e com pássaro para lançar, o jogo não deveria acabar') porco.colidir(porco, 3) self.assertEqual(VITORIA, fase.status(), 'Sem porco ativo, o jogo deveria acabar com vitória') def teste_lancar_passaro_sem_erro_quando_nao_existe_passaro(self): passaro_vermelho, passaro_amarelo = PassaroVermelho(1, 1), PassaroAmarelo(1, 1) fase = Fase() fase.adicionar_passaro(passaro_vermelho, passaro_amarelo) self.assertFalse(passaro_vermelho.foi_lancado()) self.assertFalse(passaro_amarelo.foi_lancado()) fase.lancar(90, 1) fase.lancar(45, 3) fase.lancar(31, 5) # testando que lançar passaros depios de todos lançados não causa erro self.assertTrue(passaro_vermelho.foi_lancado()) self.assertEqual(math.radians(90), passaro_vermelho._angulo_de_lancamento) self.assertEqual(1, passaro_vermelho._tempo_de_lancamento) self.assertTrue(passaro_amarelo.foi_lancado()) self.assertEqual(math.radians(45), passaro_amarelo._angulo_de_lancamento) self.assertEqual(3, passaro_amarelo._tempo_de_lancamento) def teste_intervalo_de_colisao_padrão(self): ''' Método que testa se o intervalo de colisão da Fase é repassado aos atores. 
Padrão de intervalo é 1 ''' fase = Fase() passaro = PassaroAmarelo(1, 1) fase.adicionar_passaro(passaro) porco = Porco(2, 2) fase.adicionar_porco(porco) fase.calcular_pontos(0) self.assertEqual(DESTRUIDO, passaro.status) self.assertEqual(DESTRUIDO, porco.status) def teste_intervalo_de_colisao_nao_padrao(self): ''' Método que testa se o intervalo de colisão da Fase é repassado aos atores. valor testado: 31 ''' fase = Fase(30) passaro = PassaroAmarelo(1, 1) fase.adicionar_passaro(passaro) porco = Porco(31, 31) fase.adicionar_porco(porco) fase.calcular_pontos(0) self.assertEqual(DESTRUIDO, passaro.status) self.assertEqual(DESTRUIDO, porco.status) def teste_calcular_pontos(self): fase_exemplo = criar_fase_exemplo() expected = set([Ponto(3, 3, 'A'), Ponto(3, 3, 'A'), Ponto(78, 1, '@'), Ponto(31, 10, 'O'), Ponto(70, 1, '@'), Ponto(3, 3, 'V')]) self.assertSetEqual(expected, set(fase_exemplo.calcular_pontos(0))) # ??? Investigar #x = set(fase_exemplo.calcular_pontos(0)) # ??? Investigar self.assertSetEqual(expected, x) # ??? Investigar fase_exemplo.lancar(45, 1) # i variando de 1 até 2.9 for i in range(100, 300, 1): fase_exemplo.calcular_pontos(i / 100) fase_exemplo.lancar(63, 3) # i variando de 3 até 3.9 for i in range(300, 400, 1): fase_exemplo.calcular_pontos(i / 100) fase_exemplo.lancar(23, 4) expected = set([Ponto(32, 11, 'v'), Ponto(17, 25, 'A'), Ponto(3, 3, 'A'), Ponto(31, 10, ' '), Ponto(78, 1, '@'), Ponto(70, 1, '@')]) self.assertSetEqual(expected, set(fase_exemplo.calcular_pontos(4))) # i variando de 4 até 6.9 for i in range(400, 700, 1): fase_exemplo.calcular_pontos(i / 100) expected = set( [Ponto(32, 11, 'v'), Ponto(57, 30, 'A'), Ponto(70, 2, 'a'), Ponto(31, 10, ' '), Ponto(78, 1, '@'), Ponto(70, 1, '+')]) self.assertSetEqual(expected, set(fase_exemplo.calcular_pontos(7))) # i variando de 7 até 8.49 for i in range(700, 849, 1): fase_exemplo.calcular_pontos(i / 100) print(fase_exemplo.calcular_pontos(8.5)) expected = set([Ponto(32, 11, 'v'), Ponto(77, 0, 'a'), Ponto(70, 2, 'a'), Ponto(31, 10, ' '), Ponto(78, 1, '+'), Ponto(70, 1, '+')]) self.assertSetEqual(expected, set(fase_exemplo.calcular_pontos(8.5))) self.assertEqual(VITORIA, fase_exemplo.status()) def criar_fase_exemplo(multiplicador=1): fase_exemplo = Fase(1 if multiplicador == 1 else 32) passaros = [PassaroVermelho(3 * multiplicador, 3 * multiplicador), PassaroAmarelo(3 * multiplicador, 3 * multiplicador), PassaroAmarelo(3 * multiplicador, 3 * multiplicador)] porcos = [Porco(78 * multiplicador, multiplicador), Porco(70 * multiplicador, multiplicador)] obstaculos = [Obstaculo(31 * multiplicador, 10 * multiplicador)] fase_exemplo.adicionar_passaro(*passaros) fase_exemplo.adicionar_porco(*porcos) fase_exemplo.adicionar_obstaculo(*obstaculos) return fase_exemplo if __name__ == '__main__': rodar_fase(criar_fase_exemplo(10))
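
The status tests above pin down a simple rule: victory once no pigs are active, defeat when active pigs remain and no birds are left to launch, and in progress otherwise. Below is a minimal sketch of that rule; the argument names porcos_ativos and passaros_disponiveis are hypothetical stand-ins, not the real Fase attributes.

# Sketch of the win/lose rule exercised by FaseTestes; placeholder constants,
# the real ones live in fase.py.
EM_ANDAMENTO, VITORIA, DERROTA = range(3)

def status(porcos_ativos, passaros_disponiveis):
    if not porcos_ativos:
        return VITORIA        # no active pigs left: victory
    if not passaros_disponiveis:
        return DERROTA        # active pigs but no birds to launch: defeat
    return EM_ANDAMENTO       # otherwise the game keeps going

assert status(porcos_ativos=0, passaros_disponiveis=0) == VITORIA
assert status(porcos_ativos=1, passaros_disponiveis=0) == DERROTA
assert status(porcos_ativos=1, passaros_disponiveis=2) == EM_ANDAMENTO
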
py
1a4392273bc9a10877f6d4cee5e53d447e993131
# Copyright Contributors to the Pyro project. # SPDX-License-Identifier: Apache-2.0 from numpy.testing import assert_allclose import pytest from jax import random import jax.numpy as jnp import numpyro from numpyro.contrib.control_flow import cond, scan import numpyro.distributions as dist from numpyro.handlers import seed, substitute, trace from numpyro.infer import MCMC, NUTS, SVI, Predictive, Trace_ELBO from numpyro.infer.util import potential_energy def test_scan(): def model(T=10, q=1, r=1, phi=0.0, beta=0.0): def transition(state, i): x0, mu0 = state x1 = numpyro.sample("x", dist.Normal(phi * x0, q)) mu1 = beta * mu0 + x1 y1 = numpyro.sample("y", dist.Normal(mu1, r)) numpyro.deterministic("y2", y1 * 2) return (x1, mu1), (x1, y1) mu0 = x0 = numpyro.sample("x_0", dist.Normal(0, q)) y0 = numpyro.sample("y_0", dist.Normal(mu0, r)) _, xy = scan(transition, (x0, mu0), jnp.arange(T)) x, y = xy return jnp.append(x0, x), jnp.append(y0, y) T = 10 num_samples = 100 kernel = NUTS(model) mcmc = MCMC(kernel, num_warmup=100, num_samples=num_samples) mcmc.run(random.PRNGKey(0), T=T) assert set(mcmc.get_samples()) == {"x", "y", "y2", "x_0", "y_0"} mcmc.print_summary() samples = mcmc.get_samples() x = samples.pop("x")[0] # take 1 sample of x # this tests for the composition of condition and substitute # this also tests if we can use `vmap` for predictive. future = 5 predictive = Predictive( numpyro.handlers.condition(model, {"x": x}), samples, return_sites=["x", "y", "y2"], parallel=True, ) result = predictive(random.PRNGKey(1), T=T + future) expected_shape = (num_samples, T + future) assert result["x"].shape == expected_shape assert result["y"].shape == expected_shape assert result["y2"].shape == expected_shape assert_allclose(result["x"][:, :T], jnp.broadcast_to(x, (num_samples, T))) assert_allclose(result["y"][:, :T], samples["y"]) @pytest.mark.xfail(raises=RuntimeError) def test_nested_scan_smoke(): def model(): def outer_fn(y, val): def body_fn(z, val): z = numpyro.sample("z", dist.Normal(z, 1)) return z, z y = numpyro.sample("y", dist.Normal(y, 1)) _, zs = scan(body_fn, y, None, 4) return y, zs x = numpyro.sample("x", dist.Normal(0, 1)) _, zs = scan(outer_fn, x, None, 3) return zs data = jnp.arange(12).reshape((3, 4)) # we can scan but can't substitute values through multiple levels of scan with trace(), seed(rng_seed=0), substitute(data={"z": data}): zs = model() assert_allclose(zs, data) def test_scan_constrain_reparam_compatible(): def model(T, q=1, r=1, phi=0.0, beta=0.0): x = 0.0 mu = 0.0 for i in range(T): x = numpyro.sample(f"x_{i}", dist.LogNormal(phi * x, q)) mu = beta * mu + x numpyro.sample(f"y_{i}", dist.Normal(mu, r)) def fun_model(T, q=1, r=1, phi=0.0, beta=0.0): def transition(state, i): x, mu = state x = numpyro.sample("x", dist.LogNormal(phi * x, q)) mu = beta * mu + x numpyro.sample("y", dist.Normal(mu, r)) return (x, mu), None scan(transition, (0.0, 0.0), jnp.arange(T)) T = 10 params = {} for i in range(T): params[f"x_{i}"] = (i + 1.0) / 10 params[f"y_{i}"] = -i / 5 fun_params = {"x": jnp.arange(1, T + 1) / 10, "y": -jnp.arange(T) / 5} actual_log_joint = potential_energy(fun_model, (T,), {}, fun_params) expected_log_joint = potential_energy(model, (T,), {}, params) assert_allclose(actual_log_joint, expected_log_joint) def test_scan_without_stack(): def multiply_and_add_repeatedly(K, c_in): def iteration(c_prev, c_in): c_next = jnp.dot(c_prev, K) + c_in return c_next, (c_next,) _, (ys,) = scan(iteration, init=jnp.asarray([1.0, 0.0]), xs=c_in) return ys result = 
multiply_and_add_repeatedly( K=jnp.asarray([[0.7, 0.3], [0.3, 0.7]]), c_in=jnp.asarray([[1.0, 0.0]]) ) assert_allclose( result, [[1.7, 0.3]], ) def test_cond(): def model(): def true_fun(_): x = numpyro.sample("x", dist.Normal(4.0)) numpyro.deterministic("z", x - 4.0) def false_fun(_): x = numpyro.sample("x", dist.Normal(0.0)) numpyro.deterministic("z", x) cluster = numpyro.sample("cluster", dist.Normal()) cond(cluster > 0, true_fun, false_fun, None) def guide(): m1 = numpyro.param("m1", 2.0) s1 = numpyro.param("s1", 0.1, constraint=dist.constraints.positive) m2 = numpyro.param("m2", 2.0) s2 = numpyro.param("s2", 0.1, constraint=dist.constraints.positive) def true_fun(_): numpyro.sample("x", dist.Normal(m1, s1)) def false_fun(_): numpyro.sample("x", dist.Normal(m2, s2)) cluster = numpyro.sample("cluster", dist.Normal()) cond(cluster > 0, true_fun, false_fun, None) svi = SVI(model, guide, numpyro.optim.Adam(1e-2), Trace_ELBO(num_particles=100)) params, losses = svi.run(random.PRNGKey(0), num_steps=2500) predictive = Predictive( model, guide=guide, params=params, num_samples=1000, return_sites=["cluster", "x", "z"], ) result = predictive(random.PRNGKey(0)) assert result["cluster"].shape == (1000,) assert result["x"].shape == (1000,) assert result["z"].shape == (1000,) mcmc = MCMC( NUTS(model), num_warmup=500, num_samples=2500, num_chains=4, chain_method="sequential", ) mcmc.run(random.PRNGKey(0)) x = mcmc.get_samples()["x"] assert x.shape == (10_000,) assert_allclose( [x[x > 2.0].mean(), x[x > 2.0].std(), x[x < 2.0].mean(), x[x < 2.0].std()], [4.01, 0.965, -0.01, 0.965], atol=0.1, ) assert_allclose([x.mean(), x.std()], [2.0, jnp.sqrt(5.0)], atol=0.5)
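
The final assertion in test_cond checks the moments of an equal mixture of N(0, 1) and N(4, 1): the mean is 2 and the variance is the within-component variance (1) plus the variance of the component means (4), i.e. 5. A quick NumPy-only sanity check of those numbers, independent of numpyro's cond API:

import numpy as np

rng = np.random.default_rng(0)
n = 100_000
cluster = rng.normal(size=n) > 0                        # ~50/50 split, as in the model
x = np.where(cluster, rng.normal(4.0, 1.0, n), rng.normal(0.0, 1.0, n))

print(x.mean(), x.std())                                # roughly 2.0 and sqrt(5) ~ 2.24
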
py
1a4393469fded2ade23b1e2fa20dacc5dd107e24
""" Pytorch models. """ import torch import torch.autograd as autograd import torch.nn as nn import torch.functional as F import torch.optim as optim from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from torch.nn.init import xavier_uniform_ import utils CUDA = torch.cuda.is_available() class TSpec(nn.Module): def __init__(self, ts_len, spec_len, hid_dim, layers, out_dims): """ Model that accepts, as input, a timeseries concatenated with the spectra of that timeseries. The timeseries is fed through a 1DCNN to extract interesting shapes in the signal. Simultaneously, the spectra of the timeseries is analyzed by a seperate MLP head, which learns about informative peaks in the spectra. The outputs of these two paths are then concatenated and fed through an embedding MLP. Finally, for the n outputs requested, single MLP layer is used to predict either a real number (regression) or distribution (classification). ts_len: Number of timepoints in the timeseries (CNN->LSTM path). spec_len: Number of frequency bins in the spectra (MLP). hid_dim: Controls the size of all intermediate layers. layers: Number of layers for the CNN, MLP, and embedding components. out_dims: List of integers for the size of each output head. One for each prediction task. Regression == 1, Classification >= 1. """ super(TSpec, self).__init__() self.ts_len = ts_len self.spec_len = spec_len self.hid_dim = hid_dim self.layers = layers self.out_dims = out_dims # 5-layer CNN accepts the timeseries input. # Use mean-pooling so we are more sensitive to exact mean R-R times. # Conv --> AvgPool --> BatchNorm --> ReLU. self.conv = nn.Sequential( nn.Conv1d(1, hid_dim, 5), nn.AvgPool1d(5), nn.BatchNorm1d(hid_dim), nn.ReLU(), nn.Conv1d(hid_dim, hid_dim, 5), nn.AvgPool1d(5), nn.BatchNorm1d(hid_dim), nn.ReLU(), nn.Conv1d(hid_dim, hid_dim, 5), nn.AvgPool1d(5), nn.BatchNorm1d(hid_dim), nn.ReLU(), nn.Conv1d(hid_dim, hid_dim, 5), nn.AvgPool1d(5), nn.BatchNorm1d(hid_dim), nn.ReLU(), nn.Conv1d(hid_dim, hid_dim, 3), nn.AvgPool1d(2), nn.BatchNorm1d(hid_dim), nn.ReLU(), ) # n-layer MLP accepts the spectra. Linear --> Batchnorm --> ReLU # Minimum 2-layers, first layer always embeds to FIXED neurons. FIXED = 1000 arch = [] arch.append(nn.Linear(spec_len, FIXED)) arch.append(nn.BatchNorm1d(FIXED)) arch.append(nn.ReLU()) for i in range(layers): if i == 0: arch.append(nn.Linear(FIXED, hid_dim)) else: arch.append(nn.Linear(hid_dim, hid_dim)) arch.append(nn.BatchNorm1d(hid_dim)) arch.append(nn.ReLU()) self.mlp = nn.Sequential(*arch) # Embedding mixes the timeseries and spectral representations. # Linear --> BatchNorm --> ReLU. arch = [] for i in range(layers): if i == 0: arch.append(nn.Linear(hid_dim*2, hid_dim)) else: arch.append(nn.Linear(hid_dim, hid_dim)) arch.append(nn.BatchNorm1d(hid_dim)) arch.append(nn.ReLU()) self.embedding = nn.Sequential(*arch) # Output heads are a single fully connected layer. self.outputs = nn.ModuleList([]) for out_dim in out_dims: self.outputs.append(nn.Linear(hid_dim, out_dim)) def forward(self, X): """ X is size=(batch_size, ts_len+spec_len). We use self.ts_len and self.spec_len to split X to be fed into the CNN head and MLP head. """ batch_size = X.size(0) X_time = X[:, :self.ts_len] X_spec = X[:, self.ts_len:] # Convolutional step on timeseries. conv_act = self.conv(X_time.unsqueeze(1)) # Pass spectra through MLP. mlp_act = self.mlp(X_spec) # Hidden state is the concatenation CNN and MLP branches. hid = torch.cat([conv_act.squeeze(), mlp_act], dim=1) # Embed mixed representations from CNN and MLP. 
y_hat = self.embedding(hid) # Generate individual predictions from this embedding. y_hats = [] for i, output in enumerate(self.outputs): y_hats.append(output(y_hat)) return(y_hats)
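
A hypothetical smoke test of the TSpec forward pass defined above. The ts_len of 3300 is chosen only so that the five Conv1d/AvgPool1d stages reduce the timeseries to a single timepoint before the squeeze/concatenate step; it, spec_len, and the other hyperparameters are assumptions, not values from the real training pipeline.

import torch

# Assumes the TSpec class above. With ts_len=3300 the conv stack shrinks the
# signal to length 1 (3300 -> 659 -> 131 -> 25 -> 4 -> 1 after each conv+pool pair).
model = TSpec(ts_len=3300, spec_len=500, hid_dim=32, layers=2, out_dims=[1, 3])
X = torch.randn(8, 3300 + 500)           # batch of 8: timeseries and spectra concatenated
y_hats = model(X)
print([y.shape for y in y_hats])         # [torch.Size([8, 1]), torch.Size([8, 3])]
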
py
1a4393711e56fa03e2c8f600476f600438cb3297
import copy import datetime import logging import traceback import warnings from pathlib import Path from typing import Any, Dict, Iterable, List, Optional, Tuple, Union from urllib.parse import urlparse from great_expectations._version import get_versions # isort:skip __version__ = get_versions()["version"] # isort:skip del get_versions # isort:skip from great_expectations.core import IDDict from great_expectations.core.batch import BatchMarkers, BatchSpec from great_expectations.core.batch_spec import ( RuntimeQueryBatchSpec, SqlAlchemyDatasourceBatchSpec, ) from great_expectations.core.util import convert_to_json_serializable from great_expectations.data_context.types.base import ConcurrencyConfig from great_expectations.exceptions import ( DatasourceKeyPairAuthBadPassphraseError, ExecutionEngineError, GreatExpectationsError, InvalidBatchSpecError, InvalidConfigError, ) from great_expectations.execution_engine import ExecutionEngine from great_expectations.execution_engine.execution_engine import MetricDomainTypes from great_expectations.execution_engine.sqlalchemy_batch_data import ( SqlAlchemyBatchData, ) from great_expectations.expectations.row_conditions import parse_condition_to_sqlalchemy from great_expectations.util import ( filter_properties_dict, get_sqlalchemy_url, import_library_module, ) from great_expectations.validator.metric_configuration import MetricConfiguration logger = logging.getLogger(__name__) try: import sqlalchemy as sa except ImportError: sa = None try: from sqlalchemy.engine import reflection from sqlalchemy.engine.default import DefaultDialect from sqlalchemy.engine.url import URL from sqlalchemy.exc import OperationalError from sqlalchemy.sql import Selectable from sqlalchemy.sql.elements import TextClause, quoted_name except ImportError: reflection = None DefaultDialect = None Selectable = None TextClause = None quoted_name = None OperationalError = None try: import psycopg2 import sqlalchemy.dialects.postgresql.psycopg2 as sqlalchemy_psycopg2 except (ImportError, KeyError): sqlalchemy_psycopg2 = None try: import sqlalchemy_redshift.dialect except ImportError: sqlalchemy_redshift = None try: import snowflake.sqlalchemy.snowdialect if sa: # Sometimes "snowflake-sqlalchemy" fails to self-register in certain environments, so we do it explicitly. # (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake) sa.dialects.registry.register("snowflake", "snowflake.sqlalchemy", "dialect") except (ImportError, KeyError, AttributeError): snowflake = None try: import pybigquery.sqlalchemy_bigquery ### # NOTE: 20210816 - jdimatteo: A convention we rely on is for SqlAlchemy dialects # to define an attribute "dialect". A PR has been submitted to fix this upstream # with https://github.com/googleapis/python-bigquery-sqlalchemy/pull/251. If that # fix isn't present, add this "dialect" attribute here: if not hasattr(pybigquery.sqlalchemy_bigquery, "dialect"): pybigquery.sqlalchemy_bigquery.dialect = ( pybigquery.sqlalchemy_bigquery.BigQueryDialect ) # Sometimes "pybigquery.sqlalchemy_bigquery" fails to self-register in Azure (our CI/CD pipeline) in certain cases, so we do it explicitly. 
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake) sa.dialects.registry.register( "bigquery", "pybigquery.sqlalchemy_bigquery", "dialect" ) try: getattr(pybigquery.sqlalchemy_bigquery, "INTEGER") bigquery_types_tuple = None except AttributeError: # In older versions of the pybigquery driver, types were not exported, so we use a hack logger.warning( "Old pybigquery driver version detected. Consider upgrading to 0.4.14 or later." ) from collections import namedtuple BigQueryTypes = namedtuple( "BigQueryTypes", sorted(pybigquery.sqlalchemy_bigquery._type_map) ) bigquery_types_tuple = BigQueryTypes(**pybigquery.sqlalchemy_bigquery._type_map) except (ImportError, AttributeError): bigquery_types_tuple = None pybigquery = None def _get_dialect_type_module(dialect): """Given a dialect, returns the dialect type, which is defines the engine/system that is used to communicates with the database/database implementation. Currently checks for RedShift/BigQuery dialects""" if dialect is None: logger.warning( "No sqlalchemy dialect found; relying in top-level sqlalchemy types." ) return sa try: # Redshift does not (yet) export types to top level; only recognize base SA types if isinstance(dialect, sqlalchemy_redshift.dialect.RedshiftDialect): return dialect.sa except (TypeError, AttributeError): pass # Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple try: if ( isinstance( dialect, pybigquery.sqlalchemy_bigquery.BigQueryDialect, ) and bigquery_types_tuple is not None ): return bigquery_types_tuple except (TypeError, AttributeError): pass return dialect class SqlAlchemyExecutionEngine(ExecutionEngine): def __init__( self, name=None, credentials=None, data_context=None, engine=None, connection_string=None, url=None, batch_data_dict=None, create_temp_table=True, concurrency: Optional[ConcurrencyConfig] = None, **kwargs, # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine ): """Builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to access the desired database. Also initializes the dialect to be used and configures usage statistics. Args: name (str): \ The name of the SqlAlchemyExecutionEngine credentials: \ If the Execution Engine is not provided, the credentials can be used to build the Execution Engine. If the Engine is provided, it will be used instead data_context (DataContext): \ An object representing a Great Expectations project that can be used to access Expectation Suites and the Project Data itself engine (Engine): \ A SqlAlchemy Engine used to set the SqlAlchemyExecutionEngine being configured, useful if an Engine has already been configured and should be reused. Will override Credentials if provided. connection_string (string): \ If neither the engines nor the credentials have been provided, a connection string can be used to access the data. This will be overridden by both the engine and credentials if those are provided. url (string): \ If neither the engines, the credentials, nor the connection_string have been provided, a url can be used to access the data. This will be overridden by all other configuration options if any are provided. concurrency (ConcurrencyConfig): Concurrency config used to configure the sqlalchemy engine. 
""" super().__init__(name=name, batch_data_dict=batch_data_dict) self._name = name self._credentials = credentials self._connection_string = connection_string self._url = url self._create_temp_table = create_temp_table if engine is not None: if credentials is not None: logger.warning( "Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. " "Ignoring credentials." ) self.engine = engine else: concurrency = ( concurrency if concurrency is not None else ConcurrencyConfig() ) concurrency.add_sqlalchemy_create_engine_parameters(kwargs) if credentials is not None: self.engine = self._build_engine(credentials=credentials, **kwargs) elif connection_string is not None: self.engine = sa.create_engine(connection_string, **kwargs) elif url is not None: self.drivername = urlparse(url).scheme self.engine = sa.create_engine(url, **kwargs) else: raise InvalidConfigError( "Credentials or an engine are required for a SqlAlchemyExecutionEngine." ) # Get the dialect **for purposes of identifying types** if self.engine.dialect.name.lower() in [ "postgresql", "mysql", "sqlite", "oracle", "mssql", ]: # These are the officially included and supported dialects by sqlalchemy self.dialect_module = import_library_module( module_name="sqlalchemy.dialects." + self.engine.dialect.name ) elif self.engine.dialect.name.lower() == "snowflake": self.dialect_module = import_library_module( module_name="snowflake.sqlalchemy.snowdialect" ) elif self.engine.dialect.name.lower() == "redshift": self.dialect_module = import_library_module( module_name="sqlalchemy_redshift.dialect" ) elif self.engine.dialect.name.lower() == "bigquery": self.dialect_module = import_library_module( module_name="pybigquery.sqlalchemy_bigquery" ) else: self.dialect_module = None # <WILL> 20210726 - engine_backup is used by the snowflake connector, which requires connection and engine # to be closed and disposed separately. Currently self.engine can refer to either a Connection or Engine, # depending on the backend. This will need to be cleaned up in an upcoming refactor, so that Engine and # Connection can be handled separately. self._engine_backup = None if self.engine and self.engine.dialect.name.lower() in [ "sqlite", "mssql", "snowflake", "mysql", ]: self._engine_backup = self.engine # sqlite/mssql temp tables only persist within a connection so override the engine self.engine = self.engine.connect() # Send a connect event to provide dialect type if data_context is not None and getattr( data_context, "_usage_statistics_handler", None ): handler = data_context._usage_statistics_handler handler.send_usage_message( event="execution_engine.sqlalchemy.connect", event_payload={ "anonymized_name": handler._execution_engine_anonymizer.anonymize( self.name ), "sqlalchemy_dialect": self.engine.name, }, success=True, ) # Gather the call arguments of the present function (and add the "class_name"), filter out the Falsy values, # and set the instance "_config" variable equal to the resulting dictionary. 
self._config = { "name": name, "credentials": credentials, "data_context": data_context, "engine": engine, "connection_string": connection_string, "url": url, "batch_data_dict": batch_data_dict, "module_name": self.__class__.__module__, "class_name": self.__class__.__name__, } self._config.update(kwargs) filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True) @property def credentials(self): return self._credentials @property def connection_string(self): return self._connection_string @property def url(self): return self._url def _build_engine(self, credentials, **kwargs) -> "sa.engine.Engine": """ Using a set of given credentials, constructs an Execution Engine , connecting to a database using a URL or a private key path. """ # Update credentials with anything passed during connection time drivername = credentials.pop("drivername") schema_name = credentials.pop("schema_name", None) if schema_name is not None: logger.warning( "schema_name specified creating a URL with schema is not supported. Set a default " "schema on the user connecting to your database." ) create_engine_kwargs = kwargs connect_args = credentials.pop("connect_args", None) if connect_args: create_engine_kwargs["connect_args"] = connect_args if "private_key_path" in credentials: options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url( drivername, credentials ) else: options = get_sqlalchemy_url(drivername, **credentials) self.drivername = drivername engine = sa.create_engine(options, **create_engine_kwargs) return engine def _get_sqlalchemy_key_pair_auth_url( self, drivername: str, credentials: dict ) -> Tuple["sa.engine.url.URL", Dict]: """ Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided values into a private key. If passphrase is incorrect, this will fail and an exception is raised. Args: drivername(str) - The name of the driver class credentials(dict) - A dictionary of database credentials used to access the database Returns: a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs. """ from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization private_key_path = credentials.pop("private_key_path") private_key_passphrase = credentials.pop("private_key_passphrase") with Path(private_key_path).expanduser().resolve().open(mode="rb") as key: try: p_key = serialization.load_pem_private_key( key.read(), password=private_key_passphrase.encode() if private_key_passphrase else None, backend=default_backend(), ) except ValueError as e: if "incorrect password" in str(e).lower(): raise DatasourceKeyPairAuthBadPassphraseError( datasource_name="SqlAlchemyDatasource", message="Decryption of key failed, was the passphrase incorrect?", ) from e else: raise e pkb = p_key.private_bytes( encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), ) credentials_driver_name = credentials.pop("drivername", None) create_engine_kwargs = {"connect_args": {"private_key": pkb}} return ( get_sqlalchemy_url(drivername or credentials_driver_name, **credentials), create_engine_kwargs, ) def get_domain_records( self, domain_kwargs: Dict, ) -> Selectable: """ Uses the given domain kwargs (which include row_condition, condition_parser, and ignore_row_if directives) to obtain and/or query a batch. Returns in the format of an SqlAlchemy table/column(s) object. 
Args: domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain Returns: An SqlAlchemy table/column(s) (the selectable object for obtaining data on which to compute) """ batch_id = domain_kwargs.get("batch_id") if batch_id is None: # We allow no batch id specified if there is only one batch if self.active_batch_data: data_object = self.active_batch_data else: raise GreatExpectationsError( "No batch is specified, but could not identify a loaded batch." ) else: if batch_id in self.loaded_batch_data_dict: data_object = self.loaded_batch_data_dict[batch_id] else: raise GreatExpectationsError( f"Unable to find batch with batch_id {batch_id}" ) if "table" in domain_kwargs and domain_kwargs["table"] is not None: # TODO: Add logic to handle record_set_name once implemented # (i.e. multiple record sets (tables) in one batch if domain_kwargs["table"] != data_object.selectable.name: selectable = sa.Table( domain_kwargs["table"], sa.MetaData(), schema=data_object._schema_name, ) else: selectable = data_object.selectable elif "query" in domain_kwargs: raise ValueError( "query is not currently supported by SqlAlchemyExecutionEngine" ) else: selectable = data_object.selectable # Filtering by row condition. if ( "row_condition" in domain_kwargs and domain_kwargs["row_condition"] is not None ): condition_parser = domain_kwargs["condition_parser"] if condition_parser == "great_expectations__experimental__": parsed_condition = parse_condition_to_sqlalchemy( domain_kwargs["row_condition"] ) selectable = sa.select( "*", from_obj=selectable, whereclause=parsed_condition ) else: raise GreatExpectationsError( "SqlAlchemyExecutionEngine only supports the great_expectations condition_parser." ) if "column" in domain_kwargs: return selectable if ( "column_A" in domain_kwargs and "column_B" in domain_kwargs and "ignore_row_if" in domain_kwargs ): if self.active_batch_data.use_quoted_name: # Checking if case-sensitive and using appropriate name # noinspection PyPep8Naming column_A_name = quoted_name(domain_kwargs["column_A"], quote=True) # noinspection PyPep8Naming column_B_name = quoted_name(domain_kwargs["column_B"], quote=True) else: # noinspection PyPep8Naming column_A_name = domain_kwargs["column_A"] # noinspection PyPep8Naming column_B_name = domain_kwargs["column_B"] ignore_row_if = domain_kwargs["ignore_row_if"] if ignore_row_if == "both_values_are_missing": selectable = ( sa.select([sa.text("*")]) .select_from(selectable) .where( sa.not_( sa.and_( sa.column(column_A_name) == None, sa.column(column_B_name) == None, ) ) ) ) elif ignore_row_if == "either_value_is_missing": selectable = ( sa.select([sa.text("*")]) .select_from(selectable) .where( sa.not_( sa.or_( sa.column(column_A_name) == None, sa.column(column_B_name) == None, ) ) ) ) else: if ignore_row_if not in ["neither", "never"]: raise ValueError( f'Unrecognized value of ignore_row_if ("{ignore_row_if}").' ) if ignore_row_if == "never": warnings.warn( f"""The correct "no-action" value of the "ignore_row_if" directive for the column pair case is \ "neither" (the use of "{ignore_row_if}" will be deprecated). Please update code accordingly. 
""", DeprecationWarning, ) return selectable if "column_list" in domain_kwargs and "ignore_row_if" in domain_kwargs: if self.active_batch_data.use_quoted_name: # Checking if case-sensitive and using appropriate name column_list = [ quoted_name(domain_kwargs[column_name], quote=True) for column_name in domain_kwargs["column_list"] ] else: column_list = domain_kwargs["column_list"] ignore_row_if = domain_kwargs["ignore_row_if"] if ignore_row_if == "all_values_are_missing": selectable = ( sa.select([sa.text("*")]) .select_from(selectable) .where( sa.not_( sa.and_( *( sa.column(column_name) == None for column_name in column_list ) ) ) ) ) elif ignore_row_if == "any_value_is_missing": selectable = ( sa.select([sa.text("*")]) .select_from(selectable) .where( sa.not_( sa.or_( *( sa.column(column_name) == None for column_name in column_list ) ) ) ) ) else: if ignore_row_if != "never": raise ValueError( f'Unrecognized value of ignore_row_if ("{ignore_row_if}").' ) return selectable return selectable def get_compute_domain( self, domain_kwargs: Dict, domain_type: Union[str, MetricDomainTypes], accessor_keys: Optional[Iterable[str]] = None, ) -> Tuple[Selectable, dict, dict]: """Uses a given batch dictionary and domain kwargs to obtain a SqlAlchemy column object. Args: domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would like to be using, or a corresponding string value representing it. String types include "identity", "column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the class MetricDomainTypes. accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when describing the domain and simply transferred with their associated values into accessor_domain_kwargs. Returns: SqlAlchemy column """ selectable = self.get_domain_records( domain_kwargs=domain_kwargs, ) # Extracting value from enum if it is given for future computation domain_type = MetricDomainTypes(domain_type) # Warning user if accessor keys are in any domain that is not of type table, will be ignored if ( domain_type != MetricDomainTypes.TABLE and accessor_keys is not None and len(list(accessor_keys)) > 0 ): logger.warning( 'Accessor keys ignored since Metric Domain Type is not "table"' ) compute_domain_kwargs = copy.deepcopy(domain_kwargs) accessor_domain_kwargs = {} if domain_type == MetricDomainTypes.TABLE: if accessor_keys is not None and len(list(accessor_keys)) > 0: for key in accessor_keys: accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key) if len(domain_kwargs.keys()) > 0: # Warn user if kwarg not "normal". unexpected_keys: set = set(compute_domain_kwargs.keys()).difference( { "batch_id", "table", "row_condition", "condition_parser", } ) if len(unexpected_keys) > 0: unexpected_keys_str: str = ", ".join( map(lambda element: f'"{element}"', unexpected_keys) ) logger.warning( f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type "{domain_type.value}".' 
) return selectable, compute_domain_kwargs, accessor_domain_kwargs elif domain_type == MetricDomainTypes.COLUMN: if "column" not in compute_domain_kwargs: raise GreatExpectationsError( "Column not provided in compute_domain_kwargs" ) # Checking if case-sensitive and using appropriate name if self.active_batch_data.use_quoted_name: accessor_domain_kwargs["column"] = quoted_name( compute_domain_kwargs.pop("column"), quote=True ) else: accessor_domain_kwargs["column"] = compute_domain_kwargs.pop("column") return selectable, compute_domain_kwargs, accessor_domain_kwargs elif domain_type == MetricDomainTypes.COLUMN_PAIR: if not ( "column_A" in compute_domain_kwargs and "column_B" in compute_domain_kwargs ): raise GreatExpectationsError( "column_A or column_B not found within compute_domain_kwargs" ) # Checking if case-sensitive and using appropriate name if self.active_batch_data.use_quoted_name: accessor_domain_kwargs["column_A"] = quoted_name( compute_domain_kwargs.pop("column_A"), quote=True ) accessor_domain_kwargs["column_B"] = quoted_name( compute_domain_kwargs.pop("column_B"), quote=True ) else: accessor_domain_kwargs["column_A"] = compute_domain_kwargs.pop( "column_A" ) accessor_domain_kwargs["column_B"] = compute_domain_kwargs.pop( "column_B" ) return selectable, compute_domain_kwargs, accessor_domain_kwargs elif domain_type == MetricDomainTypes.MULTICOLUMN: if "column_list" not in domain_kwargs: raise GreatExpectationsError( "column_list not found within domain_kwargs" ) column_list = compute_domain_kwargs.pop("column_list") if len(column_list) < 2: raise GreatExpectationsError( "column_list must contain at least 2 columns" ) # Checking if case-sensitive and using appropriate name if self.active_batch_data.use_quoted_name: accessor_domain_kwargs["column_list"] = [ quoted_name(column_name, quote=True) for column_name in column_list ] else: accessor_domain_kwargs["column_list"] = column_list return selectable, compute_domain_kwargs, accessor_domain_kwargs # Letting selectable fall through return selectable, compute_domain_kwargs, accessor_domain_kwargs def resolve_metric_bundle( self, metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Any, dict, dict]], ) -> dict: """For every metric in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds bundles of the metrics into one large query dictionary so that they are all executed simultaneously. Will fail if bundling the metrics together is not possible. Args: metric_fn_bundle (Iterable[Tuple[MetricConfiguration, Callable, dict]): \ A Dictionary containing a MetricProvider's MetricConfiguration (its unique identifier), its metric provider function (the function that actually executes the metric), and the arguments to pass to the metric provider function. A dictionary of metrics defined in the registry and corresponding arguments Returns: A dictionary of metric names and their corresponding now-queried values. """ resolved_metrics = {} # We need a different query for each domain (where clause). 
queries: Dict[Tuple, dict] = {} for ( metric_to_resolve, engine_fn, compute_domain_kwargs, accessor_domain_kwargs, metric_provider_kwargs, ) in metric_fn_bundle: if not isinstance(compute_domain_kwargs, IDDict): compute_domain_kwargs = IDDict(compute_domain_kwargs) domain_id = compute_domain_kwargs.to_id() if domain_id not in queries: queries[domain_id] = { "select": [], "ids": [], "domain_kwargs": compute_domain_kwargs, } queries[domain_id]["select"].append( engine_fn.label(metric_to_resolve.metric_name) ) queries[domain_id]["ids"].append(metric_to_resolve.id) for query in queries.values(): domain_kwargs = query["domain_kwargs"] selectable = self.get_domain_records( domain_kwargs=domain_kwargs, ) assert len(query["select"]) == len(query["ids"]) try: """ If a custom query is passed, selectable will be TextClause and not formatted as a subquery wrapped in "(subquery) alias". TextClause must first be converted to TextualSelect using sa.columns() before it can be converted to type Subquery """ if isinstance(selectable, TextClause): res = self.engine.execute( sa.select(query["select"]).select_from( selectable.columns().subquery() ) ).fetchall() else: res = self.engine.execute( sa.select(query["select"]).select_from(selectable) ).fetchall() logger.debug( f"SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(domain_kwargs).to_id()}" ) except OperationalError as oe: exception_message: str = "An SQL execution Exception occurred. " exception_traceback: str = traceback.format_exc() exception_message += f'{type(oe).__name__}: "{str(oe)}". Traceback: "{exception_traceback}".' logger.error(exception_message) raise ExecutionEngineError(message=exception_message) assert ( len(res) == 1 ), "all bundle-computed metrics must be single-value statistics" assert len(query["ids"]) == len( res[0] ), "unexpected number of metrics returned" for idx, id in enumerate(query["ids"]): resolved_metrics[id] = convert_to_json_serializable(res[0][idx]) return resolved_metrics def close(self): """ Note: Will 20210729 This is a helper function that will close and dispose Sqlalchemy objects that are used to connect to a database. Databases like Snowflake require the connection and engine to be instantiated and closed separately, and not doing so has caused problems with hanging connections. Currently the ExecutionEngine does not support handling connections and engine separately, and will actually override the engine with a connection in some cases, obfuscating what object is used to actually used by the ExecutionEngine to connect to the external database. 
This will be handled in an upcoming refactor, which will allow this function to eventually become: self.connection.close() self.engine.dispose() More background can be found here: https://github.com/great-expectations/great_expectations/pull/3104/ """ if self._engine_backup: self.engine.close() self._engine_backup.dispose() else: self.engine.dispose() ### Splitter methods for partitioning tables ### def _split_on_whole_table(self, table_name: str, batch_identifiers: dict): """'Split' by returning the whole table""" # return sa.column(column_name) == batch_identifiers[column_name] return 1 == 1 def _split_on_column_value( self, table_name: str, column_name: str, batch_identifiers: dict ): """Split using the values in the named column""" return sa.column(column_name) == batch_identifiers[column_name] def _split_on_converted_datetime( self, table_name: str, column_name: str, batch_identifiers: dict, date_format_string: str = "%Y-%m-%d", ): """Convert the values in the named column to the given date_format, and split on that""" return ( sa.func.strftime( date_format_string, sa.column(column_name), ) == batch_identifiers[column_name] ) def _split_on_divided_integer( self, table_name: str, column_name: str, divisor: int, batch_identifiers: dict ): """Divide the values in the named column by `divisor`, and split on that""" return ( sa.cast(sa.column(column_name) / divisor, sa.Integer) == batch_identifiers[column_name] ) def _split_on_mod_integer( self, table_name: str, column_name: str, mod: int, batch_identifiers: dict ): """Divide the values in the named column by `divisor`, and split on that""" return sa.column(column_name) % mod == batch_identifiers[column_name] def _split_on_multi_column_values( self, table_name: str, column_names: List[str], batch_identifiers: dict ): """Split on the joint values in the named columns""" return sa.and_( *( sa.column(column_name) == column_value for column_name, column_value in batch_identifiers.items() ) ) def _split_on_hashed_column( self, table_name: str, column_name: str, hash_digits: int, batch_identifiers: dict, ): """Split on the hashed value of the named column""" return ( sa.func.right(sa.func.md5(sa.column(column_name)), hash_digits) == batch_identifiers[column_name] ) ### Sampling methods ### # _sample_using_limit # _sample_using_random # _sample_using_mod # _sample_using_a_list # _sample_using_md5 def _sample_using_mod( self, column_name, mod: int, value: int, ): """Take the mod of named column, and only keep rows that match the given value""" return sa.column(column_name) % mod == value def _sample_using_a_list( self, column_name: str, value_list: list, ): """Match the values in the named column against value_list, and only keep the matches""" return sa.column(column_name).in_(value_list) def _sample_using_md5( self, column_name: str, hash_digits: int = 1, hash_value: str = "f", ): """Hash the values in the named column, and split on that""" return ( sa.func.right( sa.func.md5(sa.cast(sa.column(column_name), sa.Text)), hash_digits ) == hash_value ) def _build_selectable_from_batch_spec(self, batch_spec) -> Union[Selectable, str]: table_name: str = batch_spec["table_name"] if "splitter_method" in batch_spec: splitter_fn = getattr(self, batch_spec["splitter_method"]) split_clause = splitter_fn( table_name=table_name, batch_identifiers=batch_spec["batch_identifiers"], **batch_spec["splitter_kwargs"], ) else: split_clause = True if "sampling_method" in batch_spec: if batch_spec["sampling_method"] == "_sample_using_limit": # SQLalchemy's semantics for 
LIMIT are different than normal WHERE clauses, # so the business logic for building the query needs to be different. if self.engine.dialect.name.lower() == "oracle": # limit doesn't compile properly for oracle so we will append rownum to query string later raw_query = ( sa.select("*") .select_from( sa.table( table_name, schema=batch_spec.get("schema_name", None) ) ) .where(split_clause) ) query = str( raw_query.compile( self.engine, compile_kwargs={"literal_binds": True} ) ) query += "\nAND ROWNUM <= %d" % batch_spec["sampling_kwargs"]["n"] return query else: return ( sa.select("*") .select_from( sa.table( table_name, schema=batch_spec.get("schema_name", None) ) ) .where(split_clause) .limit(batch_spec["sampling_kwargs"]["n"]) ) elif batch_spec["sampling_method"] == "_sample_using_random": num_rows: int = self.engine.execute( sa.select([sa.func.count()]) .select_from( sa.table(table_name, schema=batch_spec.get("schema_name", None)) ) .where(split_clause) ).scalar() p: Optional[float] = batch_spec["sampling_kwargs"]["p"] or 1.0 sample_size: int = round(p * num_rows) return ( sa.select("*") .select_from( sa.table(table_name, schema=batch_spec.get("schema_name", None)) ) .where(split_clause) .order_by(sa.func.random()) .limit(sample_size) ) else: sampler_fn = getattr(self, batch_spec["sampling_method"]) return ( sa.select("*") .select_from( sa.table(table_name, schema=batch_spec.get("schema_name", None)) ) .where( sa.and_( split_clause, sampler_fn(**batch_spec["sampling_kwargs"]), ) ) ) return ( sa.select("*") .select_from( sa.table(table_name, schema=batch_spec.get("schema_name", None)) ) .where(split_clause) ) def get_batch_data_and_markers( self, batch_spec: BatchSpec ) -> Tuple[Any, BatchMarkers]: if not isinstance( batch_spec, (SqlAlchemyDatasourceBatchSpec, RuntimeQueryBatchSpec) ): raise InvalidBatchSpecError( f"""SqlAlchemyExecutionEngine accepts batch_spec only of type SqlAlchemyDatasourceBatchSpec or RuntimeQueryBatchSpec (illegal type "{str(type(batch_spec))}" was received). """ ) batch_data: Optional[SqlAlchemyBatchData] = None batch_markers: BatchMarkers = BatchMarkers( { "ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime( "%Y%m%dT%H%M%S.%fZ" ) } ) temp_table_name: Optional[str] if "bigquery_temp_table" in batch_spec: temp_table_name = batch_spec.get("bigquery_temp_table") else: temp_table_name = None source_table_name = batch_spec.get("table_name", None) source_schema_name = batch_spec.get("schema_name", None) if isinstance(batch_spec, RuntimeQueryBatchSpec): # query != None is already checked when RuntimeQueryBatchSpec is instantiated query: str = batch_spec.query batch_spec.query = "SQLQuery" batch_data = SqlAlchemyBatchData( execution_engine=self, query=query, temp_table_name=temp_table_name, create_temp_table=batch_spec.get( "create_temp_table", self._create_temp_table ), source_table_name=source_table_name, source_schema_name=source_schema_name, ) elif isinstance(batch_spec, SqlAlchemyDatasourceBatchSpec): if self.engine.dialect.name.lower() == "oracle": selectable: str = self._build_selectable_from_batch_spec( batch_spec=batch_spec ) else: selectable: Selectable = self._build_selectable_from_batch_spec( batch_spec=batch_spec ) batch_data = SqlAlchemyBatchData( execution_engine=self, selectable=selectable, temp_table_name=temp_table_name, create_temp_table=batch_spec.get( "create_temp_table", self._create_temp_table ), source_table_name=source_table_name, source_schema_name=source_schema_name, ) return batch_data, batch_markers
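
For orientation, a small, hedged illustration of how the splitter predicates defined above are used: a method such as _split_on_mod_integer simply returns a boolean SQLAlchemy expression, which _build_selectable_from_batch_spec then places in a WHERE clause. The table and column names below are hypothetical.

import sqlalchemy as sa

# Equivalent to _split_on_mod_integer(table_name="events", column_name="id",
# mod=10, batch_identifiers={"id": 3}) -- hypothetical names.
split_clause = sa.column("id") % 10 == 3

query = sa.select([sa.text("*")]).select_from(sa.table("events")).where(split_clause)
print(query.compile(compile_kwargs={"literal_binds": True}))
# roughly: SELECT * FROM events WHERE id % 10 = 3
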
py
1a439523aa3c4d34bed02ce6dce8d360c9fb3a43
# decimal number
number = int(input("Enter any decimal number: "))

# print equivalent binary number
print("Equivalent Binary Number: ", bin(number))
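
For reference, bin() returns a string prefixed with "0b"; the prefix can be sliced off if only the digits are wanted, and int(..., 2) converts back:

print(bin(10))          # 0b1010
print(bin(10)[2:])      # 1010
print(int("1010", 2))   # 10
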
bzl
1a4395349a044a14e5751e96abc2bb324d7435a7
# TensorFlow external dependencies that can be loaded in WORKSPACE files. load("//third_party/gpus:cuda_configure.bzl", "cuda_configure") load("//third_party/gpus:rocm_configure.bzl", "rocm_configure") load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure") load("//third_party/nccl:nccl_configure.bzl", "nccl_configure") load("//third_party/git:git_configure.bzl", "git_configure") load("//third_party/py:python_configure.bzl", "python_configure") load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure") load("//third_party/toolchains/remote:configure.bzl", "remote_execution_configure") load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure") load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure") load("//third_party/toolchains/embedded/arm-linux:arm_linux_toolchain_configure.bzl", "arm_linux_toolchain_configure") load("//third_party:repo.bzl", "tf_http_archive") load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file") load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external") load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external") load( "//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl", "def_file_filter_configure", ) load("//third_party/FP16:workspace.bzl", FP16 = "repo") load("//third_party/aws:workspace.bzl", aws = "repo") load("//third_party/clog:workspace.bzl", clog = "repo") load("//third_party/cpuinfo:workspace.bzl", cpuinfo = "repo") load("//third_party/dlpack:workspace.bzl", dlpack = "repo") load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo") load("//third_party/hexagon:workspace.bzl", hexagon_nn = "repo") load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo") load("//third_party/hwloc:workspace.bzl", hwloc = "repo") load("//third_party/icu:workspace.bzl", icu = "repo") load("//third_party/jpeg:workspace.bzl", jpeg = "repo") load("//third_party/nasm:workspace.bzl", nasm = "repo") load("//third_party/opencl_headers:workspace.bzl", opencl_headers = "repo") load("//third_party/kissfft:workspace.bzl", kissfft = "repo") load("//third_party/pasta:workspace.bzl", pasta = "repo") load("//third_party/psimd:workspace.bzl", psimd = "repo") load("//third_party/ruy:workspace.bzl", ruy = "repo") load("//third_party/sobol_data:workspace.bzl", sobol_data = "repo") load("//third_party/vulkan_headers:workspace.bzl", vulkan_headers = "repo") load("//third_party/toolchains/remote_config:configs.bzl", "initialize_rbe_configs") def initialize_third_party(): """ Load third party repositories. See above load() statements. """ FP16() aws() clog() cpuinfo() dlpack() flatbuffers() hexagon_nn() highwayhash() hwloc() icu() kissfft() jpeg() nasm() opencl_headers() pasta() psimd() sobol_data() vulkan_headers() ruy() # Sanitize a dependency so that it works correctly from code that includes # TensorFlow as a submodule. def clean_dep(dep): return str(Label(dep)) # If TensorFlow is linked as a submodule. # path_prefix is no longer used. # tf_repo_name is thought to be under consideration. def tf_workspace(path_prefix = "", tf_repo_name = ""): tf_repositories(path_prefix, tf_repo_name) tf_bind() # Toolchains & platforms required by Tensorflow to build. 
def tf_toolchains(): native.register_execution_platforms("@local_execution_config_platform//:platform") native.register_toolchains("@local_execution_config_python//:py_toolchain") # Define all external repositories required by TensorFlow def tf_repositories(path_prefix = "", tf_repo_name = ""): """All external dependencies for TF builds.""" # Initialize toolchains and platforms. tf_toolchains() # Loads all external repos to configure RBE builds. initialize_rbe_configs() # Note that we check the minimum bazel version in WORKSPACE. clang6_configure(name = "local_config_clang6") cc_download_clang_toolchain(name = "local_config_download_clang") cuda_configure(name = "local_config_cuda") tensorrt_configure(name = "local_config_tensorrt") nccl_configure(name = "local_config_nccl") git_configure(name = "local_config_git") syslibs_configure(name = "local_config_syslibs") python_configure(name = "local_config_python") rocm_configure(name = "local_config_rocm") remote_execution_configure(name = "local_config_remote_execution") initialize_third_party() # For windows bazel build # TODO: Remove def file filter when TensorFlow can export symbols properly on Windows. def_file_filter_configure(name = "local_config_def_file_filter") # Point //external/local_config_arm_compiler to //external/arm_compiler arm_compiler_configure( name = "local_config_arm_compiler", build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"), remote_config_repo_arm = "../arm_compiler", remote_config_repo_aarch64 = "../aarch64_compiler", ) # TFLite crossbuild toolchain for embeddeds Linux arm_linux_toolchain_configure( name = "local_config_embedded_arm", build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:BUILD"), aarch64_repo = "../aarch64_linux_toolchain", armhf_repo = "../armhf_linux_toolchain", ) if path_prefix: print("path_prefix was specified to tf_workspace but is no longer used " + "and will be removed in the future.") tf_http_archive( name = "XNNPACK", sha256 = "4b199c96fb2d551450b48eb5549843b41c023ad200aa86760a7c56d0dc0da806", strip_prefix = "XNNPACK-68447302abcfad0d4b6b19a1efe7d7eef8833f4a", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/68447302abcfad0d4b6b19a1efe7d7eef8833f4a.zip", "https://github.com/google/XNNPACK/archive/68447302abcfad0d4b6b19a1efe7d7eef8833f4a.zip", ], ) tf_http_archive( name = "FXdiv", sha256 = "ab7dfb08829bee33dca38405d647868fb214ac685e379ec7ef2bebcd234cd44d", strip_prefix = "FXdiv-b408327ac2a15ec3e43352421954f5b1967701d1", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/Maratyszcza/FXdiv/archive/b408327ac2a15ec3e43352421954f5b1967701d1.zip", "https://github.com/Maratyszcza/FXdiv/archive/b408327ac2a15ec3e43352421954f5b1967701d1.zip", ], ) tf_http_archive( name = "pthreadpool", sha256 = "03312bd7d8d9e379d685258963ee8820767158b5946cdd00336ff17dae851001", strip_prefix = "pthreadpool-029c88620802e1361ccf41d1970bd5b07fd6b7bb", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/Maratyszcza/pthreadpool/archive/029c88620802e1361ccf41d1970bd5b07fd6b7bb.zip", "https://github.com/Maratyszcza/pthreadpool/archive/029c88620802e1361ccf41d1970bd5b07fd6b7bb.zip", ], ) tf_http_archive( name = "mkl_dnn", build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"), sha256 = "a0211aeb5e7dad50b97fa5dffc1a2fe2fe732572d4164e1ee8750a2ede43fbec", strip_prefix = "oneDNN-0.21.3", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz", 
"https://github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz", ], ) tf_http_archive( name = "mkl_dnn_v1", build_file = clean_dep("//third_party/mkl_dnn:mkldnn_v1.BUILD"), sha256 = "5369f7b2f0b52b40890da50c0632c3a5d1082d98325d0f2bff125d19d0dcaa1d", strip_prefix = "oneDNN-1.6.4", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v1.6.4.tar.gz", "https://github.com/oneapi-src/oneDNN/archive/v1.6.4.tar.gz", ], ) tf_http_archive( name = "com_google_absl", build_file = clean_dep("//third_party:com_google_absl.BUILD"), # TODO: Remove the patch when https://github.com/abseil/abseil-cpp/issues/326 is resolved # and when TensorFlow is build against CUDA 10.2 patch_file = clean_dep("//third_party:com_google_absl_fix_mac_and_nvcc_build.patch"), sha256 = "f368a8476f4e2e0eccf8a7318b98dafbe30b2600f4e3cf52636e5eb145aba06a", # SHARED_ABSL_SHA strip_prefix = "abseil-cpp-df3ea785d8c30a9503321a3d35ee7d35808f190d", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz", "https://github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz", ], ) tf_http_archive( name = "eigen_archive", build_file = clean_dep("//third_party:eigen.BUILD"), patch_file = clean_dep("//third_party/eigen3:gpu_packet_math.patch"), sha256 = "e807a6a6f3a0e8ab10adeb59bb5a9bbb113e8e1684f9b4b32f73f58fd758b4cf", # SHARED_EIGEN_SHA strip_prefix = "eigen-011e0db31d1bed8b7f73662be6d57d9f30fa457a", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/gitlab.com/libeigen/eigen/-/archive/011e0db31d1bed8b7f73662be6d57d9f30fa457a/eigen-011e0db31d1bed8b7f73662be6d57d9f30fa457a.tar.gz", "https://gitlab.com/libeigen/eigen/-/archive/011e0db31d1bed8b7f73662be6d57d9f30fa457a/eigen-011e0db31d1bed8b7f73662be6d57d9f30fa457a.tar.gz", ], ) tf_http_archive( name = "arm_compiler", build_file = clean_dep("//:arm_compiler.BUILD"), sha256 = "b9e7d50ffd9996ed18900d041d362c99473b382c0ae049b2fce3290632d2656f", strip_prefix = "rpi-newer-crosstools-eb68350c5c8ec1663b7fe52c742ac4271e3217c5/x64-gcc-6.5.0/arm-rpi-linux-gnueabihf/", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz", "https://github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz", ], ) tf_http_archive( # This is the latest `aarch64-none-linux-gnu` compiler provided by ARM # See https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads # The archive contains GCC version 9.2.1 name = "aarch64_compiler", build_file = "//:arm_compiler.BUILD", sha256 = "8dfe681531f0bd04fb9c53cf3c0a3368c616aa85d48938eebe2b516376e06a66", strip_prefix = "gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz", "https://developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz", ], ) tf_http_archive( name = "aarch64_linux_toolchain", build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:aarch64-linux-toolchain.BUILD"), sha256 = "8ce3e7688a47d8cd2d8e8323f147104ae1c8139520eca50ccf8a7fa933002731", strip_prefix = "gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu", urls = [ 
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz", "https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz", ], ) tf_http_archive( name = "armhf_linux_toolchain", build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:armhf-linux-toolchain.BUILD"), sha256 = "d4f6480ecaa99e977e3833cc8a8e1263f9eecd1ce2d022bb548a24c4f32670f5", strip_prefix = "gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz", "https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz", ], ) tf_http_archive( name = "libxsmm_archive", build_file = clean_dep("//third_party:libxsmm.BUILD"), sha256 = "9c0af4509ea341d1ee2c6c19fc6f19289318c3bd4b17844efeb9e7f9691abf76", strip_prefix = "libxsmm-1.14", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/hfp/libxsmm/archive/1.14.tar.gz", "https://github.com/hfp/libxsmm/archive/1.14.tar.gz", ], ) tf_http_archive( name = "com_googlesource_code_re2", sha256 = "d070e2ffc5476c496a6a872a6f246bfddce8e7797d6ba605a7c8d72866743bf9", strip_prefix = "re2-506cfa4bffd060c06ec338ce50ea3468daa6c814", system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz", "https://github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz", ], ) tf_http_archive( name = "com_github_google_crc32c", sha256 = "6b3b1d861bb8307658b2407bc7a4c59e566855ef5368a60b35c893551e4788e9", build_file = "@com_github_googlecloudplatform_google_cloud_cpp//bazel:crc32c.BUILD", strip_prefix = "crc32c-1.0.6", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/crc32c/archive/1.0.6.tar.gz", "https://github.com/google/crc32c/archive/1.0.6.tar.gz", ], ) tf_http_archive( name = "com_github_googlecloudplatform_google_cloud_cpp", sha256 = "ff82045b9491f0d880fc8e5c83fd9542eafb156dcac9ff8c6209ced66ed2a7f0", strip_prefix = "google-cloud-cpp-1.17.1", repo_mapping = { "@com_github_curl_curl": "@curl", "@com_github_nlohmann_json": "@nlohmann_json_lib", }, system_build_file = clean_dep("//third_party/systemlibs:google_cloud_cpp.BUILD"), system_link_files = { "//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD", }, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz", "https://github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz", ], ) tf_http_archive( name = "com_github_googlecloudplatform_tensorflow_gcp_tools", sha256 = "5e9ebe17eaa2895eb7f77fefbf52deeda7c4b63f5a616916b823eb74f3a0c542", strip_prefix = "tensorflow-gcp-tools-2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz", "https://github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz", ], ) tf_http_archive( name = "com_google_googleapis", build_file = 
clean_dep("//third_party/googleapis:googleapis.BUILD"), sha256 = "7ebab01b06c555f4b6514453dc3e1667f810ef91d1d4d2d3aa29bb9fcb40a900", strip_prefix = "googleapis-541b1ded4abadcc38e8178680b0677f65594ea6f", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip", "https://github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip", ], ) tf_http_archive( name = "gemmlowp", sha256 = "43146e6f56cb5218a8caaab6b5d1601a083f1f31c06ff474a4378a7d35be9cfb", # SHARED_GEMMLOWP_SHA strip_prefix = "gemmlowp-fda83bdc38b118cc6b56753bd540caa49e570745", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip", "https://github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip", ], ) tf_http_archive( name = "farmhash_archive", build_file = clean_dep("//third_party:farmhash.BUILD"), sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0", # SHARED_FARMHASH_SHA strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz", "https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz", ], ) tf_http_archive( name = "png", build_file = clean_dep("//third_party:png.BUILD"), patch_file = clean_dep("//third_party:png_fix_rpi.patch"), sha256 = "ca74a0dace179a8422187671aee97dd3892b53e168627145271cad5b5ac81307", strip_prefix = "libpng-1.6.37", system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/glennrp/libpng/archive/v1.6.37.tar.gz", "https://github.com/glennrp/libpng/archive/v1.6.37.tar.gz", ], ) tf_http_archive( name = "org_sqlite", build_file = clean_dep("//third_party:sqlite.BUILD"), sha256 = "b34f4c0c0eefad9a7e515c030c18702e477f4ef7d8ade6142bdab8011b487ac6", strip_prefix = "sqlite-amalgamation-3330000", system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3330000.zip", "https://www.sqlite.org/2020/sqlite-amalgamation-3330000.zip", ], ) tf_http_archive( name = "gif", build_file = clean_dep("//third_party:gif.BUILD"), patch_file = clean_dep("//third_party:gif_fix_strtok_r.patch"), sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd", strip_prefix = "giflib-5.2.1", system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz", "https://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz", ], ) tf_http_archive( name = "six_archive", build_file = clean_dep("//third_party:six.BUILD"), sha256 = "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", strip_prefix = "six-1.15.0", system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz", "https://pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz", ], ) tf_http_archive( name = "astor_archive", build_file = clean_dep("//third_party:astor.BUILD"), sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d", 
strip_prefix = "astor-0.7.1", system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz", "https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz", ], ) tf_http_archive( name = "astunparse_archive", build_file = clean_dep("//third_party:astunparse.BUILD"), sha256 = "5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872", strip_prefix = "astunparse-1.6.3/lib", system_build_file = clean_dep("//third_party/systemlibs:astunparse.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz", "https://files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz", ], ) filegroup_external( name = "astunparse_license", licenses = ["notice"], # PSFL sha256_urls = { "92fc0e4f4fa9460558eedf3412b988d433a2dcbb3a9c45402a145a4fab8a6ac6": [ "https://storage.googleapis.com/mirror.tensorflow.org/raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE", "https://raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE", ], }, ) tf_http_archive( name = "functools32_archive", build_file = clean_dep("//third_party:functools32.BUILD"), sha256 = "f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d", strip_prefix = "functools32-3.2.3-2", system_build_file = clean_dep("//third_party/systemlibs:functools32.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz", "https://pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz", ], ) tf_http_archive( name = "gast_archive", build_file = clean_dep("//third_party:gast.BUILD"), sha256 = "b881ef288a49aa81440d2c5eb8aeefd4c2bb8993d5f50edae7413a85bfdb3b57", strip_prefix = "gast-0.3.3", system_build_file = clean_dep("//third_party/systemlibs:gast.BUILD"), urls = [ "http://mirror.tensorflow.org/files.pythonhosted.org/packages/12/59/eaa15ab9710a20e22225efd042cd2d6a0b559a0656d5baba9641a2a4a921/gast-0.3.3.tar.gz", "https://files.pythonhosted.org/packages/12/59/eaa15ab9710a20e22225efd042cd2d6a0b559a0656d5baba9641a2a4a921/gast-0.3.3.tar.gz", ], ) tf_http_archive( name = "termcolor_archive", build_file = clean_dep("//third_party:termcolor.BUILD"), sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b", strip_prefix = "termcolor-1.1.0", system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz", "https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz", ], ) tf_http_archive( name = "typing_extensions_archive", build_file = clean_dep("//third_party:typing_extensions.BUILD"), sha256 = "79ee589a3caca649a9bfd2a8de4709837400dfa00b6cc81962a1e6a1815969ae", strip_prefix = "typing_extensions-3.7.4.2/src_py3", system_build_file = clean_dep("//third_party/systemlibs:typing_extensions.BUILD"), urls = [ 
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz", "https://files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz", ], ) filegroup_external( name = "typing_extensions_license", licenses = ["notice"], # PSFL sha256_urls = { "ff17ce94e102024deb68773eb1cc74ca76da4e658f373531f0ac22d68a6bb1ad": [ "http://mirror.tensorflow.org/raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE", "https://raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE", ], }, ) tf_http_archive( name = "opt_einsum_archive", build_file = clean_dep("//third_party:opt_einsum.BUILD"), sha256 = "d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b", strip_prefix = "opt_einsum-2.3.2", system_build_file = clean_dep("//third_party/systemlibs:opt_einsum.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz", "https://pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz", ], ) tf_http_archive( name = "absl_py", sha256 = "603febc9b95a8f2979a7bdb77d2f5e4d9b30d4e0d59579f88eba67d4e4cc5462", strip_prefix = "abseil-py-pypi-v0.9.0", system_build_file = clean_dep("//third_party/systemlibs:absl_py.BUILD"), system_link_files = { "//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD", "//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD", "//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD", "//third_party/systemlibs:absl_py.absl.logging.BUILD": "absl/logging/BUILD", }, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz", "https://github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz", ], ) tf_http_archive( name = "enum34_archive", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz", "https://pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz", ], sha256 = "8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1", build_file = clean_dep("//third_party:enum34.BUILD"), system_build_file = clean_dep("//third_party/systemlibs:enum34.BUILD"), strip_prefix = "enum34-1.1.6/enum", ) tf_http_archive( name = "org_python_pypi_backports_weakref", build_file = clean_dep("//third_party:backports_weakref.BUILD"), sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892", strip_prefix = "backports.weakref-1.0rc1/src", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz", "https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz", ], ) tf_http_archive( name = "dill_archive", build_file = clean_dep("//third_party:dill.BUILD"), urls = [ "http://mirror.tensorflow.org/files.pythonhosted.org/packages/c7/11/345f3173809cea7f1a193bfbf02403fff250a3360e0e118a1630985e547d/dill-0.3.1.1.tar.gz", 
"https://files.pythonhosted.org/packages/c7/11/345f3173809cea7f1a193bfbf02403fff250a3360e0e118a1630985e547d/dill-0.3.1.1.tar.gz", ], sha256 = "42d8ef819367516592a825746a18073ced42ca169ab1f5f4044134703e7a049c", strip_prefix = "dill-0.3.1.1", ) tf_http_archive( name = "tblib_archive", build_file = clean_dep("//third_party:tblib.BUILD"), urls = [ "http://mirror.tensorflow.org/files.pythonhosted.org/packages/ec/c4/8c651f3240a73c28a218194f3d527eb2be5a173d08501060cdee84ade33f/tblib-1.3.2.tar.gz", "https://files.pythonhosted.org/packages/ec/c4/8c651f3240a73c28a218194f3d527eb2be5a173d08501060cdee84ade33f/tblib-1.3.2.tar.gz", ], sha256 = "436e4200e63d92316551179dc540906652878df4ff39b43db30fcf6400444fe7", strip_prefix = "tblib-1.3.2", ) filegroup_external( name = "org_python_license", licenses = ["notice"], # Python 2.0 sha256_urls = { "e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": [ "https://storage.googleapis.com/mirror.tensorflow.org/docs.python.org/2.7/_sources/license.rst.txt", "https://docs.python.org/2.7/_sources/license.rst.txt", ], }, ) tf_http_archive( name = "com_google_protobuf", patch_file = clean_dep("//third_party/protobuf:protobuf.patch"), sha256 = "cfcba2df10feec52a84208693937c17a4b5df7775e1635c1e3baffc487b24c9b", strip_prefix = "protobuf-3.9.2", system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"), system_link_files = { "//third_party/systemlibs:protobuf.bzl": "protobuf.bzl", }, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/v3.9.2.zip", "https://github.com/protocolbuffers/protobuf/archive/v3.9.2.zip", ], ) tf_http_archive( name = "nsync", sha256 = "caf32e6b3d478b78cff6c2ba009c3400f8251f646804bcb65465666a9cea93c4", strip_prefix = "nsync-1.22.0", system_build_file = clean_dep("//third_party/systemlibs:nsync.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/nsync/archive/1.22.0.tar.gz", "https://github.com/google/nsync/archive/1.22.0.tar.gz", ], ) tf_http_archive( name = "com_google_googletest", sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86", strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip", "https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip", ], ) tf_http_archive( name = "com_github_gflags_gflags", sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e", strip_prefix = "gflags-2.2.1", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/gflags/gflags/archive/v2.2.1.tar.gz", "https://github.com/gflags/gflags/archive/v2.2.1.tar.gz", ], ) tf_http_archive( name = "pcre", build_file = clean_dep("//third_party:pcre.BUILD"), sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5", strip_prefix = "pcre-8.42", system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz", "https://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz", ], ) tf_http_archive( name = "curl", build_file = clean_dep("//third_party:curl.BUILD"), sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98", strip_prefix = "curl-7.69.1", system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"), urls = [ 
"https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.69.1.tar.gz", "https://curl.haxx.se/download/curl-7.69.1.tar.gz", ], ) # WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule tf_http_archive( name = "com_github_grpc_grpc", sha256 = "b956598d8cbe168b5ee717b5dafa56563eb5201a947856a6688bbeac9cac4e1f", strip_prefix = "grpc-b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd", system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"), patch_file = clean_dep("//third_party/grpc:generate_cc_env_fix.patch"), system_link_files = { "//third_party/systemlibs:BUILD": "bazel/BUILD", "//third_party/systemlibs:grpc.BUILD": "src/compiler/BUILD", "//third_party/systemlibs:grpc.bazel.grpc_deps.bzl": "bazel/grpc_deps.bzl", }, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz", "https://github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz", ], ) tf_http_archive( name = "linenoise", build_file = clean_dep("//third_party:linenoise.BUILD"), sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7", strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz", "https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz", ], ) # Check out LLVM and MLIR from llvm-project. LLVM_COMMIT = "6154c4115cd4b78d0171892aac21e340e72e32bd" LLVM_SHA256 = "40a0d77e45b4877ab3463074fbdb717a61c2848df4db0331e9b55a23a31cbb17" LLVM_URLS = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ] tf_http_archive( name = "llvm-project", sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = LLVM_URLS, additional_build_files = { clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"): "llvm/BUILD", "//third_party/mlir:BUILD": "mlir/BUILD", "//third_party/mlir:test.BUILD": "mlir/test/BUILD", }, ) # Intel openMP that is part of LLVM sources. 
tf_http_archive( name = "llvm_openmp", build_file = clean_dep("//third_party/llvm_openmp:BUILD"), sha256 = "d19f728c8e04fb1e94566c8d76aef50ec926cd2f95ef3bf1e0a5de4909b28b44", strip_prefix = "openmp-10.0.1.src", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/releases/download/llvmorg-10.0.1/openmp-10.0.1.src.tar.xz", "https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.1/openmp-10.0.1.src.tar.xz", ], ) tf_http_archive( name = "lmdb", build_file = clean_dep("//third_party:lmdb.BUILD"), sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28", strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb", system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz", "https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz", ], ) tf_http_archive( name = "jsoncpp_git", build_file = clean_dep("//third_party:jsoncpp.BUILD"), sha256 = "77a402fb577b2e0e5d0bdc1cf9c65278915cdb25171e3452c68b6da8a561f8f0", strip_prefix = "jsoncpp-1.9.2", system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz", "https://github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz", ], ) tf_http_archive( name = "boringssl", sha256 = "a9c3b03657d507975a32732f04563132b4553c20747cec6dc04de475c8bdf29f", strip_prefix = "boringssl-80ca9f9f6ece29ab132cce4cf807a9465a18cfac", system_build_file = clean_dep("//third_party/systemlibs:boringssl.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz", "https://github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz", ], ) tf_http_archive( name = "zlib", build_file = clean_dep("//third_party:zlib.BUILD"), sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1", strip_prefix = "zlib-1.2.11", system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz", "https://zlib.net/zlib-1.2.11.tar.gz", ], ) tf_http_archive( name = "fft2d", build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"), sha256 = "5f4dabc2ae21e1f537425d58a49cdca1c49ea11db0d6271e2a4b27e9697548eb", strip_prefix = "OouraFFT-1.0", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/petewarden/OouraFFT/archive/v1.0.tar.gz", "https://github.com/petewarden/OouraFFT/archive/v1.0.tar.gz", ], ) tf_http_archive( name = "snappy", build_file = clean_dep("//third_party:snappy.BUILD"), sha256 = "16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f", strip_prefix = "snappy-1.1.8", system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/snappy/archive/1.1.8.tar.gz", "https://github.com/google/snappy/archive/1.1.8.tar.gz", ], ) tf_http_archive( name = "nccl_archive", build_file = clean_dep("//third_party:nccl/archive.BUILD"), patch_file = clean_dep("//third_party/nccl:archive.patch"), sha256 = "b8eaed1fb2d0cc2f951625dc4e17185bab9ff3ab188ba4d34a6e3a01ce9f0d57", strip_prefix = "nccl-195232556936b39b01cc908296e1650b80d4a3e9", urls = [ 
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nvidia/nccl/archive/195232556936b39b01cc908296e1650b80d4a3e9.tar.gz", "https://github.com/nvidia/nccl/archive/195232556936b39b01cc908296e1650b80d4a3e9.tar.gz", ], ) java_import_external( name = "junit", jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a", jar_urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar", "https://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar", "https://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar", ], licenses = ["reciprocal"], # Common Public License Version 1.0 testonly_ = True, deps = ["@org_hamcrest_core"], ) java_import_external( name = "org_hamcrest_core", jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9", jar_urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar", "https://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar", "https://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar", ], licenses = ["notice"], # New BSD License testonly_ = True, ) java_import_external( name = "com_google_testing_compile", jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8", jar_urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar", "https://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar", ], licenses = ["notice"], # New BSD License testonly_ = True, deps = ["@com_google_guava", "@com_google_truth"], ) java_import_external( name = "com_google_truth", jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df", jar_urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar", "https://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar", ], licenses = ["notice"], # Apache 2.0 testonly_ = True, deps = ["@com_google_guava"], ) java_import_external( name = "org_checkerframework_qual", jar_sha256 = "d261fde25d590f6b69db7721d469ac1b0a19a17ccaaaa751c31f0d8b8260b894", jar_urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar", "https://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar", ], licenses = ["notice"], # Apache 2.0 ) java_import_external( name = "com_squareup_javapoet", jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea", jar_urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar", "https://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar", ], licenses = ["notice"], # Apache 2.0 ) tf_http_archive( name = "com_google_pprof", build_file = clean_dep("//third_party:pprof.BUILD"), sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4", strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz", "https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz", ], ) # The CUDA 11 toolkit ships with CUB. 
We should be able to delete this rule # once TF drops support for CUDA 10. tf_http_archive( name = "cub_archive", build_file = clean_dep("//third_party:cub.BUILD"), sha256 = "162514b3cc264ac89d91898b58450190b8192e2af1142cf8ccac2d59aa160dda", strip_prefix = "cub-1.9.9", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/NVlabs/cub/archive/1.9.9.zip", "https://github.com/NVlabs/cub/archive/1.9.9.zip", ], ) tf_http_archive( name = "cython", build_file = clean_dep("//third_party:cython.BUILD"), delete = ["BUILD.bazel"], sha256 = "bccc9aa050ea02595b2440188813b936eaf345e85fb9692790cecfe095cf91aa", strip_prefix = "cython-0.28.4", system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/cython/cython/archive/0.28.4.tar.gz", "https://github.com/cython/cython/archive/0.28.4.tar.gz", ], ) tf_http_archive( name = "arm_neon_2_x86_sse", build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"), sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc", strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz", "https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz", ], ) tf_http_archive( name = "double_conversion", build_file = clean_dep("//third_party:double_conversion.BUILD"), sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de", strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8", system_build_file = clean_dep("//third_party/systemlibs:double_conversion.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip", "https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip", ], ) tf_http_archive( name = "tflite_mobilenet_float", build_file = clean_dep("//third_party:tflite_mobilenet_float.BUILD"), sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0", urls = [ "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz", "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz", ], ) tf_http_archive( name = "tflite_mobilenet_quant", build_file = clean_dep("//third_party:tflite_mobilenet_quant.BUILD"), sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166", urls = [ "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz", "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz", ], ) tf_http_archive( name = "tflite_mobilenet_ssd", build_file = str(Label("//third_party:tflite_mobilenet.BUILD")), sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip", "https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip", ], ) tf_http_archive( name = "tflite_mobilenet_ssd_quant", build_file = str(Label("//third_party:tflite_mobilenet.BUILD")), sha256 = 
"a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip", "https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip", ], ) tf_http_archive( name = "tflite_mobilenet_ssd_quant_protobuf", build_file = str(Label("//third_party:tflite_mobilenet.BUILD")), sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79", strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz", "https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz", ], ) tf_http_archive( name = "tflite_conv_actions_frozen", build_file = str(Label("//third_party:tflite_mobilenet.BUILD")), sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip", "https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip", ], ) tf_http_archive( name = "tflite_ovic_testdata", build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"), sha256 = "033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2", strip_prefix = "ovic", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip", "https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip", ], ) tf_http_archive( name = "rules_cc", sha256 = "cf3b76a90c86c0554c5b10f4b160f05af71d252026b71362c4674e2fb9936cf9", strip_prefix = "rules_cc-01d4a48911d5e7591ecb1c06d3b8af47fe872371", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip", "https://github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip", ], ) tf_http_archive( name = "rules_python", sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz", "https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz", ], ) tf_http_archive( name = "build_bazel_rules_android", sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806", strip_prefix = "rules_android-0.1.1", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_android/archive/v0.1.1.zip", "https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip", ], ) # Apple and Swift rules. 
# https://github.com/bazelbuild/rules_apple/releases tf_http_archive( name = "build_bazel_rules_apple", sha256 = "ee9e6073aeb5a65c100cb9c44b0017c937706a4ae03176e14a7e78620a198079", strip_prefix = "rules_apple-5131f3d46794bf227d296c82f30c2499c9de3c5b", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_apple/archive/5131f3d46794bf227d296c82f30c2499c9de3c5b.tar.gz", "https://github.com/bazelbuild/rules_apple/archive/5131f3d46794bf227d296c82f30c2499c9de3c5b.tar.gz", ], ) # https://github.com/bazelbuild/rules_swift/releases tf_http_archive( name = "build_bazel_rules_swift", sha256 = "d0833bc6dad817a367936a5f902a0c11318160b5e80a20ece35fb85a5675c886", strip_prefix = "rules_swift-3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_swift/archive/3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8.tar.gz", "https://github.com/bazelbuild/rules_swift/archive/3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8.tar.gz", ], ) # https://github.com/bazelbuild/apple_support/releases tf_http_archive( name = "build_bazel_apple_support", sha256 = "ad8ae80e93612b8151019367a3d1604d7a51c14480dae1254e10252007e8260c", strip_prefix = "apple_support-501b4afb27745c4813a88ffa28acd901408014e4", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/apple_support/archive/501b4afb27745c4813a88ffa28acd901408014e4.tar.gz", "https://github.com/bazelbuild/apple_support/archive/501b4afb27745c4813a88ffa28acd901408014e4.tar.gz", ], ) # https://github.com/bazelbuild/bazel-skylib/releases tf_http_archive( name = "bazel_skylib", sha256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf043c7ca0", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz", "https://github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz", ], ) # https://github.com/apple/swift-protobuf/releases tf_http_archive( name = "com_github_apple_swift_swift_protobuf", strip_prefix = "swift-protobuf-1.6.0/", sha256 = "4ccf6e5ea558e8287bf6331f9f6e52b3c321fca5f1d181d03680f415c32a6bba", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/apple/swift-protobuf/archive/1.6.0.zip", "https://github.com/apple/swift-protobuf/archive/1.6.0.zip", ], ) # https://github.com/google/xctestrunner/releases http_file( name = "xctestrunner", executable = 1, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par", "https://github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par", ], ) tf_http_archive( name = "tbb", build_file = clean_dep("//third_party/ngraph:tbb.BUILD"), sha256 = "c3245012296f09f1418b78a8c2f17df5188b3bd0db620f7fd5fabe363320805a", strip_prefix = "tbb-2019_U1", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/01org/tbb/archive/2019_U1.zip", "https://github.com/01org/tbb/archive/2019_U1.zip", ], ) tf_http_archive( name = "ngraph", build_file = clean_dep("//third_party/ngraph:ngraph.BUILD"), sha256 = "a1780f24a1381fc25e323b4b2d08b6ef5129f42e011305b2a34dcf43a48030d5", strip_prefix = "ngraph-0.11.0", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz", "https://github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz", ], ) tf_http_archive( name = "nlohmann_json_lib", build_file = 
clean_dep("//third_party/ngraph:nlohmann_json.BUILD"), sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732", strip_prefix = "json-3.4.0", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/nlohmann/json/archive/v3.4.0.tar.gz", "https://github.com/nlohmann/json/archive/v3.4.0.tar.gz", ], ) tf_http_archive( name = "ngraph_tf", build_file = clean_dep("//third_party/ngraph:ngraph_tf.BUILD"), sha256 = "742a642d2c6622277df4c902b6830d616d0539cc8cd843d6cdb899bb99e66e36", strip_prefix = "ngraph-tf-0.9.0", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip", "https://github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip", ], ) tf_http_archive( name = "pybind11", urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/pybind/pybind11/archive/v2.4.3.tar.gz", "https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz", ], sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d", strip_prefix = "pybind11-2.4.3", build_file = clean_dep("//third_party:pybind11.BUILD"), system_build_file = clean_dep("//third_party/systemlibs:pybind11.BUILD"), ) tf_http_archive( name = "wrapt", build_file = clean_dep("//third_party:wrapt.BUILD"), sha256 = "8a6fb40e8f8b6a66b4ba81a4044c68e6a7b1782f21cfabc06fb765332b4c3e51", strip_prefix = "wrapt-1.11.1/src/wrapt", system_build_file = clean_dep("//third_party/systemlibs:wrapt.BUILD"), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz", "https://github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz", ], ) tf_http_archive( name = "coremltools", sha256 = "0d594a714e8a5fd5bd740ad112ef59155c0482e25fdc8f8efa5758f90abdcf1e", strip_prefix = "coremltools-3.3", build_file = clean_dep("//third_party:coremltools.BUILD"), urls = [ "http://mirror.tensorflow.org/github.com/apple/coremltools/archive/3.3.zip", "https://github.com/apple/coremltools/archive/3.3.zip", ], ) tf_http_archive( name = "tf_toolchains", sha256 = "eb175afa73e5a33d2b5d2aabcfde6c8c3395fd7001eb5ba765a5cd98cce714ba", strip_prefix = "toolchains-0.0.2", build_file = clean_dep("//third_party:tf_toolchains.BUILD"), urls = [ "http://mirror.tensorflow.org/github.com/tensorflow/toolchains/archive/v0.0.2.tar.gz", "https://github.com/tensorflow/toolchains/archive/v0.0.2.tar.gz", ], ) def tf_bind(): """Bind targets for some external repositories""" ############################################################################## # BIND DEFINITIONS # # Please do not add bind() definitions unless we have no other choice. # If that ends up being the case, please leave a comment explaining # why we can't depend on the canonical build target. # Needed by Protobuf native.bind( name = "grpc_cpp_plugin", actual = "@com_github_grpc_grpc//src/compiler:grpc_cpp_plugin", ) native.bind( name = "grpc_python_plugin", actual = "@com_github_grpc_grpc//src/compiler:grpc_python_plugin", ) native.bind( name = "grpc_lib", actual = "@com_github_grpc_grpc//:grpc++", ) native.bind( name = "grpc_lib_unsecure", actual = "@com_github_grpc_grpc//:grpc++_unsecure", ) # Needed by Protobuf native.bind( name = "python_headers", actual = clean_dep("//third_party/python_runtime:headers"), ) # Needed by Protobuf native.bind( name = "six", actual = "@six_archive//:six", )
py
1a4395ba110821214bbae565ab4f8c848d379b4f
num1 = input("Enter your first number?\n") operation = input("Enter Operation?\n") num2 = input("Enter your Second Number?\n") floatnum1 = float(num1) floatnum2 = float(num2) if operation == "+": output=floatnum1+floatnum2 if operation == "-": output=floatnum1-floatnum2 if operation == "*": output=floatnum1*floatnum2 if operation == "/": output=floatnum1/floatnum2 else: print("invalid number") print("Your Answer: "+str(output))
py
1a43966350872c1e2bcae71dff833169fdb4fdbc
from .KubeLibrary import KubeLibrary, BearerTokenWithPrefixException # noqa: F401
py
1a4396d5305be632dc6521da876a707918c32687
import argparse
from datetime import datetime
from pathlib import Path

from .. import cli
from ..core.repository import Repository
from ..utils import create_filename
from ..utils import log
from ..utils import output_csv
from ..utils import parse_datetime


def parse_dateto(s):
    return parse_datetime(s + ' 23:59:59')


def parse_datefrom(s):
    return parse_datetime(s + ' 00:00:00')


def get_common_parser():
    parser = argparse.ArgumentParser()
    parser.set_defaults(
        datefrom=None,
        dateto=datetime.now(),
        enable_cache=True,
        nop=False,
        repositories=[],
        user=None,
        verbose=False,
        version=cli.__version__,
    )
    parser.add_argument(
        '--from', action='store', dest='datefrom', type=parse_datefrom,
        help='filter created_at FROM: e.g. 2020-04-06'
    )
    parser.add_argument(
        '--to', action='store', dest='dateto', type=parse_dateto,
        help='filter created_at TO: e.g. 2020-04-06'
    )
    parser.add_argument(
        '--disable-cache', action='store_false', dest='enable_cache',
        help='disable cache'
    )
    parser.add_argument(
        '--nop', action='store_true',
        help='use as a separator for option handling of positional argument'
    )
    parser.add_argument(
        '--repository', nargs='*', dest='repositories',
        help='set repositories'
    )
    parser.add_argument(
        '--user', action='store',
        help='set user to filter assignee of pull request'
    )
    parser.add_argument(
        '--verbose', action='store_true',
        help='set verbose mode'
    )
    parser.add_argument(
        '--version', action='version', version=f'%(prog)s {cli.__version__}',
        help='show version'
    )
    return parser


def get_csv_path(args, repo_name, gh, create_data):
    filename = create_filename(repo_name, args.api)
    path = Path(filename)
    if args.enable_cache and path.exists():
        log.info(f'use existent {path}')
        return path

    # NOTE: original indentation was lost in extraction; the return is assumed
    # to sit inside the with block (output_csv itself does not use `repo`).
    with Repository(args, gh, repo_name) as repo:
        data = create_data(repo)
        return output_csv(args, data, filename)
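# --- Usage sketch (illustrative; not part of the original module) ---
# Assuming a subcommand module extends the common parser with its own options
# (the `args.api` attribute read by get_csv_path() is one such hypothetical
# addition), wiring it up might look roughly like:
#
#     parser = get_common_parser()
#     parser.add_argument('--api', default='pulls')
#     args = parser.parse_args(
#         ['--from', '2020-04-01', '--to', '2020-04-30',
#          '--repository', 'org/repo', '--verbose'])
#     path = get_csv_path(args, 'org/repo', gh, create_data)
#
# where `gh` stands for an authenticated GitHub client and `create_data` is a
# callable that takes the Repository wrapper and returns rows for the CSV.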
py
1a4396fcc54860c49098e0018fb9691270fefcc1
# -*- coding: UTF-8 -*- from django.db import models from django.contrib.auth.models import AbstractUser from mirage import fields from django.utils.translation import gettext as _ class ResourceGroup(models.Model): """ 资源组 """ group_id = models.AutoField('组ID', primary_key=True) group_name = models.CharField('组名称', max_length=100, unique=True) group_parent_id = models.BigIntegerField('父级id', default=0) group_sort = models.IntegerField('排序', default=1) group_level = models.IntegerField('层级', default=1) ding_webhook = models.CharField('钉钉webhook地址', max_length=255, blank=True) feishu_webhook = models.CharField('飞书webhook地址', max_length=255, blank=True) is_deleted = models.IntegerField('是否删除', choices=((0, '否'), (1, '是')), default=0) create_time = models.DateTimeField(auto_now_add=True) sys_time = models.DateTimeField(auto_now=True) def __str__(self): return self.group_name class Meta: managed = True db_table = 'resource_group' verbose_name = u'资源组管理' verbose_name_plural = u'资源组管理' class Users(AbstractUser): """ 用户信息扩展 """ display = models.CharField('显示的中文名', max_length=50, default='') ding_user_id = models.CharField('钉钉UserID', max_length=64, blank=True) wx_user_id = models.CharField('企业微信UserID', max_length=64, blank=True) feishu_open_id = models.CharField('飞书OpenID', max_length=64, blank=True) failed_login_count = models.IntegerField('失败计数', default=0) last_login_failed_at = models.DateTimeField('上次失败登录时间', blank=True, null=True) resource_group = models.ManyToManyField(ResourceGroup, verbose_name='资源组', blank=True) def save(self, *args, **kwargs): self.failed_login_count = min(127, self.failed_login_count) self.failed_login_count = max(0, self.failed_login_count) super(Users, self).save(*args, **kwargs) def __str__(self): if self.display: return self.display return self.username class Meta: managed = True db_table = 'sql_users' verbose_name = u'用户管理' verbose_name_plural = u'用户管理' class InstanceTag(models.Model): """实例标签配置""" tag_code = models.CharField('标签代码', max_length=20, unique=True) tag_name = models.CharField('标签名称', max_length=20, unique=True) active = models.BooleanField('激活状态', default=True) create_time = models.DateTimeField('创建时间', auto_now_add=True) def __str__(self): return self.tag_name class Meta: managed = True db_table = 'sql_instance_tag' verbose_name = u'实例标签' verbose_name_plural = u'实例标签' DB_TYPE_CHOICES = ( ('mysql', 'MySQL'), ('mssql', 'MsSQL'), ('redis', 'Redis'), ('pgsql', 'PgSQL'), ('oracle', 'Oracle'), ('mongo', 'Mongo'), ('phoenix', 'Phoenix'), ('inception', 'Inception'), ('goinception', 'goInception')) class Instance(models.Model): """ 各个线上实例配置 """ instance_name = models.CharField('实例名称', max_length=50, unique=True) type = models.CharField('实例类型', max_length=6, choices=(('master', '主库'), ('slave', '从库'))) db_type = models.CharField('数据库类型', max_length=20, choices=DB_TYPE_CHOICES) host = models.CharField('实例连接', max_length=200) port = models.IntegerField('端口', default=0) user = fields.EncryptedCharField(verbose_name='用户名', max_length=200, default='', blank=True) password = fields.EncryptedCharField(verbose_name='密码', max_length=300, default='', blank=True) db_name = models.CharField('数据库', max_length=64, default='', blank=True) charset = models.CharField('字符集', max_length=20, default='', blank=True) service_name = models.CharField('Oracle service name', max_length=50, null=True, blank=True) sid = models.CharField('Oracle sid', max_length=50, null=True, blank=True) resource_group = models.ManyToManyField(ResourceGroup, verbose_name='资源组', blank=True) instance_tag 
= models.ManyToManyField(InstanceTag, verbose_name='实例标签', blank=True) create_time = models.DateTimeField('创建时间', auto_now_add=True) update_time = models.DateTimeField('更新时间', auto_now=True) def __str__(self): return self.instance_name class Meta: managed = True db_table = 'sql_instance' verbose_name = u'实例配置' verbose_name_plural = u'实例配置' SQL_WORKFLOW_CHOICES = ( ('workflow_finish', _('workflow_finish')), ('workflow_abort', _('workflow_abort')), ('workflow_manreviewing', _('workflow_manreviewing')), ('workflow_review_pass', _('workflow_review_pass')), ('workflow_timingtask', _('workflow_timingtask')), ('workflow_executing', _('workflow_executing')), ('workflow_autoreviewwrong', _('workflow_autoreviewwrong')), ('workflow_exception', _('workflow_exception'))) class SqlWorkflow(models.Model): """ 存放各个SQL上线工单的基础内容 """ workflow_name = models.CharField('工单内容', max_length=50) demand_url = models.CharField('需求链接', max_length=500) group_id = models.IntegerField('组ID') group_name = models.CharField('组名称', max_length=100) instance = models.ForeignKey(Instance, on_delete=models.CASCADE) db_name = models.CharField('数据库', max_length=64) syntax_type = models.IntegerField('工单类型 0、未知,1、DDL,2、DML', choices=((0, '其他'), (1, 'DDL'), (2, 'DML')), default=0) is_backup = models.BooleanField('是否备份', choices=((False, '否'), (True, '是'),), default=True) engineer = models.CharField('发起人', max_length=30) engineer_display = models.CharField('发起人中文名', max_length=50, default='') status = models.CharField(max_length=50, choices=SQL_WORKFLOW_CHOICES) audit_auth_groups = models.CharField('审批权限组列表', max_length=255) run_date_start = models.DateTimeField('可执行起始时间', null=True, blank=True) run_date_end = models.DateTimeField('可执行结束时间', null=True, blank=True) create_time = models.DateTimeField('创建时间', auto_now_add=True) finish_time = models.DateTimeField('结束时间', null=True, blank=True) is_manual = models.IntegerField('是否原生执行', choices=((0, '否'), (1, '是')), default=0) def __str__(self): return self.workflow_name class Meta: managed = True db_table = 'sql_workflow' verbose_name = u'SQL工单' verbose_name_plural = u'SQL工单' class SqlWorkflowContent(models.Model): """ 存放各个SQL上线工单的SQL|审核|执行内容 可定期归档或清理历史数据,也可通过``alter table sql_workflow_content row_format=compressed; ``来进行压缩 """ workflow = models.OneToOneField(SqlWorkflow, on_delete=models.CASCADE) sql_content = models.TextField('具体sql内容') review_content = models.TextField('自动审核内容的JSON格式') execute_result = models.TextField('执行结果的JSON格式', blank=True) def __str__(self): return self.workflow.workflow_name class Meta: managed = True db_table = 'sql_workflow_content' verbose_name = u'SQL工单内容' verbose_name_plural = u'SQL工单内容' workflow_type_choices = ((1, _('sql_query')), (2, _('sql_review'))) workflow_status_choices = ((0, '待审核'), (1, '审核通过'), (2, '审核不通过'), (3, '审核取消')) class WorkflowAudit(models.Model): """ 工作流审核状态表 """ audit_id = models.AutoField(primary_key=True) group_id = models.IntegerField('组ID') group_name = models.CharField('组名称', max_length=100) workflow_id = models.BigIntegerField('关联业务id') workflow_type = models.IntegerField('申请类型', choices=workflow_type_choices) workflow_title = models.CharField('申请标题', max_length=50) workflow_remark = models.CharField('申请备注', default='', max_length=140, blank=True) audit_auth_groups = models.CharField('审批权限组列表', max_length=255) current_audit = models.CharField('当前审批权限组', max_length=20) next_audit = models.CharField('下级审批权限组', max_length=20) current_status = models.IntegerField('审核状态', choices=workflow_status_choices) create_user = 
models.CharField('申请人', max_length=30) create_user_display = models.CharField('申请人中文名', max_length=50, default='') create_time = models.DateTimeField('申请时间', auto_now_add=True) sys_time = models.DateTimeField('系统时间', auto_now=True) def __int__(self): return self.audit_id class Meta: managed = True db_table = 'workflow_audit' unique_together = ('workflow_id', 'workflow_type') verbose_name = u'工作流审批列表' verbose_name_plural = u'工作流审批列表' class WorkflowAuditDetail(models.Model): """ 审批明细表 """ audit_detail_id = models.AutoField(primary_key=True) audit_id = models.IntegerField('审核主表id') audit_user = models.CharField('审核人', max_length=30) audit_time = models.DateTimeField('审核时间') audit_status = models.IntegerField('审核状态', choices=workflow_status_choices) remark = models.CharField('审核备注', default='', max_length=1000) sys_time = models.DateTimeField('系统时间', auto_now=True) def __int__(self): return self.audit_detail_id class Meta: managed = True db_table = 'workflow_audit_detail' verbose_name = u'工作流审批明细' verbose_name_plural = u'工作流审批明细' class WorkflowAuditSetting(models.Model): """ 审批配置表 """ audit_setting_id = models.AutoField(primary_key=True) group_id = models.IntegerField('组ID') group_name = models.CharField('组名称', max_length=100) workflow_type = models.IntegerField('审批类型', choices=workflow_type_choices) audit_auth_groups = models.CharField('审批权限组列表', max_length=255) create_time = models.DateTimeField(auto_now_add=True) sys_time = models.DateTimeField(auto_now=True) def __int__(self): return self.audit_setting_id class Meta: managed = True db_table = 'workflow_audit_setting' unique_together = ('group_id', 'workflow_type') verbose_name = u'审批流程配置' verbose_name_plural = u'审批流程配置' class WorkflowLog(models.Model): """ 工作流日志表 """ id = models.AutoField(primary_key=True) audit_id = models.IntegerField('工单审批id', db_index=True) operation_type = models.SmallIntegerField('操作类型,0提交/待审核、1审核通过、2审核不通过、3审核取消、4定时、5执行、6执行结束') operation_type_desc = models.CharField('操作类型描述', max_length=10) operation_info = models.CharField('操作信息', max_length=1000) operator = models.CharField('操作人', max_length=30) operator_display = models.CharField('操作人中文名', max_length=50, default='') operation_time = models.DateTimeField(auto_now_add=True) def __int__(self): return self.audit_id class Meta: managed = True db_table = 'workflow_log' verbose_name = u'工作流日志' verbose_name_plural = u'工作流日志' class QueryPrivilegesApply(models.Model): """ 查询权限申请记录表 """ apply_id = models.AutoField(primary_key=True) group_id = models.IntegerField('组ID') group_name = models.CharField('组名称', max_length=100) title = models.CharField('申请标题', max_length=50) # TODO user_name display 改为外键 user_name = models.CharField('申请人', max_length=30) user_display = models.CharField('申请人中文名', max_length=50, default='') instance = models.ForeignKey(Instance, on_delete=models.CASCADE) db_list = models.TextField('数据库', default='') # 逗号分隔的数据库列表 table_list = models.TextField('表', default='') # 逗号分隔的表列表 valid_date = models.DateField('有效时间') limit_num = models.IntegerField('行数限制', default=100) priv_type = models.IntegerField('权限类型', choices=((1, 'DATABASE'), (2, 'TABLE'),), default=0) status = models.IntegerField('审核状态', choices=workflow_status_choices) audit_auth_groups = models.CharField('审批权限组列表', max_length=255) create_time = models.DateTimeField(auto_now_add=True) sys_time = models.DateTimeField(auto_now=True) def __int__(self): return self.apply_id class Meta: managed = True db_table = 'query_privileges_apply' verbose_name = u'查询权限申请记录表' verbose_name_plural = u'查询权限申请记录表' class 
QueryPrivileges(models.Model): """ 用户权限关系表 """ privilege_id = models.AutoField(primary_key=True) user_name = models.CharField('用户名', max_length=30) user_display = models.CharField('申请人中文名', max_length=50, default='') instance = models.ForeignKey(Instance, on_delete=models.CASCADE) db_name = models.CharField('数据库', max_length=64, default='') table_name = models.CharField('表', max_length=64, default='') valid_date = models.DateField('有效时间') limit_num = models.IntegerField('行数限制', default=100) priv_type = models.IntegerField('权限类型', choices=((1, 'DATABASE'), (2, 'TABLE'),), default=0) is_deleted = models.IntegerField('是否删除', default=0) create_time = models.DateTimeField(auto_now_add=True) sys_time = models.DateTimeField(auto_now=True) def __int__(self): return self.privilege_id class Meta: managed = True db_table = 'query_privileges' index_together = ["user_name", "instance", "db_name", "valid_date"] verbose_name = u'查询权限记录' verbose_name_plural = u'查询权限记录' class QueryLog(models.Model): """ 记录在线查询sql的日志 """ # TODO 改为实例外键 instance_name = models.CharField('实例名称', max_length=50) db_name = models.CharField('数据库名称', max_length=64) sqllog = models.TextField('执行的查询语句') effect_row = models.BigIntegerField('返回行数') cost_time = models.CharField('执行耗时', max_length=10, default='') # TODO 改为user 外键 username = models.CharField('操作人', max_length=30) user_display = models.CharField('操作人中文名', max_length=50, default='') priv_check = models.BooleanField('查询权限是否正常校验', choices=((False, '跳过'), (True, '正常'),), default=False) hit_rule = models.BooleanField('查询是否命中脱敏规则', choices=((False, '未命中/未知'), (True, '命中')), default=False) masking = models.BooleanField('查询结果是否正常脱敏', choices=((False, '否'), (True, '是'),), default=False) favorite = models.BooleanField('是否收藏', choices=((False, '否'), (True, '是'),), default=False) alias = models.CharField('语句标识', max_length=64, default='', blank=True) create_time = models.DateTimeField('操作时间', auto_now_add=True) sys_time = models.DateTimeField(auto_now=True) class Meta: managed = True db_table = 'query_log' verbose_name = u'查询日志' verbose_name_plural = u'查询日志' rule_type_choices = ((1, '手机号'), (2, '证件号码'), (3, '银行卡'), (4, '邮箱'), (5, '金额'), (6, '其他')) class DataMaskingColumns(models.Model): """ 脱敏字段配置 """ column_id = models.AutoField('字段id', primary_key=True) rule_type = models.IntegerField('规则类型', choices=rule_type_choices) active = models.BooleanField('激活状态', choices=((False, '未激活'), (True, '激活'))) instance = models.ForeignKey(Instance, on_delete=models.CASCADE) table_schema = models.CharField('字段所在库名', max_length=64) table_name = models.CharField('字段所在表名', max_length=64) column_name = models.CharField('字段名', max_length=64) column_comment = models.CharField('字段描述', max_length=1024, default='', blank=True) create_time = models.DateTimeField(auto_now_add=True) sys_time = models.DateTimeField(auto_now=True) class Meta: managed = True db_table = 'data_masking_columns' verbose_name = u'脱敏字段配置' verbose_name_plural = u'脱敏字段配置' class DataMaskingRules(models.Model): """ 脱敏规则配置 """ rule_type = models.IntegerField('规则类型', choices=rule_type_choices, unique=True) rule_regex = models.CharField('规则脱敏所用的正则表达式,表达式必须分组,隐藏的组会使用****代替', max_length=255) hide_group = models.IntegerField('需要隐藏的组') rule_desc = models.CharField('规则描述', max_length=100, default='', blank=True) sys_time = models.DateTimeField(auto_now=True) class Meta: managed = True db_table = 'data_masking_rules' verbose_name = u'脱敏规则配置' verbose_name_plural = u'脱敏规则配置' class InstanceAccount(models.Model): """ 实例账号列表 """ instance = 
models.ForeignKey(Instance, on_delete=models.CASCADE) user = fields.EncryptedCharField(verbose_name='账号', max_length=128) host = models.CharField(verbose_name='主机', max_length=64) password = fields.EncryptedCharField(verbose_name='密码', max_length=128, default='', blank=True) remark = models.CharField('备注', max_length=255) sys_time = models.DateTimeField('系统修改时间', auto_now=True) class Meta: managed = True db_table = 'instance_account' unique_together = ('instance', 'user', 'host') verbose_name = '实例账号列表' verbose_name_plural = '实例账号列表' class InstanceDatabase(models.Model): """ 实例数据库列表 """ instance = models.ForeignKey(Instance, on_delete=models.CASCADE) db_name = models.CharField('数据库名', max_length=128) owner = models.CharField('负责人', max_length=50, default='', blank=True) owner_display = models.CharField('负责人中文名', max_length=50, default='', blank=True) remark = models.CharField('备注', max_length=255, default='', blank=True) sys_time = models.DateTimeField('系统修改时间', auto_now=True) class Meta: managed = True db_table = 'instance_database' unique_together = ('instance', 'db_name') verbose_name = '实例数据库' verbose_name_plural = '实例数据库列表' class ParamTemplate(models.Model): """ 实例参数模板配置 """ db_type = models.CharField('数据库类型', max_length=20, choices=DB_TYPE_CHOICES) variable_name = models.CharField('参数名', max_length=64) default_value = models.CharField('默认参数值', max_length=1024) editable = models.BooleanField('是否支持修改', default=False) valid_values = models.CharField('有效参数值,范围参数[1-65535],值参数[ON|OFF]', max_length=1024, blank=True) description = models.CharField('参数描述', max_length=1024, blank=True) create_time = models.DateTimeField('创建时间', auto_now_add=True) sys_time = models.DateTimeField('系统时间修改', auto_now=True) class Meta: managed = True db_table = 'param_template' unique_together = ('db_type', 'variable_name') verbose_name = u'实例参数模板配置' verbose_name_plural = u'实例参数模板配置' class ParamHistory(models.Model): """ 可在线修改的动态参数配置 """ instance = models.ForeignKey(Instance, on_delete=models.CASCADE) variable_name = models.CharField('参数名', max_length=64) old_var = models.CharField('修改前参数值', max_length=1024) new_var = models.CharField('修改后参数值', max_length=1024) set_sql = models.CharField('在线变更配置执行的SQL语句', max_length=1024) user_name = models.CharField('修改人', max_length=30) user_display = models.CharField('修改人中文名', max_length=50) create_time = models.DateTimeField('参数被修改时间点', auto_now_add=True) class Meta: managed = True ordering = ['-create_time'] db_table = 'param_history' verbose_name = u'实例参数修改历史' verbose_name_plural = u'实例参数修改历史' class ArchiveConfig(models.Model): """ 归档配置表 """ title = models.CharField('归档配置说明', max_length=50) resource_group = models.ForeignKey(ResourceGroup, on_delete=models.CASCADE) audit_auth_groups = models.CharField('审批权限组列表', max_length=255, blank=True) src_instance = models.ForeignKey(Instance, related_name='src_instance', on_delete=models.CASCADE) src_db_name = models.CharField('源数据库', max_length=64) src_table_name = models.CharField('源表', max_length=64) dest_instance = models.ForeignKey(Instance, related_name='dest_instance', on_delete=models.CASCADE, blank=True, null=True) dest_db_name = models.CharField('目标数据库', max_length=64, blank=True, null=True) dest_table_name = models.CharField('目标表', max_length=64, blank=True, null=True) condition = models.CharField('归档条件,where条件', max_length=1000) mode = models.CharField('归档模式', max_length=10, choices=(('file', '文件'), ('dest', '其他实例'), ('purge', '直接删除'))) no_delete = models.BooleanField('是否保留源数据') sleep = models.IntegerField('归档limit行后的休眠秒数', 
default=1) status = models.IntegerField('审核状态', choices=workflow_status_choices, blank=True, default=1) state = models.BooleanField('是否启用归档', default=True) user_name = models.CharField('申请人', max_length=30, blank=True, default='') user_display = models.CharField('申请人中文名', max_length=50, blank=True, default='') create_time = models.DateTimeField('创建时间', auto_now_add=True) last_archive_time = models.DateTimeField('最近归档时间', blank=True, null=True) sys_time = models.DateTimeField('系统时间修改', auto_now=True) class Meta: managed = True db_table = 'archive_config' verbose_name = u'归档配置表' verbose_name_plural = u'归档配置表' class ArchiveLog(models.Model): """ 归档日志表 """ archive = models.ForeignKey(ArchiveConfig, on_delete=models.CASCADE) cmd = models.CharField('归档命令', max_length=2000) condition = models.CharField('归档条件,where条件', max_length=1000) mode = models.CharField('归档模式', max_length=10, choices=(('file', '文件'), ('dest', '其他实例'), ('purge', '直接删除'))) no_delete = models.BooleanField('是否保留源数据') sleep = models.IntegerField('归档limit行记录后的休眠秒数', default=0) select_cnt = models.IntegerField('查询数量') insert_cnt = models.IntegerField('插入数量') delete_cnt = models.IntegerField('删除数量') statistics = models.TextField('归档统计日志') success = models.BooleanField('是否归档成功') error_info = models.TextField('错误信息') start_time = models.DateTimeField('开始时间', auto_now_add=True) end_time = models.DateTimeField('结束时间') sys_time = models.DateTimeField('系统时间修改', auto_now=True) class Meta: managed = True db_table = 'archive_log' verbose_name = u'归档日志表' verbose_name_plural = u'归档日志表' class Config(models.Model): """ 配置信息表 """ item = models.CharField('配置项', max_length=200, primary_key=True) value = fields.EncryptedCharField(verbose_name='配置项值', max_length=500) description = models.CharField('描述', max_length=200, default='', blank=True) class Meta: managed = True db_table = 'sql_config' verbose_name = u'系统配置' verbose_name_plural = u'系统配置' class AliyunRdsConfig(models.Model): """ 阿里云rds配置信息 """ instance = models.OneToOneField(Instance, on_delete=models.CASCADE) rds_dbinstanceid = models.CharField('对应阿里云RDS实例ID', max_length=100) is_enable = models.BooleanField('是否启用', default=False) def __int__(self): return self.rds_dbinstanceid class Meta: managed = True db_table = 'aliyun_rds_config' verbose_name = u'阿里云rds配置' verbose_name_plural = u'阿里云rds配置' class Permission(models.Model): """ 自定义业务权限 """ class Meta: managed = True permissions = ( ('menu_dashboard', '菜单 Dashboard'), ('menu_sqlcheck', '菜单 SQL审核'), ('menu_sqlworkflow', '菜单 SQL上线'), ('menu_sqlanalyze', '菜单 SQL分析'), ('menu_query', '菜单 SQL查询'), ('menu_sqlquery', '菜单 在线查询'), ('menu_queryapplylist', '菜单 权限管理'), ('menu_sqloptimize', '菜单 SQL优化'), ('menu_sqladvisor', '菜单 优化工具'), ('menu_slowquery', '菜单 慢查日志'), ('menu_instance', '菜单 实例管理'), ('menu_instance_list', '菜单 实例列表'), ('menu_dbdiagnostic', '菜单 会话管理'), ('menu_database', '菜单 数据库管理'), ('menu_instance_account', '菜单 实例账号管理'), ('menu_param', '菜单 参数配置'), ('menu_data_dictionary', '菜单 数据字典'), ('menu_tools', '菜单 工具插件'), ('menu_archive', '菜单 数据归档'), ('menu_binlog2sql', '菜单 Binlog2SQL'), ('menu_schemasync', '菜单 SchemaSync'), ('menu_system', '菜单 系统管理'), ('menu_document', '菜单 相关文档'), ('sql_submit', '提交SQL上线工单'), ('sql_review', '审核SQL上线工单'), ('sql_execute_for_resource_group', '执行SQL上线工单(资源组粒度)'), ('sql_execute', '执行SQL上线工单(仅自己提交的)'), ('sql_analyze', '执行SQL分析'), ('optimize_sqladvisor', '执行SQLAdvisor'), ('optimize_sqltuning', '执行SQLTuning'), ('optimize_soar', '执行SOAR'), ('query_applypriv', '申请查询权限'), ('query_mgtpriv', '管理查询权限'), ('query_review', '审核查询权限'), 
('query_submit', '提交SQL查询'), ('query_all_instances', '可查询所有实例'), ('query_resource_group_instance', '可查询所在资源组内的所有实例'), ('process_view', '查看会话'), ('process_kill', '终止会话'), ('tablespace_view', '查看表空间'), ('trx_view', '查看事务信息'), ('trxandlocks_view', '查看锁信息'), ('instance_account_manage', '管理实例账号'), ('param_view', '查看实例参数列表'), ('param_edit', '修改实例参数'), ('data_dictionary_export', '导出数据字典'), ('archive_apply', '提交归档申请'), ('archive_review', '审核归档申请'), ('archive_mgt', '管理归档申请'), ) class SlowQuery(models.Model): """ SlowQuery """ checksum = models.CharField(max_length=32, primary_key=True) fingerprint = models.TextField() sample = models.TextField() first_seen = models.DateTimeField(blank=True, null=True) last_seen = models.DateTimeField(blank=True, null=True, db_index=True) reviewed_by = models.CharField(max_length=20, blank=True, null=True) reviewed_on = models.DateTimeField(blank=True, null=True) comments = models.TextField(blank=True, null=True) class Meta: managed = False db_table = 'mysql_slow_query_review' verbose_name = u'慢日志统计' verbose_name_plural = u'慢日志统计' class SlowQueryHistory(models.Model): """ SlowQueryHistory """ hostname_max = models.CharField(max_length=64, null=False) client_max = models.CharField(max_length=64, null=True) user_max = models.CharField(max_length=64, null=False) db_max = models.CharField(max_length=64, null=True, default=None) bytes_max = models.CharField(max_length=64, null=True) checksum = models.ForeignKey(SlowQuery, db_constraint=False, to_field='checksum', db_column='checksum', on_delete=models.CASCADE) sample = models.TextField() ts_min = models.DateTimeField(db_index=True) ts_max = models.DateTimeField() ts_cnt = models.FloatField(blank=True, null=True) query_time_sum = models.FloatField(db_column='Query_time_sum', blank=True, null=True) query_time_min = models.FloatField(db_column='Query_time_min', blank=True, null=True) query_time_max = models.FloatField(db_column='Query_time_max', blank=True, null=True) query_time_pct_95 = models.FloatField(db_column='Query_time_pct_95', blank=True, null=True) query_time_stddev = models.FloatField(db_column='Query_time_stddev', blank=True, null=True) query_time_median = models.FloatField(db_column='Query_time_median', blank=True, null=True) lock_time_sum = models.FloatField(db_column='Lock_time_sum', blank=True, null=True) lock_time_min = models.FloatField(db_column='Lock_time_min', blank=True, null=True) lock_time_max = models.FloatField(db_column='Lock_time_max', blank=True, null=True) lock_time_pct_95 = models.FloatField(db_column='Lock_time_pct_95', blank=True, null=True) lock_time_stddev = models.FloatField(db_column='Lock_time_stddev', blank=True, null=True) lock_time_median = models.FloatField(db_column='Lock_time_median', blank=True, null=True) rows_sent_sum = models.FloatField(db_column='Rows_sent_sum', blank=True, null=True) rows_sent_min = models.FloatField(db_column='Rows_sent_min', blank=True, null=True) rows_sent_max = models.FloatField(db_column='Rows_sent_max', blank=True, null=True) rows_sent_pct_95 = models.FloatField(db_column='Rows_sent_pct_95', blank=True, null=True) rows_sent_stddev = models.FloatField(db_column='Rows_sent_stddev', blank=True, null=True) rows_sent_median = models.FloatField(db_column='Rows_sent_median', blank=True, null=True) rows_examined_sum = models.FloatField(db_column='Rows_examined_sum', blank=True, null=True) rows_examined_min = models.FloatField(db_column='Rows_examined_min', blank=True, null=True) rows_examined_max = models.FloatField(db_column='Rows_examined_max', blank=True, 
null=True) rows_examined_pct_95 = models.FloatField(db_column='Rows_examined_pct_95', blank=True, null=True) rows_examined_stddev = models.FloatField(db_column='Rows_examined_stddev', blank=True, null=True) rows_examined_median = models.FloatField(db_column='Rows_examined_median', blank=True, null=True) rows_affected_sum = models.FloatField(db_column='Rows_affected_sum', blank=True, null=True) rows_affected_min = models.FloatField(db_column='Rows_affected_min', blank=True, null=True) rows_affected_max = models.FloatField(db_column='Rows_affected_max', blank=True, null=True) rows_affected_pct_95 = models.FloatField(db_column='Rows_affected_pct_95', blank=True, null=True) rows_affected_stddev = models.FloatField(db_column='Rows_affected_stddev', blank=True, null=True) rows_affected_median = models.FloatField(db_column='Rows_affected_median', blank=True, null=True) rows_read_sum = models.FloatField(db_column='Rows_read_sum', blank=True, null=True) rows_read_min = models.FloatField(db_column='Rows_read_min', blank=True, null=True) rows_read_max = models.FloatField(db_column='Rows_read_max', blank=True, null=True) rows_read_pct_95 = models.FloatField(db_column='Rows_read_pct_95', blank=True, null=True) rows_read_stddev = models.FloatField(db_column='Rows_read_stddev', blank=True, null=True) rows_read_median = models.FloatField(db_column='Rows_read_median', blank=True, null=True) merge_passes_sum = models.FloatField(db_column='Merge_passes_sum', blank=True, null=True) merge_passes_min = models.FloatField(db_column='Merge_passes_min', blank=True, null=True) merge_passes_max = models.FloatField(db_column='Merge_passes_max', blank=True, null=True) merge_passes_pct_95 = models.FloatField(db_column='Merge_passes_pct_95', blank=True, null=True) merge_passes_stddev = models.FloatField(db_column='Merge_passes_stddev', blank=True, null=True) merge_passes_median = models.FloatField(db_column='Merge_passes_median', blank=True, null=True) innodb_io_r_ops_min = models.FloatField(db_column='InnoDB_IO_r_ops_min', blank=True, null=True) innodb_io_r_ops_max = models.FloatField(db_column='InnoDB_IO_r_ops_max', blank=True, null=True) innodb_io_r_ops_pct_95 = models.FloatField(db_column='InnoDB_IO_r_ops_pct_95', blank=True, null=True) innodb_io_r_ops_stddev = models.FloatField(db_column='InnoDB_IO_r_ops_stddev', blank=True, null=True) innodb_io_r_ops_median = models.FloatField(db_column='InnoDB_IO_r_ops_median', blank=True, null=True) innodb_io_r_bytes_min = models.FloatField(db_column='InnoDB_IO_r_bytes_min', blank=True, null=True) innodb_io_r_bytes_max = models.FloatField(db_column='InnoDB_IO_r_bytes_max', blank=True, null=True) innodb_io_r_bytes_pct_95 = models.FloatField(db_column='InnoDB_IO_r_bytes_pct_95', blank=True, null=True) innodb_io_r_bytes_stddev = models.FloatField(db_column='InnoDB_IO_r_bytes_stddev', blank=True, null=True) innodb_io_r_bytes_median = models.FloatField(db_column='InnoDB_IO_r_bytes_median', blank=True, null=True) innodb_io_r_wait_min = models.FloatField(db_column='InnoDB_IO_r_wait_min', blank=True, null=True) innodb_io_r_wait_max = models.FloatField(db_column='InnoDB_IO_r_wait_max', blank=True, null=True) innodb_io_r_wait_pct_95 = models.FloatField(db_column='InnoDB_IO_r_wait_pct_95', blank=True, null=True) innodb_io_r_wait_stddev = models.FloatField(db_column='InnoDB_IO_r_wait_stddev', blank=True, null=True) innodb_io_r_wait_median = models.FloatField(db_column='InnoDB_IO_r_wait_median', blank=True, null=True) innodb_rec_lock_wait_min = 
models.FloatField(db_column='InnoDB_rec_lock_wait_min', blank=True, null=True) innodb_rec_lock_wait_max = models.FloatField(db_column='InnoDB_rec_lock_wait_max', blank=True, null=True) innodb_rec_lock_wait_pct_95 = models.FloatField(db_column='InnoDB_rec_lock_wait_pct_95', blank=True, null=True) innodb_rec_lock_wait_stddev = models.FloatField(db_column='InnoDB_rec_lock_wait_stddev', blank=True, null=True) innodb_rec_lock_wait_median = models.FloatField(db_column='InnoDB_rec_lock_wait_median', blank=True, null=True) innodb_queue_wait_min = models.FloatField(db_column='InnoDB_queue_wait_min', blank=True, null=True) innodb_queue_wait_max = models.FloatField(db_column='InnoDB_queue_wait_max', blank=True, null=True) innodb_queue_wait_pct_95 = models.FloatField(db_column='InnoDB_queue_wait_pct_95', blank=True, null=True) innodb_queue_wait_stddev = models.FloatField(db_column='InnoDB_queue_wait_stddev', blank=True, null=True) innodb_queue_wait_median = models.FloatField(db_column='InnoDB_queue_wait_median', blank=True, null=True) innodb_pages_distinct_min = models.FloatField(db_column='InnoDB_pages_distinct_min', blank=True, null=True) innodb_pages_distinct_max = models.FloatField(db_column='InnoDB_pages_distinct_max', blank=True, null=True) innodb_pages_distinct_pct_95 = models.FloatField(db_column='InnoDB_pages_distinct_pct_95', blank=True, null=True) innodb_pages_distinct_stddev = models.FloatField(db_column='InnoDB_pages_distinct_stddev', blank=True, null=True) innodb_pages_distinct_median = models.FloatField(db_column='InnoDB_pages_distinct_median', blank=True, null=True) qc_hit_cnt = models.FloatField(db_column='QC_Hit_cnt', blank=True, null=True) qc_hit_sum = models.FloatField(db_column='QC_Hit_sum', blank=True, null=True) full_scan_cnt = models.FloatField(db_column='Full_scan_cnt', blank=True, null=True) full_scan_sum = models.FloatField(db_column='Full_scan_sum', blank=True, null=True) full_join_cnt = models.FloatField(db_column='Full_join_cnt', blank=True, null=True) full_join_sum = models.FloatField(db_column='Full_join_sum', blank=True, null=True) tmp_table_cnt = models.FloatField(db_column='Tmp_table_cnt', blank=True, null=True) tmp_table_sum = models.FloatField(db_column='Tmp_table_sum', blank=True, null=True) tmp_table_on_disk_cnt = models.FloatField(db_column='Tmp_table_on_disk_cnt', blank=True, null=True) tmp_table_on_disk_sum = models.FloatField(db_column='Tmp_table_on_disk_sum', blank=True, null=True) filesort_cnt = models.FloatField(db_column='Filesort_cnt', blank=True, null=True) filesort_sum = models.FloatField(db_column='Filesort_sum', blank=True, null=True) filesort_on_disk_cnt = models.FloatField(db_column='Filesort_on_disk_cnt', blank=True, null=True) filesort_on_disk_sum = models.FloatField(db_column='Filesort_on_disk_sum', blank=True, null=True) class Meta: managed = False db_table = 'mysql_slow_query_review_history' unique_together = ('checksum', 'ts_min', 'ts_max') index_together = ('hostname_max', 'ts_min') verbose_name = u'慢日志明细' verbose_name_plural = u'慢日志明细'
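# --- Usage sketch (not part of the original models module) ---------------
# Illustrative ORM queries against the models defined above, kept in
# comments because they require a configured Django project and database.
# The literal values ('mydb') are placeholders; only fields that appear in
# the model definitions above are used.
#
#   # All enabled archive jobs for one source database:
#   ArchiveConfig.objects.filter(src_db_name='mydb', state=True)
#
#   # The ten slow-query fingerprints seen most recently:
#   SlowQuery.objects.order_by('-last_seen')[:10]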
py
1a43977d25771ab011f039ed6bde7dc0e6069480
import json

from tornado.web import RequestHandler

__author__ = 'TIF'


class BaseHandler(RequestHandler):

    @property
    def sched(self):
        return self.application.scheduler

    def from_body_get_arguments(self):
        # The request body is a JSON-encoded byte string, so json.loads()
        # (not json.load(), which expects a file object) must be used here.
        body = self.request.body
        return json.loads(body)
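# --- Usage sketch (not part of the original module) -----------------------
# A minimal, hypothetical subclass showing how BaseHandler is meant to be
# used: the Tornado application object is assumed to expose a `scheduler`
# attribute, and the request body is assumed to carry a JSON object.
# `JobHandler` and the "name" field are illustrative only.
class JobHandler(BaseHandler):

    def post(self):
        args = self.from_body_get_arguments()
        # `self.sched` resolves to the scheduler attached to the application.
        self.write({'received': args.get('name'),
                    'scheduler_available': self.sched is not None})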
py
1a4397c2e978dcf65e2afe63112f250361a5cd65
# -*- coding: utf-8 -*-
import unittest
import doctest
from django.conf import settings

# doc testing in some modules
from cms.utils import urlutils

from cms.tests.page import PagesTestCase
from cms.tests.permmod import PermissionModeratorTestCase
from cms.tests.site import SiteTestCase
from cms.tests.navextender import NavExtenderTestCase
from cms.tests.plugins import PluginsTestCase
from cms.tests.menu import MenusTestCase
from cms.tests.rendering import RenderingTestCase
from cms.tests.placeholder import PlaceholderTestCase

settings.CMS_PERMISSION = True
settings.CMS_MODERATOR = True
settings.CMS_NAVIGATION_EXTENDERS = (
    ('example.categories.navigation.get_nodes', 'Categories'),
    ('example.sampleapp.menu_extender.get_nodes', 'SampleApp Menu'),
)
settings.CMS_FLAT_URLS = False
settings.CMS_MENU_TITLE_OVERWRITE = True
settings.CMS_HIDE_UNTRANSLATED = False
settings.CMS_URL_OVERWRITE = True

if not "example.sampleapp" in settings.INSTALLED_APPS:
    settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) + ["example.sampleapp"]


def suite():
    s = unittest.TestSuite()
    s.addTest(doctest.DocTestSuite(urlutils))
    s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(PagesTestCase))
    s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(SiteTestCase))
    s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(NavExtenderTestCase))
    if "cms.plugins.text" in settings.INSTALLED_APPS:
        s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(PluginsTestCase))
    if "reversion" in settings.INSTALLED_APPS:
        from cms.tests.reversion_tests import ReversionTestCase
        s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ReversionTestCase))
    s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(PermissionModeratorTestCase))
    s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(MenusTestCase))
    s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(RenderingTestCase))
    s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(PlaceholderTestCase))
    return s


def test_runner_with_coverage(test_labels, verbosity=1, interactive=True, extra_tests=[]):
    """Custom test runner.  Follows the django.test.simple.run_tests() interface."""
    import os, shutil, sys

    # Look for coverage.py in __file__/lib as well as sys.path
    sys.path = [os.path.join(os.path.dirname(__file__), "lib")] + sys.path
    import coverage
    from django.test.simple import run_tests as django_test_runner

    from django.conf import settings

    # Start code coverage before anything else if necessary
    #if hasattr(settings, 'COVERAGE_MODULES') and not test_labels:
    coverage.use_cache(0)  # Do not cache any of the coverage.py stuff
    coverage.start()

    test_results = django_test_runner(test_labels, verbosity, interactive, extra_tests)

    # Stop code coverage after tests have completed
    #if hasattr(settings, 'COVERAGE_MODULES') and not test_labels:
    coverage.stop()

    # Print code metrics header
    print ''
    print '----------------------------------------------------------------------'
    print ' Unit Test Code Coverage Results'
    print '----------------------------------------------------------------------'

    # Report code coverage metrics
    coverage_modules = []
    if hasattr(settings, 'COVERAGE_MODULES') and (not test_labels or 'cms' in test_labels):
        for module in settings.COVERAGE_MODULES:
            coverage_modules.append(__import__(module, globals(), locals(), ['']))

    coverage.report(coverage_modules, show_missing=1)

    # Print code metrics footer
    print '----------------------------------------------------------------------'

    return test_results
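# --- Usage sketch (not part of the original module) -----------------------
# How a runner that follows the old django.test.simple.run_tests() interface
# would typically be wired up: point Django's TEST_RUNNER setting at the
# dotted path of the function above.  The module path and module list shown
# here are illustrative only and depend on the project layout.
#
#   # settings.py
#   TEST_RUNNER = 'example.testrunner.test_runner_with_coverage'
#   COVERAGE_MODULES = ['cms.utils.urlutils', 'cms.models.pagemodel']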
py
1a4397f582df1eade38d2130192b792d29592f46
'''
Miscellaneous data generator utilities.

Copyright (C) 2018 Pierluigi Ferrari

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''

from __future__ import division
import numpy as np

def apply_inverse_transforms(y_pred_decoded, inverse_transforms):
    '''
    Takes a list or Numpy array of decoded predictions and applies a given list of
    transforms to them. The list of inverse transforms would usually contain the
    inverter functions that some of the image transformations that come with this
    data generator return. This function would normally be used to transform
    predictions that were made on a transformed image back to the original image.

    Arguments:
        y_pred_decoded (list or array): Either a list of length `batch_size` that contains
            Numpy arrays that contain the predictions for each batch item or a Numpy array.
            If this is a list of Numpy arrays, the arrays would usually have the shape
            `(num_predictions, 6)`, where `num_predictions` is different for each batch item.
            If this is a Numpy array, it would usually have the shape
            `(batch_size, num_predictions, 6)`. The last axis would usually contain the
            class ID, confidence score, and four bounding box coordinates for each prediction.
        inverse_transforms (list): A nested list of length `batch_size` that contains for each
            batch item a list of functions that take one argument (one element of
            `y_pred_decoded` if it is a list or one slice along the first axis of
            `y_pred_decoded` if it is an array) and return an output of the same shape
            and data type.

    Returns:
        The transformed predictions, which have the same structure as `y_pred_decoded`.
    '''

    if isinstance(y_pred_decoded, list):

        y_pred_decoded_inv = []

        for i in range(len(y_pred_decoded)):
            y_pred_decoded_inv.append(np.copy(y_pred_decoded[i]))
            if y_pred_decoded_inv[i].size > 0:  # If there are any predictions for this batch item.
                for inverter in inverse_transforms[i]:
                    if not (inverter is None):
                        y_pred_decoded_inv[i] = inverter(y_pred_decoded_inv[i])

    elif isinstance(y_pred_decoded, np.ndarray):

        y_pred_decoded_inv = np.copy(y_pred_decoded)

        for i in range(len(y_pred_decoded)):
            if y_pred_decoded_inv[i].size > 0:  # If there are any predictions for this batch item.
                for inverter in inverse_transforms[i]:
                    if not (inverter is None):
                        y_pred_decoded_inv[i] = inverter(y_pred_decoded_inv[i])

    else:
        raise ValueError("`y_pred_decoded` must be either a list or a Numpy array.")

    return y_pred_decoded_inv
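# --- Usage sketch (not part of the original module) -----------------------
# A minimal, self-contained example of the calling convention described in
# the docstring above. The "inverter" used here (undoing a horizontal shift
# of the box coordinates) is made up purely for illustration; in the real
# pipeline these functions are returned by the image transformations.
if __name__ == '__main__':
    # One batch item with two decoded predictions:
    # [class_id, confidence, xmin, ymin, xmax, ymax]
    y_pred_decoded = [np.array([[1, 0.9, 30., 40., 80., 90.],
                                [2, 0.8, 10., 20., 50., 60.]])]

    def undo_shift(preds, dx=5.0):
        # Shift the x-coordinates back by `dx` pixels (illustrative only).
        preds = np.copy(preds)
        preds[:, [2, 4]] -= dx
        return preds

    inverse_transforms = [[undo_shift, None]]  # `None` entries are skipped.
    restored = apply_inverse_transforms(y_pred_decoded, inverse_transforms)
    print(restored[0])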
py
1a4399ed59b18c6f2fbe8794dc92edf9d5928eaa
#! python
"""
Class `CountRunner` to report read status of FL reads, as well as
make abundance report.
"""
import logging
import os.path as op

from .CountingUtils import read_group_file, output_read_count_FL, make_abundance_file

__author__ = '[email protected]'

log = logging.getLogger(__name__)


class CountRunner(object):
    """
    Compute read status of FL CCS reads and output abundance report.
    """
    def __init__(self, group_filename, transcript_to_reads_dict,
                 read_to_length_dict, output_read_stat_filename,
                 output_abundance_filename):
        """
        Parameters:
          group_filename -- an input group file associating collapsed isoforms with FL reads.
          transcript_to_reads_dict -- {isoform: zmws} dict
          output_read_stat_filename -- an output FL read status report
          output_abundance_filename -- an output abundance report
        """
        # output read status report and abundance report
        self.read_stat_fn = output_read_stat_filename
        self.abundance_fn = output_abundance_filename

        self.group_filename = group_filename  # input, group file of collapsed isoforms
        self.transcript_to_reads_dict = transcript_to_reads_dict  # input: {isoforms: reads}
        self.read_to_length_dict = read_to_length_dict  # input {read: length}
        self.sample_prefix = None
        self.validate_inputs()

    def __str__(self):
        return ("<%s (%s, zmw) to count reads abundance of isoforms>\n" %
                (self.__class__.__name__, self.group_filename))

    def validate_inputs(self):
        """Validate existence of input files."""
        logging.info("Validating inputs.")
        if not op.exists(self.group_filename):
            raise IOError("Input group file %s does not exist" % self.group_filename)

    def run(self, restricted_movies=None):
        """
        Compute read status for FL reads, and make abundance report.
        Parameters:
          restricted_movies -- if is None, process reads from ALL movies;
          otherwise only process reads in the list of restricted movies.
        """
        # Read cid info from the input group file.
        cid_info = read_group_file(group_filename=self.group_filename,
                                   sample_prefixes=self.sample_prefix)

        # Output FL read status
        logging.debug("Computing read status of FL reads.")
        output_read_count_FL(cid_info=cid_info,
                             output_filename=self.read_stat_fn,
                             sample_prefix=self.sample_prefix,
                             transcript_to_reads_dict=self.transcript_to_reads_dict,
                             read_to_length_dict=self.read_to_length_dict,
                             output_mode='w',
                             restricted_movies=restricted_movies)
        logging.info("Read status of FL reads written to %s", self.read_stat_fn)
        logging.info("IsoSeqS does not use NFL reads, don't append read status of nFL reads.")

        # Make abundance file
        make_abundance_file(read_stat_filename=self.read_stat_fn,
                            output_filename=self.abundance_fn,
                            given_total=None,
                            restricted_movies=restricted_movies,
                            write_header_comments=True)
        logging.info("Abundance file written to %s", self.abundance_fn)
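# --- Usage sketch (not part of the original module) -----------------------
# A hypothetical invocation showing the expected inputs, kept in comments
# because validate_inputs() requires the group file to already exist on
# disk. All file names, isoform IDs and read names below are placeholders.
#
#   runner = CountRunner(
#       group_filename='collapsed.group.txt',
#       transcript_to_reads_dict={'PB.1.1': ['movie1/1/ccs', 'movie1/2/ccs']},
#       read_to_length_dict={'movie1/1/ccs': 1523, 'movie1/2/ccs': 1498},
#       output_read_stat_filename='collapsed.read_stat.txt',
#       output_abundance_filename='collapsed.abundance.txt')
#   runner.run()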
py
1a439b0da67b3a861333cf15e75ab021dac0ec24
#!/usr/bin/python from collections import OrderedDict from Qt import QtGui, QtCore, QtWidgets from NodeGraphQt.constants import (IN_PORT, OUT_PORT, NODE_WIDTH, NODE_HEIGHT, NODE_ICON_SIZE, ICON_NODE_BASE, NODE_SEL_COLOR, NODE_SEL_BORDER_COLOR, PORT_FALLOFF, Z_VAL_NODE, ITEM_CACHE_MODE) from NodeGraphQt.errors import NodeWidgetError from NodeGraphQt.qgraphics.node_abstract import AbstractNodeItem from NodeGraphQt.qgraphics.node_overlay_disabled import XDisabledItem from NodeGraphQt.qgraphics.node_text_item import NodeTextItem from NodeGraphQt.qgraphics.port import PortItem, CustomPortItem class NodeItem(AbstractNodeItem): """ Base Node item. Args: name (str): name displayed on the node. parent (QtWidgets.QGraphicsItem): parent item. """ def __init__(self, name='node', parent=None): super(NodeItem, self).__init__(name, parent) pixmap = QtGui.QPixmap(ICON_NODE_BASE) if pixmap.size().height() > NODE_ICON_SIZE: pixmap = pixmap.scaledToHeight( NODE_ICON_SIZE, QtCore.Qt.SmoothTransformation ) self._properties['icon'] = ICON_NODE_BASE self._icon_item = QtWidgets.QGraphicsPixmapItem(pixmap, self) self._icon_item.setTransformationMode(QtCore.Qt.SmoothTransformation) self._text_item = NodeTextItem(self.name, self) self._x_item = XDisabledItem(self, 'DISABLED') self._input_items = OrderedDict() self._output_items = OrderedDict() self._widgets = OrderedDict() self._proxy_mode = False self._proxy_mode_threshold = 70 def paint(self, painter, option, widget): """ Draws the node base not the ports or text. Args: painter (QtGui.QPainter): painter used for drawing the item. option (QtGui.QStyleOptionGraphicsItem): used to describe the parameters needed to draw. widget (QtWidgets.QWidget): not used. """ self.auto_switch_mode() painter.save() painter.setPen(QtCore.Qt.NoPen) painter.setBrush(QtCore.Qt.NoBrush) # base background. margin = 1.0 rect = self.boundingRect() rect = QtCore.QRectF(rect.left() + margin, rect.top() + margin, rect.width() - (margin * 2), rect.height() - (margin * 2)) radius = 4.0 painter.setBrush(QtGui.QColor(*self.color)) painter.drawRoundedRect(rect, radius, radius) # light overlay on background when selected. if self.selected: painter.setBrush(QtGui.QColor(*NODE_SEL_COLOR)) painter.drawRoundedRect(rect, radius, radius) # node name background. padding = 3.0, 2.0 text_rect = self._text_item.boundingRect() text_rect = QtCore.QRectF(text_rect.x() + padding[0], rect.y() + padding[1], rect.width() - padding[0] - margin, text_rect.height() - (padding[1] * 2)) if self.selected: painter.setBrush(QtGui.QColor(*NODE_SEL_COLOR)) else: painter.setBrush(QtGui.QColor(0, 0, 0, 80)) painter.drawRoundedRect(text_rect, 3.0, 3.0) # node border if self.selected: border_width = 1.2 border_color = QtGui.QColor(*NODE_SEL_BORDER_COLOR) else: border_width = 0.8 border_color = QtGui.QColor(*self.border_color) border_rect = QtCore.QRectF(rect.left(), rect.top(), rect.width(), rect.height()) pen = QtGui.QPen(border_color, border_width) pen.setCosmetic(self.viewer().get_zoom() < 0.0) path = QtGui.QPainterPath() path.addRoundedRect(border_rect, radius, radius) painter.setBrush(QtCore.Qt.NoBrush) painter.setPen(pen) painter.drawPath(path) painter.restore() def mousePressEvent(self, event): """ Re-implemented to ignore event if LMB is over port collision area. Args: event (QtWidgets.QGraphicsSceneMouseEvent): mouse event. 
""" if event.button() == QtCore.Qt.LeftButton: for p in self._input_items.keys(): if p.hovered: event.ignore() return for p in self._output_items.keys(): if p.hovered: event.ignore() return super(NodeItem, self).mousePressEvent(event) def mouseReleaseEvent(self, event): """ Re-implemented to ignore event if Alt modifier is pressed. Args: event (QtWidgets.QGraphicsSceneMouseEvent): mouse event. """ if event.modifiers() == QtCore.Qt.AltModifier: event.ignore() return super(NodeItem, self).mouseReleaseEvent(event) def mouseDoubleClickEvent(self, event): """ Re-implemented to emit "node_double_clicked" signal. Args: event (QtWidgets.QGraphicsSceneMouseEvent): mouse event. """ if event.button() == QtCore.Qt.LeftButton: # enable text item edit mode. items = self.scene().items(event.scenePos()) if self._text_item in items: self._text_item.set_editable(True) self._text_item.setFocus() event.ignore() return viewer = self.viewer() if viewer: viewer.node_double_clicked.emit(self.id) super(NodeItem, self).mouseDoubleClickEvent(event) def itemChange(self, change, value): """ Re-implemented to update pipes on selection changed. Args: change: value: """ if change == self.ItemSelectedChange and self.scene(): self.reset_pipes() if value: self.highlight_pipes() self.setZValue(Z_VAL_NODE) if not self.selected: self.setZValue(Z_VAL_NODE + 1) return super(NodeItem, self).itemChange(change, value) def _tooltip_disable(self, state): """ Updates the node tooltip when the node is enabled/disabled. Args: state (bool): node disable state. """ tooltip = '<b>{}</b>'.format(self.name) if state: tooltip += ' <font color="red"><b>(DISABLED)</b></font>' tooltip += '<br/>{}<br/>'.format(self.type_) self.setToolTip(tooltip) def _set_base_size(self, add_w=0.0, add_h=0.0): """ Sets the initial base size for the node. Args: add_w (float): add additional width. add_h (float): add additional height. """ self._width, self._height = self.calc_size(add_w, add_h) if self._width < NODE_WIDTH: self._width = NODE_WIDTH if self._height < NODE_HEIGHT: self._height = NODE_HEIGHT def _set_text_color(self, color): """ set text color. Args: color (tuple): color value in (r, g, b, a). """ text_color = QtGui.QColor(*color) for port, text in self._input_items.items(): text.setDefaultTextColor(text_color) for port, text in self._output_items.items(): text.setDefaultTextColor(text_color) self._text_item.setDefaultTextColor(text_color) def activate_pipes(self): """ active pipe color. """ ports = self.inputs + self.outputs for port in ports: for pipe in port.connected_pipes: pipe.activate() def highlight_pipes(self): """ Highlight pipe color. """ ports = self.inputs + self.outputs for port in ports: for pipe in port.connected_pipes: pipe.highlight() def reset_pipes(self): """ Reset all the pipe colors. """ ports = self.inputs + self.outputs for port in ports: for pipe in port.connected_pipes: pipe.reset() def calc_size(self, add_w=0.0, add_h=0.0): """ Calculates the minimum node size. Args: add_w (float): additional width. add_h (float): additional height. Returns: tuple(float, float): width, height. """ # width, height from node name text. text_w = self._text_item.boundingRect().width() text_h = self._text_item.boundingRect().height() # width, height from node ports. 
port_width = 0.0 p_input_text_width = 0.0 p_output_text_width = 0.0 p_input_height = 0.0 p_output_height = 0.0 for port, text in self._input_items.items(): if not port.isVisible(): continue if not port_width: port_width = port.boundingRect().width() t_width = text.boundingRect().width() if text.isVisible() and t_width > p_input_text_width: p_input_text_width = text.boundingRect().width() p_input_height += port.boundingRect().height() for port, text in self._output_items.items(): if not port.isVisible(): continue if not port_width: port_width = port.boundingRect().width() t_width = text.boundingRect().width() if text.isVisible() and t_width > p_output_text_width: p_output_text_width = text.boundingRect().width() p_output_height += port.boundingRect().height() port_text_width = p_input_text_width + p_output_text_width # width, height from node embedded widgets. widget_width = 0.0 widget_height = 0.0 for widget in self._widgets.values(): w_width = widget.boundingRect().width() w_height = widget.boundingRect().height() if w_width > widget_width: widget_width = w_width widget_height += w_height side_padding = 0.0 if all([widget_width, p_input_text_width, p_output_text_width]): port_text_width = max([p_input_text_width, p_output_text_width]) port_text_width *= 2 elif widget_width: side_padding = 10 width = port_width + max([text_w, port_text_width]) + side_padding height = max([text_h, p_input_height, p_output_height, widget_height]) if widget_width: # add additional width for node widget. width += widget_width if widget_height: # add bottom margin for node widget. height += 4.0 height *= 1.05 # additional width, height. width += add_w height += add_h return width, height def align_icon(self, h_offset=0.0, v_offset=0.0): """ Align node icon to the default top left of the node. Args: v_offset (float): additional vertical offset. h_offset (float): additional horizontal offset. """ icon_rect = self._icon_item.boundingRect() text_rect = self._text_item.boundingRect() x = self.boundingRect().left() + 2.0 y = text_rect.center().y() - (icon_rect.height() / 2) self._icon_item.setPos(x + h_offset, y + v_offset) def align_label(self, h_offset=0.0, v_offset=0.0): """ Center node label text to the top of the node. Args: v_offset (float): vertical offset. h_offset (float): horizontal offset. """ rect = self.boundingRect() text_rect = self._text_item.boundingRect() x = rect.center().x() - (text_rect.width() / 2) self._text_item.setPos(x + h_offset, rect.y() + v_offset) def align_widgets(self, v_offset=0.0): """ Align node widgets to the default center of the node. Args: v_offset (float): vertical offset. """ if not self._widgets: return rect = self.boundingRect() y = rect.y() + v_offset inputs = [p for p in self.inputs if p.isVisible()] outputs = [p for p in self.outputs if p.isVisible()] for widget in self._widgets.values(): widget_rect = widget.boundingRect() if not inputs: x = rect.left() + 10 widget.widget().setTitleAlign('left') elif not outputs: x = rect.right() - widget_rect.width() - 10 widget.widget().setTitleAlign('right') else: x = rect.center().x() - (widget_rect.width() / 2) widget.widget().setTitleAlign('center') widget.setPos(x, y) y += widget_rect.height() def align_ports(self, v_offset=0.0): """ Align input, output ports in the node layout. Args: v_offset (float): port vertical offset. 
""" width = self._width txt_offset = PORT_FALLOFF - 2 spacing = 1 # adjust input position inputs = [p for p in self.inputs if p.isVisible()] if inputs: port_width = inputs[0].boundingRect().width() port_height = inputs[0].boundingRect().height() port_x = (port_width / 2) * -1 port_y = v_offset for port in inputs: port.setPos(port_x, port_y) port_y += port_height + spacing # adjust input text position for port, text in self._input_items.items(): if port.isVisible(): txt_x = port.boundingRect().width() / 2 - txt_offset text.setPos(txt_x, port.y() - 1.5) # adjust output position outputs = [p for p in self.outputs if p.isVisible()] if outputs: port_width = outputs[0].boundingRect().width() port_height = outputs[0].boundingRect().height() port_x = width - (port_width / 2) port_y = v_offset for port in outputs: port.setPos(port_x, port_y) port_y += port_height + spacing # adjust output text position for port, text in self._output_items.items(): if port.isVisible(): txt_width = text.boundingRect().width() - txt_offset txt_x = port.x() - txt_width text.setPos(txt_x, port.y() - 1.5) def draw_node(self): """ Re-draw the node item in the scene. (re-implemented for vertical layout design) """ height = self._text_item.boundingRect().height() + 4.0 # setup initial base size. self._set_base_size(add_h=height) # set text color when node is initialized. self._set_text_color(self.text_color) # set the tooltip self._tooltip_disable(self.disabled) # --- set the initial node layout --- # (do all the graphic item layout offsets here) # align label text self.align_label() # align icon self.align_icon(h_offset=2.0, v_offset=1.0) # arrange input and output ports. self.align_ports(v_offset=height) # arrange node widgets self.align_widgets(v_offset=height) self.update() def post_init(self, viewer=None, pos=None): """ Called after node has been added into the scene. Adjust the node layout and form after the node has been added. Args: viewer (NodeGraphQt.widgets.viewer.NodeViewer): not used pos (tuple): cursor position. """ self.draw_node() # set initial node position. if pos: self.xy_pos = pos def auto_switch_mode(self): """ Decide whether to draw the node with proxy mode. (this is called at the start in the "self.paint()" function.) """ if ITEM_CACHE_MODE is QtWidgets.QGraphicsItem.ItemCoordinateCache: return rect = self.sceneBoundingRect() l = self.viewer().mapToGlobal( self.viewer().mapFromScene(rect.topLeft())) r = self.viewer().mapToGlobal( self.viewer().mapFromScene(rect.topRight())) # width is the node width in screen width = r.x() - l.x() self.set_proxy_mode(width < self._proxy_mode_threshold) def set_proxy_mode(self, mode): """ Set whether to draw the node with proxy mode. (proxy mode toggles visibility for some qgraphic items in the node.) Args: mode (bool): true to enable proxy mode. """ if mode is self._proxy_mode: return self._proxy_mode = mode visible = not mode # disable overlay item. self._x_item.proxy_mode = self._proxy_mode # node widget visibility. for w in self._widgets.values(): w.widget().setVisible(visible) # input port text visibility. for port, text in self._input_items.items(): if port.display_name: text.setVisible(visible) # output port text visibility. 
for port, text in self._output_items.items(): if port.display_name: text.setVisible(visible) self._text_item.setVisible(visible) self._icon_item.setVisible(visible) @property def icon(self): return self._properties['icon'] @icon.setter def icon(self, path=None): self._properties['icon'] = path path = path or ICON_NODE_BASE pixmap = QtGui.QPixmap(path) if pixmap.size().height() > NODE_ICON_SIZE: pixmap = pixmap.scaledToHeight(NODE_ICON_SIZE, QtCore.Qt.SmoothTransformation) self._icon_item.setPixmap(pixmap) if self.scene(): self.post_init() self.update() @AbstractNodeItem.width.setter def width(self, width=0.0): w, h = self.calc_size() width = width if width > w else w AbstractNodeItem.width.fset(self, width) @AbstractNodeItem.height.setter def height(self, height=0.0): w, h = self.calc_size() h = 70 if h < 70 else h height = height if height > h else h AbstractNodeItem.height.fset(self, height) @AbstractNodeItem.disabled.setter def disabled(self, state=False): AbstractNodeItem.disabled.fset(self, state) for n, w in self._widgets.items(): w.widget().setDisabled(state) self._tooltip_disable(state) self._x_item.setVisible(state) @AbstractNodeItem.selected.setter def selected(self, selected=False): AbstractNodeItem.selected.fset(self, selected) if selected: self.highlight_pipes() @AbstractNodeItem.name.setter def name(self, name=''): AbstractNodeItem.name.fset(self, name) if name == self._text_item.toPlainText(): return self._text_item.setPlainText(name) if self.scene(): self.align_label() self.update() @AbstractNodeItem.color.setter def color(self, color=(100, 100, 100, 255)): AbstractNodeItem.color.fset(self, color) if self.scene(): self.scene().update() self.update() @AbstractNodeItem.text_color.setter def text_color(self, color=(100, 100, 100, 255)): AbstractNodeItem.text_color.fset(self, color) self._set_text_color(color) self.update() @property def text_item(self): """ Get the node name text qgraphics item. Returns: NodeTextItem: node text object. """ return self._text_item @property def inputs(self): """ Returns: list[PortItem]: input port graphic items. """ return list(self._input_items.keys()) @property def outputs(self): """ Returns: list[PortItem]: output port graphic items. """ return list(self._output_items.keys()) def _add_port(self, port): """ Adds a port qgraphics item into the node. Args: port (PortItem): port item. Returns: PortItem: port qgraphics item. """ text = QtWidgets.QGraphicsTextItem(port.name, self) text.font().setPointSize(8) text.setFont(text.font()) text.setVisible(port.display_name) text.setCacheMode(ITEM_CACHE_MODE) if port.port_type == IN_PORT: self._input_items[port] = text elif port.port_type == OUT_PORT: self._output_items[port] = text if self.scene(): self.post_init() return port def add_input(self, name='input', multi_port=False, display_name=True, locked=False, painter_func=None): """ Adds a port qgraphics item into the node with the "port_type" set as IN_PORT. Args: name (str): name for the port. multi_port (bool): allow multiple connections. display_name (bool): display the port name. locked (bool): locked state. painter_func (function): custom paint function. Returns: PortItem: input port qgraphics item. 
""" if painter_func: port = CustomPortItem(self, painter_func) else: port = PortItem(self) port.name = name port.port_type = IN_PORT port.multi_connection = multi_port port.display_name = display_name port.locked = locked return self._add_port(port) def add_output(self, name='output', multi_port=False, display_name=True, locked=False, painter_func=None): """ Adds a port qgraphics item into the node with the "port_type" set as OUT_PORT. Args: name (str): name for the port. multi_port (bool): allow multiple connections. display_name (bool): display the port name. locked (bool): locked state. painter_func (function): custom paint function. Returns: PortItem: output port qgraphics item. """ if painter_func: port = CustomPortItem(self, painter_func) else: port = PortItem(self) port.name = name port.port_type = OUT_PORT port.multi_connection = multi_port port.display_name = display_name port.locked = locked return self._add_port(port) def _delete_port(self, port, text): """ Removes port item and port text from node. Args: port (PortItem): port object. text (QtWidgets.QGraphicsTextItem): port text object. """ port.setParentItem(None) text.setParentItem(None) self.scene().removeItem(port) self.scene().removeItem(text) del port del text def delete_input(self, port): """ Remove input port from node. Args: port (PortItem): port object. """ self._delete_port(port, self._input_items.pop(port)) def delete_output(self, port): """ Remove output port from node. Args: port (PortItem): port object. """ self._delete_port(port, self._output_items.pop(port)) def get_input_text_item(self, port_item): """ Args: port_item (PortItem): port item. Returns: QGraphicsTextItem: graphic item used for the port text. """ return self._input_items[port_item] def get_output_text_item(self, port_item): """ Args: port_item (PortItem): port item. Returns: QGraphicsTextItem: graphic item used for the port text. """ return self._output_items[port_item] @property def widgets(self): return self._widgets.copy() def add_widget(self, widget): self._widgets[widget.get_name()] = widget def get_widget(self, name): widget = self._widgets.get(name) if widget: return widget raise NodeWidgetError('node has no widget "{}"'.format(name)) def has_widget(self, name): return name in self._widgets.keys() def from_dict(self, node_dict): super(NodeItem, self).from_dict(node_dict) widgets = node_dict.pop('widgets', {}) for name, value in widgets.items(): if self._widgets.get(name): self._widgets[name].value = value class NodeItemVertical(NodeItem): """ Vertical Node item. Args: name (str): name displayed on the node. parent (QtWidgets.QGraphicsItem): parent item. """ def __init__(self, name='node', parent=None): super(NodeItemVertical, self).__init__(name, parent) font = QtGui.QFont() font.setPointSize(15) self.text_item.setFont(font) def paint(self, painter, option, widget): """ Draws the node base not the ports. Args: painter (QtGui.QPainter): painter used for drawing the item. option (QtGui.QStyleOptionGraphicsItem): used to describe the parameters needed to draw. widget (QtWidgets.QWidget): not used. """ self.auto_switch_mode() painter.save() painter.setPen(QtCore.Qt.NoPen) painter.setBrush(QtCore.Qt.NoBrush) # base background. margin = 1.0 rect = self.boundingRect() rect = QtCore.QRectF(rect.left() + margin, rect.top() + margin, rect.width() - (margin * 2), rect.height() - (margin * 2)) radius = 4.0 painter.setBrush(QtGui.QColor(*self.color)) painter.drawRoundedRect(rect, radius, radius) # light overlay on background when selected. 
if self.selected: painter.setBrush(QtGui.QColor(*NODE_SEL_COLOR)) painter.drawRoundedRect(rect, radius, radius) # top & bottom edge background. padding = 2.0 height = 10 if self.selected: painter.setBrush(QtGui.QColor(*NODE_SEL_COLOR)) else: painter.setBrush(QtGui.QColor(0, 0, 0, 80)) for y in [rect.y() + padding, rect.height() - height - 1]: edge_rect = QtCore.QRectF(rect.x() + padding, y, rect.width() - (padding * 2), height) painter.drawRoundedRect(edge_rect, 3.0, 3.0) # node border border_width = 0.8 border_color = QtGui.QColor(*self.border_color) if self.selected: border_width = 1.2 border_color = QtGui.QColor(*NODE_SEL_BORDER_COLOR) border_rect = QtCore.QRectF(rect.left(), rect.top(), rect.width(), rect.height()) pen = QtGui.QPen(border_color, border_width) pen.setCosmetic(self.viewer().get_zoom() < 0.0) painter.setBrush(QtCore.Qt.NoBrush) painter.setPen(pen) painter.drawRoundedRect(border_rect, radius, radius) painter.restore() def align_icon(self, h_offset=0.0, v_offset=0.0): """ Align node icon to the right side of the node. Args: v_offset (float): vertical offset. h_offset (float): horizontal offset. """ center_y = self.boundingRect().center().y() icon_rect = self._icon_item.boundingRect() text_rect = self._text_item.boundingRect() x = self.boundingRect().right() + h_offset y = center_y - text_rect.height() - (icon_rect.height() / 2) + v_offset self._icon_item.setPos(x, y) def align_label(self, h_offset=0.0, v_offset=0.0): """ Align node label to the right side of the node. Args: v_offset (float): vertical offset. h_offset (float): horizontal offset. """ rect = self._text_item.boundingRect() x = self.boundingRect().right() + h_offset y = self.boundingRect().center().y() - (rect.height() / 2) + v_offset self.text_item.setPos(x, y) def align_ports(self, v_offset=0.0): """ Align input, output ports in the node layout. """ # adjust input position inputs = [p for p in self.inputs if p.isVisible()] if inputs: port_width = inputs[0].boundingRect().width() port_height = inputs[0].boundingRect().height() half_width = port_width/2 delta = self._width / (len(inputs)+1) port_x = delta port_y = (port_height / 2) * -1 for port in inputs: port.setPos(port_x - half_width, port_y) port_x += delta # adjust output position outputs = [p for p in self.outputs if p.isVisible()] if outputs: port_width = outputs[0].boundingRect().width() port_height = outputs[0].boundingRect().height() half_width = port_width / 2 delta = self._width / (len(outputs)+1) port_x = delta port_y = self._height - (port_height / 2) for port in outputs: port.setPos(port_x-half_width, port_y) port_x += delta def align_widgets(self, v_offset=0.0): """ Align node widgets to the default center of the node. Args: v_offset (float): vertical offset. """ if not self._widgets: return rect = self.boundingRect() y = rect.center().y() + v_offset widget_height = 0.0 for widget in self._widgets.values(): widget_rect = widget.boundingRect() widget_height += widget_rect.height() y -= widget_height / 2 for widget in self._widgets.values(): widget_rect = widget.boundingRect() x = rect.center().x() - (widget_rect.width() / 2) widget.widget().setTitleAlign('center') widget.setPos(x, y) y += widget_rect.height() def draw_node(self): """ Re-draw the node item in the scene. """ # setup initial base size. self._set_base_size() # set text color when node is initialized. 
self._set_text_color(self.text_color) # set the tooltip self._tooltip_disable(self.disabled) # --- setup node layout --- # (do all the graphic item layout offsets here) # align label text self.align_label(h_offset=6) # align icon self.align_icon(v_offset=4) # arrange input and output ports. self.align_ports() # arrange node widgets self.align_widgets() self.update() def calc_size(self, add_w=0.0, add_h=0.0): """ Calculate minimum node size. Args: add_w (float): additional width. add_h (float): additional height. """ p_input_width = 0.0 p_output_width = 0.0 p_input_height = 0.0 p_output_height = 0.0 for port in self._input_items.keys(): if port.isVisible(): p_input_width += port.boundingRect().width() if not p_input_height: p_input_height = port.boundingRect().height() for port in self._output_items.keys(): if port.isVisible(): p_output_width += port.boundingRect().width() if not p_output_height: p_output_height = port.boundingRect().height() widget_width = 0.0 widget_height = 0.0 for widget in self._widgets.values(): if widget.boundingRect().width() > widget_width: widget_width = widget.boundingRect().width() widget_height += widget.boundingRect().height() width = max([p_input_width, p_output_width, widget_width]) + add_w height = p_input_height + p_output_height + widget_height + add_h return width, height def add_input(self, name='input', multi_port=False, display_name=True, locked=False, painter_func=None): """ Adds a port qgraphics item into the node with the "port_type" set as IN_PORT Args: name (str): name for the port. multi_port (bool): allow multiple connections. display_name (bool): (not used). locked (bool): locked state. painter_func (function): custom paint function. Returns: PortItem: port qgraphics item. """ return super(NodeItemVertical, self).add_input( name, multi_port, False, locked, painter_func) def add_output(self, name='output', multi_port=False, display_name=True, locked=False, painter_func=None): """ Adds a port qgraphics item into the node with the "port_type" set as OUT_PORT Args: name (str): name for the port. multi_port (bool): allow multiple connections. display_name (bool): (not used). locked (bool): locked state. painter_func (function): custom paint function. Returns: PortItem: port qgraphics item. """ return super(NodeItemVertical, self).add_output( name, multi_port, False, locked, painter_func)
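# --- Usage sketch (not part of the original module) -----------------------
# How the node items above are typically built up before being added to a
# graph scene, kept in comments because QGraphicsItems need a running
# QApplication. Variable names are illustrative only; in normal use the
# node graph viewer creates these items and calls draw_node()/post_init()
# itself once the item is in the scene.
#
#   from Qt import QtWidgets
#   app = QtWidgets.QApplication([])
#   node = NodeItem(name='my node')
#   node.add_input(name='in A', multi_port=True)
#   node.add_output(name='out 1')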
py
1a439cbe2d6bd57cb9205a55a668fc7329b1b1ea
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # Copyright (c) 2011 Piston Cloud Computing, Inc # Copyright (c) 2012 University Of Minho # (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A connection to a hypervisor through libvirt. Supports KVM, LXC, QEMU, UML, XEN and Parallels. """ import binascii import collections from collections import deque import contextlib import errno import functools import glob import itertools import operator import os import pwd import random import shutil import tempfile import time import uuid from castellan import key_manager import eventlet from eventlet import greenthread from eventlet import tpool from lxml import etree from os_brick import encryptors from os_brick.encryptors import luks as luks_encryptor from os_brick import exception as brick_exception from os_brick.initiator import connector from oslo_concurrency import processutils from oslo_log import log as logging from oslo_serialization import base64 from oslo_serialization import jsonutils from oslo_service import loopingcall from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import fileutils from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils import six from six.moves import range from nova.api.metadata import base as instance_metadata from nova.api.metadata import password from nova import block_device from nova.compute import power_state from nova.compute import task_states from nova.compute import utils as compute_utils import nova.conf from nova.console import serial as serial_console from nova.console import type as ctype from nova import context as nova_context from nova import crypto from nova import exception from nova.i18n import _ from nova import image from nova.network import model as network_model from nova import objects from nova.objects import diagnostics as diagnostics_obj from nova.objects import fields from nova.objects import migrate_data as migrate_data_obj from nova.pci import manager as pci_manager from nova.pci import utils as pci_utils import nova.privsep.libvirt import nova.privsep.path from nova import rc_fields from nova import utils from nova import version from nova.virt import block_device as driver_block_device from nova.virt import configdrive from nova.virt.disk import api as disk_api from nova.virt.disk.vfs import guestfs from nova.virt import driver from nova.virt import firewall from nova.virt import hardware from nova.virt.image import model as imgmodel from nova.virt import images from nova.virt.libvirt import blockinfo from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import designer from nova.virt.libvirt import firewall as libvirt_firewall from nova.virt.libvirt import guest as libvirt_guest 
from nova.virt.libvirt import host from nova.virt.libvirt import imagebackend from nova.virt.libvirt import imagecache from nova.virt.libvirt import instancejobtracker from nova.virt.libvirt import migration as libvirt_migrate from nova.virt.libvirt.storage import dmcrypt from nova.virt.libvirt.storage import lvm from nova.virt.libvirt.storage import rbd_utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt.libvirt import vif as libvirt_vif from nova.virt.libvirt.volume import mount from nova.virt.libvirt.volume import remotefs from nova.virt import netutils from nova.volume import cinder libvirt = None uefi_logged = False LOG = logging.getLogger(__name__) CONF = nova.conf.CONF DEFAULT_FIREWALL_DRIVER = "%s.%s" % ( libvirt_firewall.__name__, libvirt_firewall.IptablesFirewallDriver.__name__) DEFAULT_UEFI_LOADER_PATH = { "x86_64": "/usr/share/OVMF/OVMF_CODE.fd", "aarch64": "/usr/share/AAVMF/AAVMF_CODE.fd" } MAX_CONSOLE_BYTES = 100 * units.Ki # The libvirt driver will prefix any disable reason codes with this string. DISABLE_PREFIX = 'AUTO: ' # Disable reason for the service which was enabled or disabled without reason DISABLE_REASON_UNDEFINED = None # Guest config console string CONSOLE = "console=tty0 console=ttyS0 console=hvc0" GuestNumaConfig = collections.namedtuple( 'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune']) class InjectionInfo(collections.namedtuple( 'InjectionInfo', ['network_info', 'files', 'admin_pass'])): __slots__ = () def __repr__(self): return ('InjectionInfo(network_info=%r, files=%r, ' 'admin_pass=<SANITIZED>)') % (self.network_info, self.files) libvirt_volume_drivers = [ 'iscsi=nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver', 'iser=nova.virt.libvirt.volume.iser.LibvirtISERVolumeDriver', 'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver', 'drbd=nova.virt.libvirt.volume.drbd.LibvirtDRBDVolumeDriver', 'fake=nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver', 'rbd=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver', 'sheepdog=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver', 'nfs=nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver', 'smbfs=nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver', 'aoe=nova.virt.libvirt.volume.aoe.LibvirtAOEVolumeDriver', 'fibre_channel=' 'nova.virt.libvirt.volume.fibrechannel.' 'LibvirtFibreChannelVolumeDriver', 'gpfs=nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver', 'quobyte=nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver', 'hgst=nova.virt.libvirt.volume.hgst.LibvirtHGSTVolumeDriver', 'scaleio=nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver', 'disco=nova.virt.libvirt.volume.disco.LibvirtDISCOVolumeDriver', 'vzstorage=' 'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver', 'veritas_hyperscale=' 'nova.virt.libvirt.volume.vrtshyperscale.' 'LibvirtHyperScaleVolumeDriver', 'storpool=nova.virt.libvirt.volume.storpool.LibvirtStorPoolVolumeDriver', ] def patch_tpool_proxy(): """eventlet.tpool.Proxy doesn't work with old-style class in __str__() or __repr__() calls. See bug #962840 for details. We perform a monkey patch to replace those two instance methods. 
""" def str_method(self): return str(self._obj) def repr_method(self): return repr(self._obj) tpool.Proxy.__str__ = str_method tpool.Proxy.__repr__ = repr_method patch_tpool_proxy() # For information about when MIN_LIBVIRT_VERSION and # NEXT_MIN_LIBVIRT_VERSION can be changed, consult # # https://wiki.openstack.org/wiki/LibvirtDistroSupportMatrix # # Currently this is effectively the min version for i686/x86_64 # + KVM/QEMU, as other architectures/hypervisors require newer # versions. Over time, this will become a common min version # for all architectures/hypervisors, as this value rises to # meet them. MIN_LIBVIRT_VERSION = (1, 2, 9) MIN_QEMU_VERSION = (2, 1, 0) # TODO(berrange): Re-evaluate this at start of each release cycle # to decide if we want to plan a future min version bump. # MIN_LIBVIRT_VERSION can be updated to match this after # NEXT_MIN_LIBVIRT_VERSION has been at a higher value for # one cycle NEXT_MIN_LIBVIRT_VERSION = (3, 0, 0) NEXT_MIN_QEMU_VERSION = (2, 8, 0) # When the above version matches/exceeds this version # delete it & corresponding code using it # Libvirt version 1.2.17 is required for successful block live migration # of vm booted from image with attached devices MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION = (1, 2, 17) # PowerPC based hosts that support NUMA using libvirt MIN_LIBVIRT_NUMA_VERSION_PPC = (1, 2, 19) # Versions of libvirt with known NUMA topology issues # See bug #1449028 BAD_LIBVIRT_NUMA_VERSIONS = [(1, 2, 9, 2)] # Versions of libvirt with broken cpu pinning support. This excludes # versions of libvirt with broken NUMA support since pinning needs # NUMA # See bug #1438226 BAD_LIBVIRT_CPU_POLICY_VERSIONS = [(1, 2, 10)] # Virtuozzo driver support MIN_VIRTUOZZO_VERSION = (7, 0, 0) MIN_LIBVIRT_VIRTUOZZO_VERSION = (1, 2, 12) # Ability to set the user guest password with Qemu MIN_LIBVIRT_SET_ADMIN_PASSWD = (1, 2, 16) # Ability to set the user guest password with parallels MIN_LIBVIRT_PARALLELS_SET_ADMIN_PASSWD = (2, 0, 0) # s/390 & s/390x architectures with KVM MIN_LIBVIRT_KVM_S390_VERSION = (1, 2, 13) MIN_QEMU_S390_VERSION = (2, 3, 0) # libvirt < 1.3 reported virt_functions capability # only when VFs are enabled. # libvirt 1.3 fix f391889f4e942e22b9ef8ecca492de05106ce41e MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION = (1, 3, 0) # Use the "logd" backend for handling stdout/stderr from QEMU processes. 
MIN_LIBVIRT_VIRTLOGD = (1, 3, 3) MIN_QEMU_VIRTLOGD = (2, 7, 0) # ppc64/ppc64le architectures with KVM # NOTE(rfolco): Same levels for Libvirt/Qemu on Big Endian and Little # Endian giving the nuance around guest vs host architectures MIN_LIBVIRT_KVM_PPC64_VERSION = (1, 2, 12) # aarch64 architecture with KVM # 'chardev' support got sorted out in 3.6.0 MIN_LIBVIRT_KVM_AARCH64_VERSION = (3, 6, 0) # Names of the types that do not get compressed during migration NO_COMPRESSION_TYPES = ('qcow2',) # number of serial console limit QEMU_MAX_SERIAL_PORTS = 4 # Qemu supports 4 serial consoles, we remove 1 because of the PTY one defined ALLOWED_QEMU_SERIAL_PORTS = QEMU_MAX_SERIAL_PORTS - 1 # realtime support MIN_LIBVIRT_REALTIME_VERSION = (1, 2, 13) # libvirt postcopy support MIN_LIBVIRT_POSTCOPY_VERSION = (1, 3, 3) # qemu postcopy support MIN_QEMU_POSTCOPY_VERSION = (2, 5, 0) MIN_LIBVIRT_OTHER_ARCH = { fields.Architecture.S390: MIN_LIBVIRT_KVM_S390_VERSION, fields.Architecture.S390X: MIN_LIBVIRT_KVM_S390_VERSION, fields.Architecture.PPC: MIN_LIBVIRT_KVM_PPC64_VERSION, fields.Architecture.PPC64: MIN_LIBVIRT_KVM_PPC64_VERSION, fields.Architecture.PPC64LE: MIN_LIBVIRT_KVM_PPC64_VERSION, fields.Architecture.AARCH64: MIN_LIBVIRT_KVM_AARCH64_VERSION, } MIN_QEMU_OTHER_ARCH = { fields.Architecture.S390: MIN_QEMU_S390_VERSION, fields.Architecture.S390X: MIN_QEMU_S390_VERSION, } # perf events support MIN_LIBVIRT_PERF_VERSION = (2, 0, 0) LIBVIRT_PERF_EVENT_PREFIX = 'VIR_PERF_PARAM_' PERF_EVENTS_CPU_FLAG_MAPPING = {'cmt': 'cmt', 'mbml': 'mbm_local', 'mbmt': 'mbm_total', } # Mediated devices support MIN_LIBVIRT_MDEV_SUPPORT = (3, 4, 0) # libvirt>=3.10 is required for volume multiattach if qemu<2.10. # See https://bugzilla.redhat.com/show_bug.cgi?id=1378242 # for details. MIN_LIBVIRT_MULTIATTACH = (3, 10, 0) MIN_LIBVIRT_LUKS_VERSION = (2, 2, 0) MIN_QEMU_LUKS_VERSION = (2, 6, 0) VGPU_RESOURCE_SEMAPHORE = "vgpu_resources" MIN_MIGRATION_SPEED_BW = 1 # 1 MiB/s class LibvirtDriver(driver.ComputeDriver): capabilities = { "has_imagecache": True, "supports_recreate": True, "supports_migrate_to_same_host": False, "supports_attach_interface": True, "supports_device_tagging": True, "supports_tagged_attach_interface": True, "supports_tagged_attach_volume": True, "supports_extend_volume": True, # Multiattach support is conditional on qemu and libvirt versions # determined in init_host. "supports_multiattach": False } def __init__(self, virtapi, read_only=False): super(LibvirtDriver, self).__init__(virtapi) global libvirt if libvirt is None: libvirt = importutils.import_module('libvirt') libvirt_migrate.libvirt = libvirt self._host = host.Host(self._uri(), read_only, lifecycle_event_handler=self.emit_event, conn_event_handler=self._handle_conn_event) self._initiator = None self._fc_wwnns = None self._fc_wwpns = None self._caps = None self._supported_perf_events = [] self.firewall_driver = firewall.load_driver( DEFAULT_FIREWALL_DRIVER, host=self._host) self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver() # TODO(mriedem): Long-term we should load up the volume drivers on # demand as needed rather than doing this on startup, as there might # be unsupported volume drivers in this list based on the underlying # platform. 
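        # Illustrative shape of the registry built by _get_volume_drivers()
        # below: the 'type=python.path.Class' entries in libvirt_volume_drivers
        # become roughly
        #   {'iscsi': LibvirtISCSIVolumeDriver(self._host),
        #    'rbd': LibvirtNetVolumeDriver(self._host), ...}
        # keyed by the 'driver_volume_type' found in a volume's connection_info.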
self.volume_drivers = self._get_volume_drivers() self._disk_cachemode = None self.image_cache_manager = imagecache.ImageCacheManager() self.image_backend = imagebackend.Backend(CONF.use_cow_images) self.disk_cachemodes = {} self.valid_cachemodes = ["default", "none", "writethrough", "writeback", "directsync", "unsafe", ] self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm', 'qemu') for mode_str in CONF.libvirt.disk_cachemodes: disk_type, sep, cache_mode = mode_str.partition('=') if cache_mode not in self.valid_cachemodes: LOG.warning('Invalid cachemode %(cache_mode)s specified ' 'for disk type %(disk_type)s.', {'cache_mode': cache_mode, 'disk_type': disk_type}) continue self.disk_cachemodes[disk_type] = cache_mode self._volume_api = cinder.API() self._image_api = image.API() sysinfo_serial_funcs = { 'none': lambda: None, 'hardware': self._get_host_sysinfo_serial_hardware, 'os': self._get_host_sysinfo_serial_os, 'auto': self._get_host_sysinfo_serial_auto, } self._sysinfo_serial_func = sysinfo_serial_funcs.get( CONF.libvirt.sysinfo_serial) self.job_tracker = instancejobtracker.InstanceJobTracker() self._remotefs = remotefs.RemoteFilesystem() self._live_migration_flags = self._block_migration_flags = 0 self.active_migrations = {} # Compute reserved hugepages from conf file at the very # beginning to ensure any syntax error will be reported and # avoid any re-calculation when computing resources. self._reserved_hugepages = hardware.numa_get_reserved_huge_pages() def _get_volume_drivers(self): driver_registry = dict() for driver_str in libvirt_volume_drivers: driver_type, _sep, driver = driver_str.partition('=') driver_class = importutils.import_class(driver) try: driver_registry[driver_type] = driver_class(self._host) except brick_exception.InvalidConnectorProtocol: LOG.debug('Unable to load volume driver %s. It is not ' 'supported on this host.', driver) return driver_registry @property def disk_cachemode(self): if self._disk_cachemode is None: # We prefer 'none' for consistent performance, host crash # safety & migration correctness by avoiding host page cache. # Some filesystems don't support O_DIRECT though. For those we # fallback to 'writethrough' which gives host crash safety, and # is safe for migration provided the filesystem is cache coherent # (cluster filesystems typically are, but things like NFS are not). self._disk_cachemode = "none" if not utils.supports_direct_io(CONF.instances_path): self._disk_cachemode = "writethrough" return self._disk_cachemode def _set_cache_mode(self, conf): """Set cache mode on LibvirtConfigGuestDisk object.""" try: source_type = conf.source_type driver_cache = conf.driver_cache except AttributeError: return # Shareable disks like for a multi-attach volume need to have the # driver cache disabled. if getattr(conf, 'shareable', False): conf.driver_cache = 'none' else: cache_mode = self.disk_cachemodes.get(source_type, driver_cache) conf.driver_cache = cache_mode def _do_quality_warnings(self): """Warn about potential configuration issues. This will log a warning message for things such as untested driver or host arch configurations in order to indicate potential issues to administrators. """ caps = self._host.get_capabilities() hostarch = caps.host.cpu.arch if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or hostarch not in (fields.Architecture.I686, fields.Architecture.X86_64)): LOG.warning('The libvirt driver is not tested on ' '%(type)s/%(arch)s by the OpenStack project and ' 'thus its quality can not be ensured. 
For more ' 'information, see: https://docs.openstack.org/' 'nova/latest/user/support-matrix.html', {'type': CONF.libvirt.virt_type, 'arch': hostarch}) if CONF.vnc.keymap: LOG.warning('The option "[vnc] keymap" has been deprecated ' 'in favor of configuration within the guest. ' 'Update nova.conf to address this change and ' 'refer to bug #1682020 for more information.') if CONF.spice.keymap: LOG.warning('The option "[spice] keymap" has been deprecated ' 'in favor of configuration within the guest. ' 'Update nova.conf to address this change and ' 'refer to bug #1682020 for more information.') def _handle_conn_event(self, enabled, reason): LOG.info("Connection event '%(enabled)d' reason '%(reason)s'", {'enabled': enabled, 'reason': reason}) self._set_host_enabled(enabled, reason) def init_host(self, host): self._host.initialize() self._do_quality_warnings() self._parse_migration_flags() self._supported_perf_events = self._get_supported_perf_events() self._set_multiattach_support() if (CONF.libvirt.virt_type == 'lxc' and not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)): LOG.warning("Running libvirt-lxc without user namespaces is " "dangerous. Containers spawned by Nova will be run " "as the host's root user. It is highly suggested " "that user namespaces be used in a public or " "multi-tenant environment.") # Stop libguestfs using KVM unless we're also configured # to use this. This solves problem where people need to # stop Nova use of KVM because nested-virt is broken if CONF.libvirt.virt_type != "kvm": guestfs.force_tcg() if not self._host.has_min_version(MIN_LIBVIRT_VERSION): raise exception.InternalError( _('Nova requires libvirt version %s or greater.') % libvirt_utils.version_to_string(MIN_LIBVIRT_VERSION)) if CONF.libvirt.virt_type in ("qemu", "kvm"): if self._host.has_min_version(hv_ver=MIN_QEMU_VERSION): # "qemu-img info" calls are version dependent, so we need to # store the version in the images module. images.QEMU_VERSION = self._host.get_connection().getVersion() else: raise exception.InternalError( _('Nova requires QEMU version %s or greater.') % libvirt_utils.version_to_string(MIN_QEMU_VERSION)) if CONF.libvirt.virt_type == 'parallels': if not self._host.has_min_version(hv_ver=MIN_VIRTUOZZO_VERSION): raise exception.InternalError( _('Nova requires Virtuozzo version %s or greater.') % libvirt_utils.version_to_string(MIN_VIRTUOZZO_VERSION)) if not self._host.has_min_version(MIN_LIBVIRT_VIRTUOZZO_VERSION): raise exception.InternalError( _('Running Nova with parallels virt_type requires ' 'libvirt version %s') % libvirt_utils.version_to_string( MIN_LIBVIRT_VIRTUOZZO_VERSION)) # Give the cloud admin a heads up if we are intending to # change the MIN_LIBVIRT_VERSION in the next release. if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION): LOG.warning('Running Nova with a libvirt version less than ' '%(version)s is deprecated. The required minimum ' 'version of libvirt will be raised to %(version)s ' 'in the next release.', {'version': libvirt_utils.version_to_string( NEXT_MIN_LIBVIRT_VERSION)}) if (CONF.libvirt.virt_type in ("qemu", "kvm") and not self._host.has_min_version(hv_ver=NEXT_MIN_QEMU_VERSION)): LOG.warning('Running Nova with a QEMU version less than ' '%(version)s is deprecated. 
The required minimum ' 'version of QEMU will be raised to %(version)s ' 'in the next release.', {'version': libvirt_utils.version_to_string( NEXT_MIN_QEMU_VERSION)}) kvm_arch = fields.Architecture.from_host() if (CONF.libvirt.virt_type in ('kvm', 'qemu') and kvm_arch in MIN_LIBVIRT_OTHER_ARCH and not self._host.has_min_version( MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch), MIN_QEMU_OTHER_ARCH.get(kvm_arch))): if MIN_QEMU_OTHER_ARCH.get(kvm_arch): raise exception.InternalError( _('Running Nova with qemu/kvm virt_type on %(arch)s ' 'requires libvirt version %(libvirt_ver)s and ' 'qemu version %(qemu_ver)s, or greater') % {'arch': kvm_arch, 'libvirt_ver': libvirt_utils.version_to_string( MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch)), 'qemu_ver': libvirt_utils.version_to_string( MIN_QEMU_OTHER_ARCH.get(kvm_arch))}) # no qemu version in the error message raise exception.InternalError( _('Running Nova with qemu/kvm virt_type on %(arch)s ' 'requires libvirt version %(libvirt_ver)s or greater') % {'arch': kvm_arch, 'libvirt_ver': libvirt_utils.version_to_string( MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch))}) # TODO(sbauza): Remove this code once mediated devices are persisted # across reboots. if self._host.has_min_version(MIN_LIBVIRT_MDEV_SUPPORT): self._recreate_assigned_mediated_devices() @staticmethod def _is_existing_mdev(uuid): # FIXME(sbauza): Some kernel can have a uevent race meaning that the # libvirt daemon won't know when a mediated device is created unless # you restart that daemon. Until all kernels we support are not having # that possible race, check the sysfs directly instead of asking the # libvirt API. # See https://bugzilla.redhat.com/show_bug.cgi?id=1376907 for ref. return os.path.exists('/sys/bus/mdev/devices/{0}'.format(uuid)) def _recreate_assigned_mediated_devices(self): """Recreate assigned mdevs that could have disappeared if we reboot the host. """ mdevs = self._get_all_assigned_mediated_devices() requested_types = self._get_supported_vgpu_types() for (mdev_uuid, instance_uuid) in six.iteritems(mdevs): if not self._is_existing_mdev(mdev_uuid): self._create_new_mediated_device(requested_types, mdev_uuid) def _set_multiattach_support(self): # Check to see if multiattach is supported. Based on bugzilla # https://bugzilla.redhat.com/show_bug.cgi?id=1378242 and related # clones, the shareable flag on a disk device will only work with # qemu<2.10 or libvirt>=3.10. So check those versions here and set # the capability appropriately. if (self._host.has_min_version(lv_ver=MIN_LIBVIRT_MULTIATTACH) or not self._host.has_min_version(hv_ver=(2, 10, 0))): self.capabilities['supports_multiattach'] = True else: LOG.debug('Volume multiattach is not supported based on current ' 'versions of QEMU and libvirt. 
QEMU must be less than ' '2.10 or libvirt must be greater than or equal to 3.10.') def _prepare_migration_flags(self): migration_flags = 0 migration_flags |= libvirt.VIR_MIGRATE_LIVE # Adding p2p flag only if xen is not in use, because xen does not # support p2p migrations if CONF.libvirt.virt_type != 'xen': migration_flags |= libvirt.VIR_MIGRATE_PEER2PEER # Adding VIR_MIGRATE_UNDEFINE_SOURCE because, without it, migrated # instance will remain defined on the source host migration_flags |= libvirt.VIR_MIGRATE_UNDEFINE_SOURCE # Adding VIR_MIGRATE_PERSIST_DEST to persist the VM on the # destination host migration_flags |= libvirt.VIR_MIGRATE_PERSIST_DEST live_migration_flags = block_migration_flags = migration_flags # Adding VIR_MIGRATE_NON_SHARED_INC, otherwise all block-migrations # will be live-migrations instead block_migration_flags |= libvirt.VIR_MIGRATE_NON_SHARED_INC return (live_migration_flags, block_migration_flags) def _handle_live_migration_tunnelled(self, migration_flags): if (CONF.libvirt.live_migration_tunnelled is None or CONF.libvirt.live_migration_tunnelled): migration_flags |= libvirt.VIR_MIGRATE_TUNNELLED return migration_flags def _is_post_copy_available(self): if self._host.has_min_version(lv_ver=MIN_LIBVIRT_POSTCOPY_VERSION, hv_ver=MIN_QEMU_POSTCOPY_VERSION): return True return False def _is_virtlogd_available(self): return self._host.has_min_version(MIN_LIBVIRT_VIRTLOGD, MIN_QEMU_VIRTLOGD) def _is_native_luks_available(self): return self._host.has_min_version(MIN_LIBVIRT_LUKS_VERSION, MIN_QEMU_LUKS_VERSION) def _handle_live_migration_post_copy(self, migration_flags): if CONF.libvirt.live_migration_permit_post_copy: if self._is_post_copy_available(): migration_flags |= libvirt.VIR_MIGRATE_POSTCOPY else: LOG.info('The live_migration_permit_post_copy is set ' 'to True, but it is not supported.') return migration_flags def _handle_live_migration_auto_converge(self, migration_flags): if (self._is_post_copy_available() and (migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0): LOG.info('The live_migration_permit_post_copy is set to ' 'True and post copy live migration is available ' 'so auto-converge will not be in use.') elif CONF.libvirt.live_migration_permit_auto_converge: migration_flags |= libvirt.VIR_MIGRATE_AUTO_CONVERGE return migration_flags def _parse_migration_flags(self): (live_migration_flags, block_migration_flags) = self._prepare_migration_flags() live_migration_flags = self._handle_live_migration_tunnelled( live_migration_flags) block_migration_flags = self._handle_live_migration_tunnelled( block_migration_flags) live_migration_flags = self._handle_live_migration_post_copy( live_migration_flags) block_migration_flags = self._handle_live_migration_post_copy( block_migration_flags) live_migration_flags = self._handle_live_migration_auto_converge( live_migration_flags) block_migration_flags = self._handle_live_migration_auto_converge( block_migration_flags) self._live_migration_flags = live_migration_flags self._block_migration_flags = block_migration_flags # TODO(sahid): This method is targeted for removal when the tests # have been updated to avoid its use # # All libvirt API calls on the libvirt.Connect object should be # encapsulated by methods on the nova.virt.libvirt.host.Host # object, rather than directly invoking the libvirt APIs. The goal # is to avoid a direct dependency on the libvirt API from the # driver.py file. 
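    # For example, callers are expected to go through helpers such as
    # self._host.get_guest(instance) or self._host.get_capabilities() rather
    # than using the raw connection returned below.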
def _get_connection(self): return self._host.get_connection() _conn = property(_get_connection) @staticmethod def _uri(): if CONF.libvirt.virt_type == 'uml': uri = CONF.libvirt.connection_uri or 'uml:///system' elif CONF.libvirt.virt_type == 'xen': uri = CONF.libvirt.connection_uri or 'xen:///' elif CONF.libvirt.virt_type == 'lxc': uri = CONF.libvirt.connection_uri or 'lxc:///' elif CONF.libvirt.virt_type == 'parallels': uri = CONF.libvirt.connection_uri or 'parallels:///system' else: uri = CONF.libvirt.connection_uri or 'qemu:///system' return uri @staticmethod def _live_migration_uri(dest): uris = { 'kvm': 'qemu+%s://%s/system', 'qemu': 'qemu+%s://%s/system', 'xen': 'xenmigr://%s/system', 'parallels': 'parallels+tcp://%s/system', } virt_type = CONF.libvirt.virt_type # TODO(pkoniszewski): Remove fetching live_migration_uri in Pike uri = CONF.libvirt.live_migration_uri if uri: return uri % dest uri = uris.get(virt_type) if uri is None: raise exception.LiveMigrationURINotAvailable(virt_type=virt_type) str_format = (dest,) if virt_type in ('kvm', 'qemu'): scheme = CONF.libvirt.live_migration_scheme or 'tcp' str_format = (scheme, dest) return uris.get(virt_type) % str_format @staticmethod def _migrate_uri(dest): uri = None # Only QEMU live migrations supports migrate-uri parameter virt_type = CONF.libvirt.virt_type if virt_type in ('qemu', 'kvm'): # QEMU accept two schemes: tcp and rdma. By default # libvirt build the URI using the remote hostname and the # tcp schema. uri = 'tcp://%s' % dest # Because dest might be of type unicode, here we might return value of # type unicode as well which is not acceptable by libvirt python # binding when Python 2.7 is in use, so let's convert it explicitly # back to string. When Python 3.x is in use, libvirt python binding # accepts unicode type so it is completely fine to do a no-op str(uri) # conversion which will return value of type unicode. 
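        # Illustrative result: _migrate_uri('dest-host') returns
        # 'tcp://dest-host' for qemu/kvm and None for other virt types.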
return uri and str(uri) def instance_exists(self, instance): """Efficient override of base instance_exists method.""" try: self._host.get_guest(instance) return True except (exception.InternalError, exception.InstanceNotFound): return False def estimate_instance_overhead(self, instance_info): overhead = super(LibvirtDriver, self).estimate_instance_overhead( instance_info) if isinstance(instance_info, objects.Flavor): # A flavor object is passed during case of migrate emu_policy = hardware.get_emulator_thread_policy_constraint( instance_info) if emu_policy == fields.CPUEmulatorThreadsPolicy.ISOLATE: overhead['vcpus'] += 1 else: # An instance object is passed during case of spawing or a # dict is passed when computing resource for an instance numa_topology = hardware.instance_topology_from_instance( instance_info) if numa_topology and numa_topology.emulator_threads_isolated: overhead['vcpus'] += 1 return overhead def list_instances(self): names = [] for guest in self._host.list_guests(only_running=False): names.append(guest.name) return names def list_instance_uuids(self): uuids = [] for guest in self._host.list_guests(only_running=False): uuids.append(guest.uuid) return uuids def plug_vifs(self, instance, network_info): """Plug VIFs into networks.""" for vif in network_info: self.vif_driver.plug(instance, vif) def _unplug_vifs(self, instance, network_info, ignore_errors): """Unplug VIFs from networks.""" for vif in network_info: try: self.vif_driver.unplug(instance, vif) except exception.NovaException: if not ignore_errors: raise def unplug_vifs(self, instance, network_info): self._unplug_vifs(instance, network_info, False) def _teardown_container(self, instance): inst_path = libvirt_utils.get_instance_path(instance) container_dir = os.path.join(inst_path, 'rootfs') rootfs_dev = instance.system_metadata.get('rootfs_device_name') LOG.debug('Attempting to teardown container at path %(dir)s with ' 'root device: %(rootfs_dev)s', {'dir': container_dir, 'rootfs_dev': rootfs_dev}, instance=instance) disk_api.teardown_container(container_dir, rootfs_dev) def _destroy(self, instance, attempt=1): try: guest = self._host.get_guest(instance) if CONF.serial_console.enabled: # This method is called for several events: destroy, # rebuild, hard-reboot, power-off - For all of these # events we want to release the serial ports acquired # for the guest before destroying it. serials = self._get_serial_ports_from_guest(guest) for hostname, port in serials: serial_console.release_port(host=hostname, port=port) except exception.InstanceNotFound: guest = None # If the instance is already terminated, we're still happy # Otherwise, destroy it old_domid = -1 if guest is not None: try: old_domid = guest.id guest.poweroff() except libvirt.libvirtError as e: is_okay = False errcode = e.get_error_code() if errcode == libvirt.VIR_ERR_NO_DOMAIN: # Domain already gone. This can safely be ignored. is_okay = True elif errcode == libvirt.VIR_ERR_OPERATION_INVALID: # If the instance is already shut off, we get this: # Code=55 Error=Requested operation is not valid: # domain is not running state = guest.get_power_state(self._host) if state == power_state.SHUTDOWN: is_okay = True elif errcode == libvirt.VIR_ERR_INTERNAL_ERROR: errmsg = e.get_error_message() if (CONF.libvirt.virt_type == 'lxc' and errmsg == 'internal error: ' 'Some processes refused to die'): # Some processes in the container didn't die # fast enough for libvirt. The container will # eventually die. For now, move on and let # the wait_for_destroy logic take over. 
is_okay = True elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT: LOG.warning("Cannot destroy instance, operation time out", instance=instance) reason = _("operation time out") raise exception.InstancePowerOffFailure(reason=reason) elif errcode == libvirt.VIR_ERR_SYSTEM_ERROR: if e.get_int1() == errno.EBUSY: # NOTE(danpb): When libvirt kills a process it sends it # SIGTERM first and waits 10 seconds. If it hasn't gone # it sends SIGKILL and waits another 5 seconds. If it # still hasn't gone then you get this EBUSY error. # Usually when a QEMU process fails to go away upon # SIGKILL it is because it is stuck in an # uninterruptible kernel sleep waiting on I/O from # some non-responsive server. # Given the CPU load of the gate tests though, it is # conceivable that the 15 second timeout is too short, # particularly if the VM running tempest has a high # steal time from the cloud host. ie 15 wallclock # seconds may have passed, but the VM might have only # have a few seconds of scheduled run time. LOG.warning('Error from libvirt during destroy. ' 'Code=%(errcode)s Error=%(e)s; ' 'attempt %(attempt)d of 3', {'errcode': errcode, 'e': e, 'attempt': attempt}, instance=instance) with excutils.save_and_reraise_exception() as ctxt: # Try up to 3 times before giving up. if attempt < 3: ctxt.reraise = False self._destroy(instance, attempt + 1) return if not is_okay: with excutils.save_and_reraise_exception(): LOG.error('Error from libvirt during destroy. ' 'Code=%(errcode)s Error=%(e)s', {'errcode': errcode, 'e': e}, instance=instance) def _wait_for_destroy(expected_domid): """Called at an interval until the VM is gone.""" # NOTE(vish): If the instance disappears during the destroy # we ignore it so the cleanup can still be # attempted because we would prefer destroy to # never fail. try: dom_info = self.get_info(instance) state = dom_info.state new_domid = dom_info.internal_id except exception.InstanceNotFound: LOG.debug("During wait destroy, instance disappeared.", instance=instance) state = power_state.SHUTDOWN if state == power_state.SHUTDOWN: LOG.info("Instance destroyed successfully.", instance=instance) raise loopingcall.LoopingCallDone() # NOTE(wangpan): If the instance was booted again after destroy, # this may be an endless loop, so check the id of # domain here, if it changed and the instance is # still running, we should destroy it again. 
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details if new_domid != expected_domid: LOG.info("Instance may be started again.", instance=instance) kwargs['is_running'] = True raise loopingcall.LoopingCallDone() kwargs = {'is_running': False} timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy, old_domid) timer.start(interval=0.5).wait() if kwargs['is_running']: LOG.info("Going to destroy instance again.", instance=instance) self._destroy(instance) else: # NOTE(GuanQiang): teardown container to avoid resource leak if CONF.libvirt.virt_type == 'lxc': self._teardown_container(instance) def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True): self._destroy(instance) self.cleanup(context, instance, network_info, block_device_info, destroy_disks) def _undefine_domain(self, instance): try: guest = self._host.get_guest(instance) try: support_uefi = self._has_uefi_support() guest.delete_configuration(support_uefi) except libvirt.libvirtError as e: with excutils.save_and_reraise_exception() as ctxt: errcode = e.get_error_code() if errcode == libvirt.VIR_ERR_NO_DOMAIN: LOG.debug("Called undefine, but domain already gone.", instance=instance) ctxt.reraise = False else: LOG.error('Error from libvirt during undefine. ' 'Code=%(errcode)s Error=%(e)s', {'errcode': errcode, 'e': encodeutils.exception_to_unicode(e)}, instance=instance) except exception.InstanceNotFound: pass def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True, migrate_data=None, destroy_vifs=True): if destroy_vifs: self._unplug_vifs(instance, network_info, True) # Continue attempting to remove firewall filters for the instance # until it's done or there is a failure to remove the filters. If # unfilter fails because the instance is not yet shutdown, try to # destroy the guest again and then retry the unfilter. while True: try: self.unfilter_instance(instance, network_info) break except libvirt.libvirtError as e: try: state = self.get_info(instance).state except exception.InstanceNotFound: state = power_state.SHUTDOWN if state != power_state.SHUTDOWN: LOG.warning("Instance may be still running, destroy " "it again.", instance=instance) self._destroy(instance) else: errcode = e.get_error_code() LOG.exception(_('Error from libvirt during unfilter. ' 'Code=%(errcode)s Error=%(e)s'), {'errcode': errcode, 'e': e}, instance=instance) reason = _("Error unfiltering instance.") raise exception.InstanceTerminationFailure(reason=reason) except Exception: raise # FIXME(wangpan): if the instance is booted again here, such as the # soft reboot operation boot it here, it will become # "running deleted", should we check and destroy it # at the end of this method? 
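        # Each mapping entry handled below looks roughly like (illustrative)
        #   {'connection_info': {...}, 'mount_device': '/dev/vdb', ...}
        # and is disconnected individually.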
# NOTE(vish): we disconnect from volumes regardless block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_dev = vol['mount_device'] if disk_dev is not None: disk_dev = disk_dev.rpartition("/")[2] try: self._disconnect_volume(context, connection_info, instance) except Exception as exc: with excutils.save_and_reraise_exception() as ctxt: if destroy_disks: # Don't block on Volume errors if we're trying to # delete the instance as we may be partially created # or deleted ctxt.reraise = False LOG.warning( "Ignoring Volume Error on vol %(vol_id)s " "during delete %(exc)s", {'vol_id': vol.get('volume_id'), 'exc': encodeutils.exception_to_unicode(exc)}, instance=instance) if destroy_disks: # NOTE(haomai): destroy volumes if needed if CONF.libvirt.images_type == 'lvm': self._cleanup_lvm(instance, block_device_info) if CONF.libvirt.images_type == 'rbd': self._cleanup_rbd(instance) is_shared_block_storage = False if migrate_data and 'is_shared_block_storage' in migrate_data: is_shared_block_storage = migrate_data.is_shared_block_storage if destroy_disks or is_shared_block_storage: attempts = int(instance.system_metadata.get('clean_attempts', '0')) success = self.delete_instance_files(instance) # NOTE(mriedem): This is used in the _run_pending_deletes periodic # task in the compute manager. The tight coupling is not great... instance.system_metadata['clean_attempts'] = str(attempts + 1) if success: instance.cleaned = True instance.save() self._undefine_domain(instance) def _detach_encrypted_volumes(self, instance, block_device_info): """Detaches encrypted volumes attached to instance.""" disks = self._get_instance_disk_info(instance, block_device_info) encrypted_volumes = filter(dmcrypt.is_encrypted, [disk['path'] for disk in disks]) for path in encrypted_volumes: dmcrypt.delete_volume(path) def _get_serial_ports_from_guest(self, guest, mode=None): """Returns an iterator over serial port(s) configured on guest. :param mode: Should be a value in (None, bind, connect) """ xml = guest.get_xml_desc() tree = etree.fromstring(xml) # The 'serial' device is the base for x86 platforms. Other platforms # (e.g. kvm on system z = S390X) can only use 'console' devices. 
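        # Illustrative guest XML fragment matched by the XPaths built below:
        #   <serial type='tcp'>
        #     <source mode='bind' host='127.0.0.1' service='10000'/>
        #   </serial>
        # which this generator yields as ('127.0.0.1', 10000).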
xpath_mode = "[@mode='%s']" % mode if mode else "" serial_tcp = "./devices/serial[@type='tcp']/source" + xpath_mode console_tcp = "./devices/console[@type='tcp']/source" + xpath_mode tcp_devices = tree.findall(serial_tcp) if len(tcp_devices) == 0: tcp_devices = tree.findall(console_tcp) for source in tcp_devices: yield (source.get("host"), int(source.get("service"))) def _get_scsi_controller_max_unit(self, guest): """Returns the max disk unit used by scsi controller""" xml = guest.get_xml_desc() tree = etree.fromstring(xml) addrs = "./devices/disk[@device='disk']/address[@type='drive']" ret = [] for obj in tree.findall(addrs): ret.append(int(obj.get('unit', 0))) return max(ret) @staticmethod def _get_rbd_driver(): return rbd_utils.RBDDriver( pool=CONF.libvirt.images_rbd_pool, ceph_conf=CONF.libvirt.images_rbd_ceph_conf, rbd_user=CONF.libvirt.rbd_user) def _cleanup_rbd(self, instance): # NOTE(nic): On revert_resize, the cleanup steps for the root # volume are handled with an "rbd snap rollback" command, # and none of this is needed (and is, in fact, harmful) so # filter out non-ephemerals from the list if instance.task_state == task_states.RESIZE_REVERTING: filter_fn = lambda disk: (disk.startswith(instance.uuid) and disk.endswith('disk.local')) else: filter_fn = lambda disk: disk.startswith(instance.uuid) LibvirtDriver._get_rbd_driver().cleanup_volumes(filter_fn) def _cleanup_lvm(self, instance, block_device_info): """Delete all LVM disks for given instance object.""" if instance.get('ephemeral_key_uuid') is not None: self._detach_encrypted_volumes(instance, block_device_info) disks = self._lvm_disks(instance) if disks: lvm.remove_volumes(disks) def _lvm_disks(self, instance): """Returns all LVM disks for given instance object.""" if CONF.libvirt.images_volume_group: vg = os.path.join('/dev', CONF.libvirt.images_volume_group) if not os.path.exists(vg): return [] pattern = '%s_' % instance.uuid def belongs_to_instance(disk): return disk.startswith(pattern) def fullpath(name): return os.path.join(vg, name) logical_volumes = lvm.list_volumes(vg) disks = [fullpath(disk) for disk in logical_volumes if belongs_to_instance(disk)] return disks return [] def get_volume_connector(self, instance): root_helper = utils.get_root_helper() return connector.get_connector_properties( root_helper, CONF.my_block_storage_ip, CONF.libvirt.volume_use_multipath, enforce_multipath=True, host=CONF.host) def _cleanup_resize(self, context, instance, network_info): inst_base = libvirt_utils.get_instance_path(instance) target = inst_base + '_resize' # Deletion can fail over NFS, so retry the deletion as required. # Set maximum attempt as 5, most test can remove the directory # for the second time. attempts = 0 while(os.path.exists(target) and attempts < 5): shutil.rmtree(target, ignore_errors=True) if os.path.exists(target): time.sleep(random.randint(20, 200) / 100.0) attempts += 1 root_disk = self.image_backend.by_name(instance, 'disk') # TODO(nic): Set ignore_errors=False in a future release. # It is set to True here to avoid any upgrade issues surrounding # instances being in pending resize state when the software is updated; # in that case there will be no snapshot to remove. Once it can be # reasonably assumed that no such instances exist in the wild # anymore, it should be set back to False (the default) so it will # throw errors, like it should. 
if root_disk.exists(): root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True) # NOTE(mjozefcz): # self.image_backend.image for some backends recreates instance # directory and image disk.info - remove it here if exists # Do not remove inst_base for volume-backed instances since that # could potentially remove the files on the destination host # if using shared storage. if (os.path.exists(inst_base) and not root_disk.exists() and not compute_utils.is_volume_backed_instance( context, instance)): try: shutil.rmtree(inst_base) except OSError as e: if e.errno != errno.ENOENT: raise if instance.host != CONF.host: self._undefine_domain(instance) self.unplug_vifs(instance, network_info) self.unfilter_instance(instance, network_info) def _get_volume_driver(self, connection_info): driver_type = connection_info.get('driver_volume_type') if driver_type not in self.volume_drivers: raise exception.VolumeDriverNotFound(driver_type=driver_type) return self.volume_drivers[driver_type] def _connect_volume(self, context, connection_info, instance, encryption=None, allow_native_luks=True): vol_driver = self._get_volume_driver(connection_info) vol_driver.connect_volume(connection_info, instance) self._attach_encryptor(context, connection_info, encryption, allow_native_luks) def _should_disconnect_target(self, context, connection_info, instance): connection_count = 0 # NOTE(jdg): Multiattach is a special case (not to be confused # with shared_targets). With multiattach we may have a single volume # attached multiple times to *this* compute node (ie Server-1 and # Server-2). So, if we receive a call to delete the attachment for # Server-1 we need to take special care to make sure that the Volume # isn't also attached to another Server on this Node. Otherwise we # will indiscriminantly delete the connection for all Server and that's # no good. So check if it's attached multiple times on this node # if it is we skip the call to brick to delete the connection. if connection_info.get('multiattach', False): volume = self._volume_api.get( context, driver_block_device.get_volume_id(connection_info)) attachments = volume.get('attachments', {}) if len(attachments) > 1: # First we get a list of all Server UUID's associated with # this Host (Compute Node). 
We're going to use this to # determine if the Volume being detached is also in-use by # another Server on this Host, ie just check to see if more # than one attachment.server_id for this volume is in our # list of Server UUID's for this Host servers_this_host = objects.InstanceList.get_uuids_by_host( context, instance.host) # NOTE(jdg): nova.volume.cinder translates the # volume['attachments'] response into a dict which includes # the Server UUID as the key, so we're using that # here to check against our server_this_host list for server_id, data in attachments.items(): if server_id in servers_this_host: connection_count += 1 return (False if connection_count > 1 else True) def _disconnect_volume(self, context, connection_info, instance, encryption=None): self._detach_encryptor(context, connection_info, encryption=encryption) if self._should_disconnect_target(context, connection_info, instance): vol_driver = self._get_volume_driver(connection_info) vol_driver.disconnect_volume(connection_info, instance) else: LOG.info("Detected multiple connections on this host for volume: " "%s, skipping target disconnect.", driver_block_device.get_volume_id(connection_info), instance=instance) def _extend_volume(self, connection_info, instance): vol_driver = self._get_volume_driver(connection_info) return vol_driver.extend_volume(connection_info, instance) def _use_native_luks(self, encryption=None): """Is LUKS the required provider and native QEMU LUKS available """ provider = None if encryption: provider = encryption.get('provider', None) if provider in encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP: provider = encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP[provider] return provider == encryptors.LUKS and self._is_native_luks_available() def _get_volume_config(self, connection_info, disk_info): vol_driver = self._get_volume_driver(connection_info) conf = vol_driver.get_config(connection_info, disk_info) self._set_cache_mode(conf) return conf def _get_volume_encryptor(self, connection_info, encryption): root_helper = utils.get_root_helper() return encryptors.get_volume_encryptor(root_helper=root_helper, keymgr=key_manager.API(CONF), connection_info=connection_info, **encryption) def _get_volume_encryption(self, context, connection_info): """Get the encryption metadata dict if it is not provided """ encryption = {} volume_id = driver_block_device.get_volume_id(connection_info) if volume_id: encryption = encryptors.get_encryption_metadata(context, self._volume_api, volume_id, connection_info) return encryption def _attach_encryptor(self, context, connection_info, encryption, allow_native_luks): """Attach the frontend encryptor if one is required by the volume. The request context is only used when an encryption metadata dict is not provided. The encryption metadata dict being populated is then used to determine if an attempt to attach the encryptor should be made. If native LUKS decryption is enabled then create a Libvirt volume secret containing the LUKS passphrase for the volume. """ if encryption is None: encryption = self._get_volume_encryption(context, connection_info) if (encryption and allow_native_luks and self._use_native_luks(encryption)): # NOTE(lyarwood): Fetch the associated key for the volume and # decode the passphrase from the key. # FIXME(lyarwood): c-vol currently creates symmetric keys for use # with volumes, leading to the binary to hex to string conversion # below. 
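            # Illustrative only: a raw symmetric key such as b'\x01\xab...' is
            # hexlified to '01ab...' and that string is what gets used as the
            # LUKS passphrase below.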
keymgr = key_manager.API(CONF) key = keymgr.get(context, encryption['encryption_key_id']) key_encoded = key.get_encoded() passphrase = binascii.hexlify(key_encoded).decode('utf-8') # NOTE(lyarwood): Retain the behaviour of the original os-brick # encryptors and format any volume that does not identify as # encrypted with LUKS. # FIXME(lyarwood): Remove this once c-vol correctly formats # encrypted volumes during their initial creation: # https://bugs.launchpad.net/cinder/+bug/1739442 device_path = connection_info.get('data').get('device_path') if device_path: root_helper = utils.get_root_helper() if not luks_encryptor.is_luks(root_helper, device_path): encryptor = self._get_volume_encryptor(connection_info, encryption) encryptor._format_volume(passphrase, **encryption) # NOTE(lyarwood): Store the passphrase as a libvirt secret locally # on the compute node. This secret is used later when generating # the volume config. volume_id = driver_block_device.get_volume_id(connection_info) self._host.create_secret('volume', volume_id, password=passphrase) elif encryption: encryptor = self._get_volume_encryptor(connection_info, encryption) encryptor.attach_volume(context, **encryption) def _detach_encryptor(self, context, connection_info, encryption): """Detach the frontend encryptor if one is required by the volume. The request context is only used when an encryption metadata dict is not provided. The encryption metadata dict being populated is then used to determine if an attempt to detach the encryptor should be made. If native LUKS decryption is enabled then delete previously created Libvirt volume secret from the host. """ volume_id = driver_block_device.get_volume_id(connection_info) if volume_id and self._host.find_secret('volume', volume_id): return self._host.delete_secret('volume', volume_id) if encryption is None: encryption = self._get_volume_encryption(context, connection_info) if encryption: encryptor = self._get_volume_encryptor(connection_info, encryption) encryptor.detach_volume(**encryption) def _check_discard_for_attach_volume(self, conf, instance): """Perform some checks for volumes configured for discard support. If discard is configured for the volume, and the guest is using a configuration known to not work, we will log a message explaining the reason why. """ if conf.driver_discard == 'unmap' and conf.target_bus == 'virtio': LOG.debug('Attempting to attach volume %(id)s with discard ' 'support enabled to an instance using an ' 'unsupported configuration. target_bus = ' '%(bus)s. Trim commands will not be issued to ' 'the storage device.', {'bus': conf.target_bus, 'id': conf.serial}, instance=instance) def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): guest = self._host.get_guest(instance) disk_dev = mountpoint.rpartition("/")[2] bdm = { 'device_name': disk_dev, 'disk_bus': disk_bus, 'device_type': device_type} # Note(cfb): If the volume has a custom block size, check that # that we are using QEMU/KVM and libvirt >= 0.10.2. The # presence of a block size is considered mandatory by # cinder so we fail if we can't honor the request. 
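        # connection_info is typically shaped like (illustrative):
        #   {'driver_volume_type': 'iscsi',
        #    'data': {'device_path': '/dev/sdb',
        #             'logical_block_size': 4096, ...}}
        # and only the optional block-size keys matter for this check.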
data = {} if ('data' in connection_info): data = connection_info['data'] if ('logical_block_size' in data or 'physical_block_size' in data): if ((CONF.libvirt.virt_type != "kvm" and CONF.libvirt.virt_type != "qemu")): msg = _("Volume sets block size, but the current " "libvirt hypervisor '%s' does not support custom " "block size") % CONF.libvirt.virt_type raise exception.InvalidHypervisorType(msg) self._connect_volume(context, connection_info, instance, encryption=encryption) disk_info = blockinfo.get_info_from_bdm( instance, CONF.libvirt.virt_type, instance.image_meta, bdm) if disk_info['bus'] == 'scsi': disk_info['unit'] = self._get_scsi_controller_max_unit(guest) + 1 conf = self._get_volume_config(connection_info, disk_info) self._check_discard_for_attach_volume(conf, instance) try: state = guest.get_power_state(self._host) live = state in (power_state.RUNNING, power_state.PAUSED) guest.attach_device(conf, persistent=True, live=live) # NOTE(artom) If we're attaching with a device role tag, we need to # rebuild device_metadata. If we're attaching without a role # tag, we're rebuilding it here needlessly anyways. This isn't a # massive deal, and it helps reduce code complexity by not having # to indicate to the virt driver that the attach is tagged. The # really important optimization of not calling the database unless # device_metadata has actually changed is done for us by # instance.save(). instance.device_metadata = self._build_device_metadata( context, instance) instance.save() except Exception: LOG.exception(_('Failed to attach volume at mountpoint: %s'), mountpoint, instance=instance) with excutils.save_and_reraise_exception(): self._disconnect_volume(context, connection_info, instance, encryption=encryption) def _swap_volume(self, guest, disk_path, conf, resize_to): """Swap existing disk with a new block device.""" dev = guest.get_block_device(disk_path) # Save a copy of the domain's persistent XML file. We'll use this # to redefine the domain if anything fails during the volume swap. xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True) # Abort is an idempotent operation, so make sure any block # jobs which may have failed are ended. try: dev.abort_job() except Exception: pass try: # NOTE (rmk): blockRebase cannot be executed on persistent # domains, so we need to temporarily undefine it. # If any part of this block fails, the domain is # re-defined regardless. if guest.has_persistent_configuration(): support_uefi = self._has_uefi_support() guest.delete_configuration(support_uefi) try: # Start copy with VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT flag to # allow writing to existing external volume file. Use # VIR_DOMAIN_BLOCK_REBASE_COPY_DEV if it's a block device to # make sure XML is generated correctly (bug 1691195) copy_dev = conf.source_type == 'block' dev.rebase(conf.source_path, copy=True, reuse_ext=True, copy_dev=copy_dev) while not dev.is_job_complete(): time.sleep(0.5) dev.abort_job(pivot=True) except Exception as exc: LOG.exception("Failure rebasing volume %(new_path)s on " "%(old_path)s.", {'new_path': conf.source_path, 'old_path': disk_path}) raise exception.VolumeRebaseFailed(reason=six.text_type(exc)) if resize_to: dev.resize(resize_to * units.Gi / units.Ki) # Make sure we will redefine the domain using the updated # configuration after the volume was swapped. The dump_inactive # keyword arg controls whether we pull the inactive (persistent) # or active (live) config from the domain. 
We want to pull the # live config after the volume was updated to use when we redefine # the domain. xml = guest.get_xml_desc(dump_inactive=False, dump_sensitive=True) finally: self._host.write_instance_config(xml) def swap_volume(self, context, old_connection_info, new_connection_info, instance, mountpoint, resize_to): # NOTE(lyarwood): https://bugzilla.redhat.com/show_bug.cgi?id=760547 old_encrypt = self._get_volume_encryption(context, old_connection_info) new_encrypt = self._get_volume_encryption(context, new_connection_info) if ((old_encrypt and self._use_native_luks(old_encrypt)) or (new_encrypt and self._use_native_luks(new_encrypt))): raise NotImplementedError(_("Swap volume is not supported for " "encrypted volumes when native LUKS decryption is enabled.")) guest = self._host.get_guest(instance) disk_dev = mountpoint.rpartition("/")[2] if not guest.get_disk(disk_dev): raise exception.DiskNotFound(location=disk_dev) disk_info = { 'dev': disk_dev, 'bus': blockinfo.get_disk_bus_for_disk_dev( CONF.libvirt.virt_type, disk_dev), 'type': 'disk', } # NOTE (lyarwood): new_connection_info will be modified by the # following _connect_volume call down into the volume drivers. The # majority of the volume drivers will add a device_path that is in turn # used by _get_volume_config to set the source_path of the # LibvirtConfigGuestDisk object it returns. We do not explicitly save # this to the BDM here as the upper compute swap_volume method will # eventually do this for us. self._connect_volume(context, new_connection_info, instance) conf = self._get_volume_config(new_connection_info, disk_info) if not conf.source_path: self._disconnect_volume(context, new_connection_info, instance) raise NotImplementedError(_("Swap only supports host devices")) try: self._swap_volume(guest, disk_dev, conf, resize_to) except exception.VolumeRebaseFailed: with excutils.save_and_reraise_exception(): self._disconnect_volume(context, new_connection_info, instance) self._disconnect_volume(context, old_connection_info, instance) def _get_existing_domain_xml(self, instance, network_info, block_device_info=None): try: guest = self._host.get_guest(instance) xml = guest.get_xml_desc() except exception.InstanceNotFound: disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, instance.image_meta, block_device_info) xml = self._get_guest_xml(nova_context.get_admin_context(), instance, network_info, disk_info, instance.image_meta, block_device_info=block_device_info) return xml def detach_volume(self, context, connection_info, instance, mountpoint, encryption=None): disk_dev = mountpoint.rpartition("/")[2] try: guest = self._host.get_guest(instance) state = guest.get_power_state(self._host) live = state in (power_state.RUNNING, power_state.PAUSED) # NOTE(lyarwood): The volume must be detached from the VM before # detaching any attached encryptors or disconnecting the underlying # volume in _disconnect_volume. Otherwise, the encryptor or volume # driver may report that the volume is still in use. wait_for_detach = guest.detach_device_with_retry(guest.get_disk, disk_dev, live=live) wait_for_detach() except exception.InstanceNotFound: # NOTE(zhaoqin): If the instance does not exist, _lookup_by_name() # will throw InstanceNotFound exception. Need to # disconnect volume under this circumstance. 
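            # The handlers in this block deliberately swallow "instance is
            # already gone" style errors so the _disconnect_volume() call after
            # the try/except still cleans up the host side.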
LOG.warning("During detach_volume, instance disappeared.", instance=instance) except exception.DeviceNotFound: # We should still try to disconnect logical device from # host, an error might have happened during a previous # call. LOG.info("Device %s not found in instance.", disk_dev, instance=instance) except libvirt.libvirtError as ex: # NOTE(vish): This is called to cleanup volumes after live # migration, so we should still disconnect even if # the instance doesn't exist here anymore. error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: # NOTE(vish): LOG.warning("During detach_volume, instance disappeared.", instance=instance) else: raise self._disconnect_volume(context, connection_info, instance, encryption=encryption) def extend_volume(self, connection_info, instance): try: new_size = self._extend_volume(connection_info, instance) except NotImplementedError: raise exception.ExtendVolumeNotSupported() # Resize the device in QEMU so its size is updated and # detected by the instance without rebooting. try: guest = self._host.get_guest(instance) state = guest.get_power_state(self._host) active_state = state in (power_state.RUNNING, power_state.PAUSED) if active_state: disk_path = connection_info['data']['device_path'] LOG.debug('resizing block device %(dev)s to %(size)u kb', {'dev': disk_path, 'size': new_size}) dev = guest.get_block_device(disk_path) dev.resize(new_size // units.Ki) else: LOG.debug('Skipping block device resize, guest is not running', instance=instance) except exception.InstanceNotFound: with excutils.save_and_reraise_exception(): LOG.warning('During extend_volume, instance disappeared.', instance=instance) except libvirt.libvirtError: with excutils.save_and_reraise_exception(): LOG.exception('resizing block device failed.', instance=instance) def attach_interface(self, context, instance, image_meta, vif): guest = self._host.get_guest(instance) self.vif_driver.plug(instance, vif) self.firewall_driver.setup_basic_filtering(instance, [vif]) cfg = self.vif_driver.get_config(instance, vif, image_meta, instance.flavor, CONF.libvirt.virt_type, self._host) try: state = guest.get_power_state(self._host) live = state in (power_state.RUNNING, power_state.PAUSED) guest.attach_device(cfg, persistent=True, live=live) except libvirt.libvirtError: LOG.error('attaching network adapter failed.', instance=instance, exc_info=True) self.vif_driver.unplug(instance, vif) raise exception.InterfaceAttachFailed( instance_uuid=instance.uuid) try: # NOTE(artom) If we're attaching with a device role tag, we need to # rebuild device_metadata. If we're attaching without a role # tag, we're rebuilding it here needlessly anyways. This isn't a # massive deal, and it helps reduce code complexity by not having # to indicate to the virt driver that the attach is tagged. The # really important optimization of not calling the database unless # device_metadata has actually changed is done for us by # instance.save(). instance.device_metadata = self._build_device_metadata( context, instance) instance.save() except Exception: # NOTE(artom) If we fail here it means the interface attached # successfully but building and/or saving the device metadata # failed. Just unplugging the vif is therefore not enough cleanup, # we need to detach the interface. 
with excutils.save_and_reraise_exception(reraise=False): LOG.error('Interface attached successfully but building ' 'and/or saving device metadata failed.', instance=instance, exc_info=True) self.detach_interface(context, instance, vif) raise exception.InterfaceAttachFailed( instance_uuid=instance.uuid) def detach_interface(self, context, instance, vif): guest = self._host.get_guest(instance) cfg = self.vif_driver.get_config(instance, vif, instance.image_meta, instance.flavor, CONF.libvirt.virt_type, self._host) interface = guest.get_interface_by_cfg(cfg) try: self.vif_driver.unplug(instance, vif) # NOTE(mriedem): When deleting an instance and using Neutron, # we can be racing against Neutron deleting the port and # sending the vif-deleted event which then triggers a call to # detach the interface, so if the interface is not found then # we can just log it as a warning. if not interface: mac = vif.get('address') # The interface is gone so just log it as a warning. LOG.warning('Detaching interface %(mac)s failed because ' 'the device is no longer found on the guest.', {'mac': mac}, instance=instance) return state = guest.get_power_state(self._host) live = state in (power_state.RUNNING, power_state.PAUSED) # Now we are going to loop until the interface is detached or we # timeout. wait_for_detach = guest.detach_device_with_retry( guest.get_interface_by_cfg, cfg, live=live, alternative_device_name=self.vif_driver.get_vif_devname(vif)) wait_for_detach() except exception.DeviceDetachFailed: # We failed to detach the device even with the retry loop, so let's # dump some debug information to the logs before raising back up. with excutils.save_and_reraise_exception(): devname = self.vif_driver.get_vif_devname(vif) interface = guest.get_interface_by_cfg(cfg) if interface: LOG.warning( 'Failed to detach interface %(devname)s after ' 'repeated attempts. Final interface xml:\n' '%(interface_xml)s\nFinal guest xml:\n%(guest_xml)s', {'devname': devname, 'interface_xml': interface.to_xml(), 'guest_xml': guest.get_xml_desc()}, instance=instance) except exception.DeviceNotFound: # The interface is gone so just log it as a warning. LOG.warning('Detaching interface %(mac)s failed because ' 'the device is no longer found on the guest.', {'mac': vif.get('address')}, instance=instance) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: LOG.warning("During detach_interface, instance disappeared.", instance=instance) else: # NOTE(mriedem): When deleting an instance and using Neutron, # we can be racing against Neutron deleting the port and # sending the vif-deleted event which then triggers a call to # detach the interface, so we might have failed because the # network device no longer exists. Libvirt will fail with # "operation failed: no matching network device was found" # which unfortunately does not have a unique error code so we # need to look up the interface by config and if it's not found # then we can just log it as a warning rather than tracing an # error. mac = vif.get('address') interface = guest.get_interface_by_cfg(cfg) if interface: LOG.error('detaching network adapter failed.', instance=instance, exc_info=True) raise exception.InterfaceDetachFailed( instance_uuid=instance.uuid) # The interface is gone so just log it as a warning. 
LOG.warning('Detaching interface %(mac)s failed because ' 'the device is no longer found on the guest.', {'mac': mac}, instance=instance) def _create_snapshot_metadata(self, image_meta, instance, img_fmt, snp_name): metadata = {'is_public': False, 'status': 'active', 'name': snp_name, 'properties': { 'kernel_id': instance.kernel_id, 'image_location': 'snapshot', 'image_state': 'available', 'owner_id': instance.project_id, 'ramdisk_id': instance.ramdisk_id, } } if instance.os_type: metadata['properties']['os_type'] = instance.os_type # NOTE(vish): glance forces ami disk format to be ami if image_meta.disk_format == 'ami': metadata['disk_format'] = 'ami' else: metadata['disk_format'] = img_fmt if image_meta.obj_attr_is_set("container_format"): metadata['container_format'] = image_meta.container_format else: metadata['container_format'] = "bare" return metadata def snapshot(self, context, instance, image_id, update_task_state): """Create snapshot from a running VM instance. This command only works with qemu 0.14+ """ try: guest = self._host.get_guest(instance) # TODO(sahid): We are converting all calls from a # virDomain object to use nova.virt.libvirt.Guest. # We should be able to remove virt_dom at the end. virt_dom = guest._domain except exception.InstanceNotFound: raise exception.InstanceNotRunning(instance_id=instance.uuid) snapshot = self._image_api.get(context, image_id) # source_format is an on-disk format # source_type is a backend type disk_path, source_format = libvirt_utils.find_disk(guest) source_type = libvirt_utils.get_disk_type_from_path(disk_path) # We won't have source_type for raw or qcow2 disks, because we can't # determine that from the path. We should have it from the libvirt # xml, though. if source_type is None: source_type = source_format # For lxc instances we won't have it either from libvirt xml # (because we just gave libvirt the mounted filesystem), or the path, # so source_type is still going to be None. In this case, # root_disk is going to default to CONF.libvirt.images_type # below, which is still safe. image_format = CONF.libvirt.snapshot_image_format or source_type # NOTE(bfilippov): save lvm and rbd as raw if image_format == 'lvm' or image_format == 'rbd': image_format = 'raw' metadata = self._create_snapshot_metadata(instance.image_meta, instance, image_format, snapshot['name']) snapshot_name = uuidutils.generate_uuid(dashed=False) state = guest.get_power_state(self._host) # NOTE(dgenin): Instances with LVM encrypted ephemeral storage require # cold snapshots. Currently, checking for encryption is # redundant because LVM supports only cold snapshots. # It is necessary in case this situation changes in the # future. if (self._host.has_min_version(hv_type=host.HV_DRIVER_QEMU) and source_type not in ('lvm') and not CONF.ephemeral_storage_encryption.enabled and not CONF.workarounds.disable_libvirt_livesnapshot # NOTE(rmk): We cannot perform live snapshots when a # managedSave file is present, so we will use the cold/legacy # method for instances which are shutdown or paused. # NOTE(mriedem): Live snapshot doesn't work with paused # instances on older versions of libvirt/qemu. We can likely # remove the restriction on PAUSED once we require # libvirt>=3.6.0 and qemu>=2.10 since that works with the # Pike Ubuntu Cloud Archive testing in Queens. and state not in (power_state.SHUTDOWN, power_state.PAUSED)): live_snapshot = True # Abort is an idempotent operation, so make sure any block # jobs which may have failed are ended. 
This operation also # confirms the running instance, as opposed to the system as a # whole, has a new enough version of the hypervisor (bug 1193146). try: guest.get_block_device(disk_path).abort_job() except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED: live_snapshot = False else: pass else: live_snapshot = False self._prepare_domain_for_snapshot(context, live_snapshot, state, instance) root_disk = self.image_backend.by_libvirt_path( instance, disk_path, image_type=source_type) if live_snapshot: LOG.info("Beginning live snapshot process", instance=instance) else: LOG.info("Beginning cold snapshot process", instance=instance) update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) try: update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD) metadata['location'] = root_disk.direct_snapshot( context, snapshot_name, image_format, image_id, instance.image_ref) self._snapshot_domain(context, live_snapshot, virt_dom, state, instance) self._image_api.update(context, image_id, metadata, purge_props=False) except (NotImplementedError, exception.ImageUnacceptable, exception.Forbidden) as e: if type(e) != NotImplementedError: LOG.warning('Performing standard snapshot because direct ' 'snapshot failed: %(error)s', {'error': encodeutils.exception_to_unicode(e)}) failed_snap = metadata.pop('location', None) if failed_snap: failed_snap = {'url': str(failed_snap)} root_disk.cleanup_direct_snapshot(failed_snap, also_destroy_volume=True, ignore_errors=True) update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD, expected_state=task_states.IMAGE_UPLOADING) # TODO(nic): possibly abstract this out to the root_disk if source_type == 'rbd' and live_snapshot: # Standard snapshot uses qemu-img convert from RBD which is # not safe to run with live_snapshot. 
live_snapshot = False # Suspend the guest, so this is no longer a live snapshot self._prepare_domain_for_snapshot(context, live_snapshot, state, instance) snapshot_directory = CONF.libvirt.snapshots_directory fileutils.ensure_tree(snapshot_directory) with utils.tempdir(dir=snapshot_directory) as tmpdir: try: out_path = os.path.join(tmpdir, snapshot_name) if live_snapshot: # NOTE(xqueralt): libvirt needs o+x in the tempdir os.chmod(tmpdir, 0o701) self._live_snapshot(context, instance, guest, disk_path, out_path, source_format, image_format, instance.image_meta) else: root_disk.snapshot_extract(out_path, image_format) LOG.info("Snapshot extracted, beginning image upload", instance=instance) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: LOG.info('Instance %(instance_name)s disappeared ' 'while taking snapshot of it: [Error Code ' '%(error_code)s] %(ex)s', {'instance_name': instance.name, 'error_code': error_code, 'ex': ex}, instance=instance) raise exception.InstanceNotFound( instance_id=instance.uuid) else: raise finally: self._snapshot_domain(context, live_snapshot, virt_dom, state, instance) # Upload that image to the image service update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD) with libvirt_utils.file_open(out_path, 'rb') as image_file: self._image_api.update(context, image_id, metadata, image_file) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_("Failed to snapshot image")) failed_snap = metadata.pop('location', None) if failed_snap: failed_snap = {'url': str(failed_snap)} root_disk.cleanup_direct_snapshot( failed_snap, also_destroy_volume=True, ignore_errors=True) LOG.info("Snapshot image upload complete", instance=instance) def _prepare_domain_for_snapshot(self, context, live_snapshot, state, instance): # NOTE(dkang): managedSave does not work for LXC if CONF.libvirt.virt_type != 'lxc' and not live_snapshot: if state == power_state.RUNNING or state == power_state.PAUSED: self.suspend(context, instance) def _snapshot_domain(self, context, live_snapshot, virt_dom, state, instance): guest = None # NOTE(dkang): because previous managedSave is not called # for LXC, _create_domain must not be called. if CONF.libvirt.virt_type != 'lxc' and not live_snapshot: if state == power_state.RUNNING: guest = self._create_domain(domain=virt_dom) elif state == power_state.PAUSED: guest = self._create_domain(domain=virt_dom, pause=True) if guest is not None: self._attach_pci_devices( guest, pci_manager.get_instance_pci_devs(instance)) self._attach_direct_passthrough_ports( context, instance, guest) def _can_set_admin_password(self, image_meta): if CONF.libvirt.virt_type == 'parallels': if not self._host.has_min_version( MIN_LIBVIRT_PARALLELS_SET_ADMIN_PASSWD): raise exception.SetAdminPasswdNotSupported() elif CONF.libvirt.virt_type in ('kvm', 'qemu'): if not self._host.has_min_version( MIN_LIBVIRT_SET_ADMIN_PASSWD): raise exception.SetAdminPasswdNotSupported() if not image_meta.properties.get('hw_qemu_guest_agent', False): raise exception.QemuGuestAgentNotEnabled() else: raise exception.SetAdminPasswdNotSupported() # TODO(melwitt): Combine this with the similar xenapi code at some point. 
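    # NOTE: the helper below stores the admin password encrypted with the
    # instance's ssh-rsa public key in system_metadata; the intent is that it
    # can later be retrieved (and decrypted with the matching private key)
    # through the server password metadata API. Nothing is saved if the
    # keypair is not an ssh-rsa key.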
def _save_instance_password_if_sshkey_present(self, instance, new_pass): sshkey = instance.key_data if 'key_data' in instance else None if sshkey and sshkey.startswith("ssh-rsa"): enc = crypto.ssh_encrypt_text(sshkey, new_pass) # NOTE(melwitt): The convert_password method doesn't actually do # anything with the context argument, so we can pass None. instance.system_metadata.update( password.convert_password(None, base64.encode_as_text(enc))) instance.save() def set_admin_password(self, instance, new_pass): self._can_set_admin_password(instance.image_meta) guest = self._host.get_guest(instance) user = instance.image_meta.properties.get("os_admin_user") if not user: if instance.os_type == "windows": user = "Administrator" else: user = "root" try: guest.set_user_password(user, new_pass) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE: LOG.debug('Failed to set password: QEMU agent unresponsive', instance_uuid=instance.uuid) raise NotImplementedError() err_msg = encodeutils.exception_to_unicode(ex) msg = (_('Error from libvirt while set password for username ' '"%(user)s": [Error Code %(error_code)s] %(ex)s') % {'user': user, 'error_code': error_code, 'ex': err_msg}) raise exception.InternalError(msg) else: # Save the password in sysmeta so it may be retrieved from the # metadata service. self._save_instance_password_if_sshkey_present(instance, new_pass) def _can_quiesce(self, instance, image_meta): if CONF.libvirt.virt_type not in ('kvm', 'qemu'): raise exception.InstanceQuiesceNotSupported( instance_id=instance.uuid) if not image_meta.properties.get('hw_qemu_guest_agent', False): raise exception.QemuGuestAgentNotEnabled() def _requires_quiesce(self, image_meta): return image_meta.properties.get('os_require_quiesce', False) def _set_quiesced(self, context, instance, image_meta, quiesced): self._can_quiesce(instance, image_meta) try: guest = self._host.get_guest(instance) if quiesced: guest.freeze_filesystems() else: guest.thaw_filesystems() except libvirt.libvirtError as ex: error_code = ex.get_error_code() err_msg = encodeutils.exception_to_unicode(ex) msg = (_('Error from libvirt while quiescing %(instance_name)s: ' '[Error Code %(error_code)s] %(ex)s') % {'instance_name': instance.name, 'error_code': error_code, 'ex': err_msg}) raise exception.InternalError(msg) def quiesce(self, context, instance, image_meta): """Freeze the guest filesystems to prepare for snapshot. The qemu-guest-agent must be setup to execute fsfreeze. """ self._set_quiesced(context, instance, image_meta, True) def unquiesce(self, context, instance, image_meta): """Thaw the guest filesystems after snapshot.""" self._set_quiesced(context, instance, image_meta, False) def _live_snapshot(self, context, instance, guest, disk_path, out_path, source_format, image_format, image_meta): """Snapshot an instance without downtime.""" dev = guest.get_block_device(disk_path) # Save a copy of the domain's persistent XML file xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True) # Abort is an idempotent operation, so make sure any block # jobs which may have failed are ended. try: dev.abort_job() except Exception: pass # NOTE (rmk): We are using shallow rebases as a workaround to a bug # in QEMU 1.3. In order to do this, we need to create # a destination image with the original backing file # and matching size of the instance root disk. 
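        # The shallow-rebase workflow below is, roughly: create a qcow2
        # delta that reuses the root disk's existing backing file, mirror
        # only the top layer of the disk into that delta with
        # blockRebase(copy=True, reuse_ext=True, shallow=True), abort the
        # block job once the mirror is complete, and finally flatten the
        # delta into the snapshot image via extract_snapshot().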
src_disk_size = libvirt_utils.get_disk_size(disk_path, format=source_format) src_back_path = libvirt_utils.get_disk_backing_file(disk_path, format=source_format, basename=False) disk_delta = out_path + '.delta' libvirt_utils.create_cow_image(src_back_path, disk_delta, src_disk_size) quiesced = False try: self._set_quiesced(context, instance, image_meta, True) quiesced = True except exception.NovaException as err: if self._requires_quiesce(image_meta): raise LOG.info('Skipping quiescing instance: %(reason)s.', {'reason': err}, instance=instance) try: # NOTE (rmk): blockRebase cannot be executed on persistent # domains, so we need to temporarily undefine it. # If any part of this block fails, the domain is # re-defined regardless. if guest.has_persistent_configuration(): support_uefi = self._has_uefi_support() guest.delete_configuration(support_uefi) # NOTE (rmk): Establish a temporary mirror of our root disk and # issue an abort once we have a complete copy. dev.rebase(disk_delta, copy=True, reuse_ext=True, shallow=True) while not dev.is_job_complete(): time.sleep(0.5) dev.abort_job() nova.privsep.path.chown(disk_delta, uid=os.getuid()) finally: self._host.write_instance_config(xml) if quiesced: self._set_quiesced(context, instance, image_meta, False) # Convert the delta (CoW) image with a backing file to a flat # image with no backing file. libvirt_utils.extract_snapshot(disk_delta, 'qcow2', out_path, image_format) def _volume_snapshot_update_status(self, context, snapshot_id, status): """Send a snapshot status update to Cinder. This method captures and logs exceptions that occur since callers cannot do anything useful with these exceptions. Operations on the Cinder side waiting for this will time out if a failure occurs sending the update. :param context: security context :param snapshot_id: id of snapshot being updated :param status: new status value """ try: self._volume_api.update_snapshot_status(context, snapshot_id, status) except Exception: LOG.exception(_('Failed to send updated snapshot status ' 'to volume service.')) def _volume_snapshot_create(self, context, instance, guest, volume_id, new_file): """Perform volume snapshot. :param guest: VM that volume is attached to :param volume_id: volume UUID to snapshot :param new_file: relative path to new qcow2 file present on share """ xml = guest.get_xml_desc() xml_doc = etree.fromstring(xml) device_info = vconfig.LibvirtConfigGuest() device_info.parse_dom(xml_doc) disks_to_snap = [] # to be snapshotted by libvirt network_disks_to_snap = [] # network disks (netfs, etc.) 
disks_to_skip = [] # local disks not snapshotted for guest_disk in device_info.devices: if (guest_disk.root_name != 'disk'): continue if (guest_disk.target_dev is None): continue if (guest_disk.serial is None or guest_disk.serial != volume_id): disks_to_skip.append(guest_disk.target_dev) continue # disk is a Cinder volume with the correct volume_id disk_info = { 'dev': guest_disk.target_dev, 'serial': guest_disk.serial, 'current_file': guest_disk.source_path, 'source_protocol': guest_disk.source_protocol, 'source_name': guest_disk.source_name, 'source_hosts': guest_disk.source_hosts, 'source_ports': guest_disk.source_ports } # Determine path for new_file based on current path if disk_info['current_file'] is not None: current_file = disk_info['current_file'] new_file_path = os.path.join(os.path.dirname(current_file), new_file) disks_to_snap.append((current_file, new_file_path)) # NOTE(mriedem): This used to include a check for gluster in # addition to netfs since they were added together. Support for # gluster was removed in the 16.0.0 Pike release. It is unclear, # however, if other volume drivers rely on the netfs disk source # protocol. elif disk_info['source_protocol'] == 'netfs': network_disks_to_snap.append((disk_info, new_file)) if not disks_to_snap and not network_disks_to_snap: msg = _('Found no disk to snapshot.') raise exception.InternalError(msg) snapshot = vconfig.LibvirtConfigGuestSnapshot() for current_name, new_filename in disks_to_snap: snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk() snap_disk.name = current_name snap_disk.source_path = new_filename snap_disk.source_type = 'file' snap_disk.snapshot = 'external' snap_disk.driver_name = 'qcow2' snapshot.add_disk(snap_disk) for disk_info, new_filename in network_disks_to_snap: snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk() snap_disk.name = disk_info['dev'] snap_disk.source_type = 'network' snap_disk.source_protocol = disk_info['source_protocol'] snap_disk.snapshot = 'external' snap_disk.source_path = new_filename old_dir = disk_info['source_name'].split('/')[0] snap_disk.source_name = '%s/%s' % (old_dir, new_filename) snap_disk.source_hosts = disk_info['source_hosts'] snap_disk.source_ports = disk_info['source_ports'] snapshot.add_disk(snap_disk) for dev in disks_to_skip: snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk() snap_disk.name = dev snap_disk.snapshot = 'no' snapshot.add_disk(snap_disk) snapshot_xml = snapshot.to_xml() LOG.debug("snap xml: %s", snapshot_xml, instance=instance) image_meta = instance.image_meta try: # Check to see if we can quiesce the guest before taking the # snapshot. self._can_quiesce(instance, image_meta) try: guest.snapshot(snapshot, no_metadata=True, disk_only=True, reuse_ext=True, quiesce=True) return except libvirt.libvirtError: # If the image says that quiesce is required then we fail. if self._requires_quiesce(image_meta): raise LOG.exception(_('Unable to create quiesced VM snapshot, ' 'attempting again with quiescing disabled.'), instance=instance) except (exception.InstanceQuiesceNotSupported, exception.QemuGuestAgentNotEnabled) as err: # If the image says that quiesce is required then we need to fail. 
if self._requires_quiesce(image_meta): raise LOG.info('Skipping quiescing instance: %(reason)s.', {'reason': err}, instance=instance) try: guest.snapshot(snapshot, no_metadata=True, disk_only=True, reuse_ext=True, quiesce=False) except libvirt.libvirtError: LOG.exception(_('Unable to create VM snapshot, ' 'failing volume_snapshot operation.'), instance=instance) raise def _volume_refresh_connection_info(self, context, instance, volume_id): bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( context, volume_id, instance.uuid) driver_bdm = driver_block_device.convert_volume(bdm) if driver_bdm: driver_bdm.refresh_connection_info(context, instance, self._volume_api, self) def volume_snapshot_create(self, context, instance, volume_id, create_info): """Create snapshots of a Cinder volume via libvirt. :param instance: VM instance object reference :param volume_id: id of volume being snapshotted :param create_info: dict of information used to create snapshots - snapshot_id : ID of snapshot - type : qcow2 / <other> - new_file : qcow2 file created by Cinder which becomes the VM's active image after the snapshot is complete """ LOG.debug("volume_snapshot_create: create_info: %(c_info)s", {'c_info': create_info}, instance=instance) try: guest = self._host.get_guest(instance) except exception.InstanceNotFound: raise exception.InstanceNotRunning(instance_id=instance.uuid) if create_info['type'] != 'qcow2': msg = _('Unknown type: %s') % create_info['type'] raise exception.InternalError(msg) snapshot_id = create_info.get('snapshot_id', None) if snapshot_id is None: msg = _('snapshot_id required in create_info') raise exception.InternalError(msg) try: self._volume_snapshot_create(context, instance, guest, volume_id, create_info['new_file']) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_('Error occurred during ' 'volume_snapshot_create, ' 'sending error status to Cinder.'), instance=instance) self._volume_snapshot_update_status( context, snapshot_id, 'error') self._volume_snapshot_update_status( context, snapshot_id, 'creating') def _wait_for_snapshot(): snapshot = self._volume_api.get_snapshot(context, snapshot_id) if snapshot.get('status') != 'creating': self._volume_refresh_connection_info(context, instance, volume_id) raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot) timer.start(interval=0.5).wait() @staticmethod def _rebase_with_qemu_img(guest, device, active_disk_object, rebase_base): """Rebase a device tied to a guest using qemu-img. :param guest:the Guest which owns the device being rebased :type guest: nova.virt.libvirt.guest.Guest :param device: the guest block device to rebase :type device: nova.virt.libvirt.guest.BlockDevice :param active_disk_object: the guest block device to rebase :type active_disk_object: nova.virt.libvirt.config.\ LibvirtConfigGuestDisk :param rebase_base: the new parent in the backing chain :type rebase_base: None or string """ # It's unsure how well qemu-img handles network disks for # every protocol. So let's be safe. active_protocol = active_disk_object.source_protocol if active_protocol is not None: msg = _("Something went wrong when deleting a volume snapshot: " "rebasing a %(protocol)s network disk using qemu-img " "has not been fully tested") % {'protocol': active_protocol} LOG.error(msg) raise exception.InternalError(msg) if rebase_base is None: # If backing_file is specified as "" (the empty string), then # the image is rebased onto no backing file (i.e. 
it will exist # independently of any backing file). backing_file = "" qemu_img_extra_arg = [] else: # If the rebased image is going to have a backing file then # explicitly set the backing file format to avoid any security # concerns related to file format auto detection. backing_file = rebase_base b_file_fmt = images.qemu_img_info(backing_file).file_format qemu_img_extra_arg = ['-F', b_file_fmt] qemu_img_extra_arg.append(active_disk_object.source_path) utils.execute("qemu-img", "rebase", "-b", backing_file, *qemu_img_extra_arg) def _volume_snapshot_delete(self, context, instance, volume_id, snapshot_id, delete_info=None): """Note: if file being merged into == active image: do a blockRebase (pull) operation else: do a blockCommit operation Files must be adjacent in snap chain. :param instance: instance object reference :param volume_id: volume UUID :param snapshot_id: snapshot UUID (unused currently) :param delete_info: { 'type': 'qcow2', 'file_to_merge': 'a.img', 'merge_target_file': 'b.img' or None (if merging file_to_merge into active image) } """ LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info, instance=instance) if delete_info['type'] != 'qcow2': msg = _('Unknown delete_info type %s') % delete_info['type'] raise exception.InternalError(msg) try: guest = self._host.get_guest(instance) except exception.InstanceNotFound: raise exception.InstanceNotRunning(instance_id=instance.uuid) # Find dev name my_dev = None active_disk = None xml = guest.get_xml_desc() xml_doc = etree.fromstring(xml) device_info = vconfig.LibvirtConfigGuest() device_info.parse_dom(xml_doc) active_disk_object = None for guest_disk in device_info.devices: if (guest_disk.root_name != 'disk'): continue if (guest_disk.target_dev is None or guest_disk.serial is None): continue if guest_disk.serial == volume_id: my_dev = guest_disk.target_dev active_disk = guest_disk.source_path active_protocol = guest_disk.source_protocol active_disk_object = guest_disk break if my_dev is None or (active_disk is None and active_protocol is None): LOG.debug('Domain XML: %s', xml, instance=instance) msg = (_('Disk with id: %s not found attached to instance.') % volume_id) raise exception.InternalError(msg) LOG.debug("found device at %s", my_dev, instance=instance) def _get_snap_dev(filename, backing_store): if filename is None: msg = _('filename cannot be None') raise exception.InternalError(msg) # libgfapi delete LOG.debug("XML: %s", xml) LOG.debug("active disk object: %s", active_disk_object) # determine reference within backing store for desired image filename_to_merge = filename matched_name = None b = backing_store index = None current_filename = active_disk_object.source_name.split('/')[1] if current_filename == filename_to_merge: return my_dev + '[0]' while b is not None: source_filename = b.source_name.split('/')[1] if source_filename == filename_to_merge: LOG.debug('found match: %s', b.source_name) matched_name = b.source_name index = b.index break b = b.backing_store if matched_name is None: msg = _('no match found for %s') % (filename_to_merge) raise exception.InternalError(msg) LOG.debug('index of match (%s) is %s', b.source_name, index) my_snap_dev = '%s[%s]' % (my_dev, index) return my_snap_dev if delete_info['merge_target_file'] is None: # pull via blockRebase() # Merge the most recent snapshot into the active image rebase_disk = my_dev rebase_base = delete_info['file_to_merge'] # often None if (active_protocol is not None) and (rebase_base is not None): rebase_base = _get_snap_dev(rebase_base, 
active_disk_object.backing_store) # NOTE(deepakcs): libvirt added support for _RELATIVE in v1.2.7, # and when available this flag _must_ be used to ensure backing # paths are maintained relative by qemu. # # If _RELATIVE flag not found, continue with old behaviour # (relative backing path seems to work for this case) try: libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE relative = rebase_base is not None except AttributeError: LOG.warning( "Relative blockrebase support was not detected. " "Continuing with old behaviour.") relative = False LOG.debug( 'disk: %(disk)s, base: %(base)s, ' 'bw: %(bw)s, relative: %(relative)s', {'disk': rebase_disk, 'base': rebase_base, 'bw': libvirt_guest.BlockDevice.REBASE_DEFAULT_BANDWIDTH, 'relative': str(relative)}, instance=instance) dev = guest.get_block_device(rebase_disk) if guest.is_active(): result = dev.rebase(rebase_base, relative=relative) if result == 0: LOG.debug('blockRebase started successfully', instance=instance) while not dev.is_job_complete(): LOG.debug('waiting for blockRebase job completion', instance=instance) time.sleep(0.5) # If the guest is not running libvirt won't do a blockRebase. # In that case, let's ask qemu-img to rebase the disk. else: LOG.debug('Guest is not running so doing a block rebase ' 'using "qemu-img rebase"', instance=instance) self._rebase_with_qemu_img(guest, dev, active_disk_object, rebase_base) else: # commit with blockCommit() my_snap_base = None my_snap_top = None commit_disk = my_dev if active_protocol is not None: my_snap_base = _get_snap_dev(delete_info['merge_target_file'], active_disk_object.backing_store) my_snap_top = _get_snap_dev(delete_info['file_to_merge'], active_disk_object.backing_store) commit_base = my_snap_base or delete_info['merge_target_file'] commit_top = my_snap_top or delete_info['file_to_merge'] LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s ' 'commit_base=%(commit_base)s ' 'commit_top=%(commit_top)s ', {'commit_disk': commit_disk, 'commit_base': commit_base, 'commit_top': commit_top}, instance=instance) dev = guest.get_block_device(commit_disk) result = dev.commit(commit_base, commit_top, relative=True) if result == 0: LOG.debug('blockCommit started successfully', instance=instance) while not dev.is_job_complete(): LOG.debug('waiting for blockCommit job completion', instance=instance) time.sleep(0.5) def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id, delete_info): try: self._volume_snapshot_delete(context, instance, volume_id, snapshot_id, delete_info=delete_info) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_('Error occurred during ' 'volume_snapshot_delete, ' 'sending error status to Cinder.'), instance=instance) self._volume_snapshot_update_status( context, snapshot_id, 'error_deleting') self._volume_snapshot_update_status(context, snapshot_id, 'deleting') self._volume_refresh_connection_info(context, instance, volume_id) def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): """Reboot a virtual machine, given an instance reference.""" if reboot_type == 'SOFT': # NOTE(vish): This will attempt to do a graceful shutdown/restart. 
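            # A soft reboot asks the guest OS (via ACPI) to shut itself down
            # and then recreates the domain. If the guest never responds, or
            # libvirt raises an error, we fall back to a hard reboot below.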
try: soft_reboot_success = self._soft_reboot(instance) except libvirt.libvirtError as e: LOG.debug("Instance soft reboot failed: %s", encodeutils.exception_to_unicode(e), instance=instance) soft_reboot_success = False if soft_reboot_success: LOG.info("Instance soft rebooted successfully.", instance=instance) return else: LOG.warning("Failed to soft reboot instance. " "Trying hard reboot.", instance=instance) return self._hard_reboot(context, instance, network_info, block_device_info) def _soft_reboot(self, instance): """Attempt to shutdown and restart the instance gracefully. We use shutdown and create here so we can return if the guest responded and actually rebooted. Note that this method only succeeds if the guest responds to acpi. Therefore we return success or failure so we can fall back to a hard reboot if necessary. :returns: True if the reboot succeeded """ guest = self._host.get_guest(instance) state = guest.get_power_state(self._host) old_domid = guest.id # NOTE(vish): This check allows us to reboot an instance that # is already shutdown. if state == power_state.RUNNING: guest.shutdown() # NOTE(vish): This actually could take slightly longer than the # FLAG defines depending on how long the get_info # call takes to return. self._prepare_pci_devices_for_use( pci_manager.get_instance_pci_devs(instance, 'all')) for x in range(CONF.libvirt.wait_soft_reboot_seconds): guest = self._host.get_guest(instance) state = guest.get_power_state(self._host) new_domid = guest.id # NOTE(ivoks): By checking domain IDs, we make sure we are # not recreating domain that's already running. if old_domid != new_domid: if state in [power_state.SHUTDOWN, power_state.CRASHED]: LOG.info("Instance shutdown successfully.", instance=instance) self._create_domain(domain=guest._domain) timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_running, instance) timer.start(interval=0.5).wait() return True else: LOG.info("Instance may have been rebooted during soft " "reboot, so return now.", instance=instance) return True greenthread.sleep(1) return False def _hard_reboot(self, context, instance, network_info, block_device_info=None): """Reboot a virtual machine, given an instance reference. Performs a Libvirt reset (if supported) on the domain. If Libvirt reset is unavailable this method actually destroys and re-creates the domain to ensure the reboot happens, as the guest OS cannot ignore this action. """ # NOTE(sbauza): Since we undefine the guest XML when destroying, we # need to remember the existing mdevs for reusing them. mdevs = self._get_all_assigned_mediated_devices(instance) mdevs = list(mdevs.keys()) # NOTE(mdbooth): In addition to performing a hard reboot of the domain, # the hard reboot operation is relied upon by operators to be an # automated attempt to fix as many things as possible about a # non-functioning instance before resorting to manual intervention. # With this goal in mind, we tear down all the aspects of an instance # we can here without losing data. This allows us to re-initialise from # scratch, and hopefully fix, most aspects of a non-functioning guest. 
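        # The sequence here is: destroy the domain but keep its disks,
        # regenerate the guest XML (and any missing backing files) from
        # Nova's own view of the instance, then define and start a fresh
        # domain and wait for it to report RUNNING.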
self.destroy(context, instance, network_info, destroy_disks=False, block_device_info=block_device_info) # Convert the system metadata to image metadata # NOTE(mdbooth): This is a workaround for stateless Nova compute # https://bugs.launchpad.net/nova/+bug/1349978 instance_dir = libvirt_utils.get_instance_path(instance) fileutils.ensure_tree(instance_dir) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, instance.image_meta, block_device_info) # NOTE(vish): This could generate the wrong device_format if we are # using the raw backend and the images don't exist yet. # The create_images_and_backing below doesn't properly # regenerate raw backend images, however, so when it # does we need to (re)generate the xml after the images # are in place. xml = self._get_guest_xml(context, instance, network_info, disk_info, instance.image_meta, block_device_info=block_device_info, mdevs=mdevs) # NOTE(mdbooth): context.auth_token will not be set when we call # _hard_reboot from resume_state_on_host_boot() if context.auth_token is not None: # NOTE (rmk): Re-populate any missing backing files. config = vconfig.LibvirtConfigGuest() config.parse_str(xml) backing_disk_info = self._get_instance_disk_info_from_config( config, block_device_info) self._create_images_and_backing(context, instance, instance_dir, backing_disk_info) # Initialize all the necessary networking, block devices and # start the instance. # NOTE(melwitt): Pass vifs_already_plugged=True here even though we've # unplugged vifs earlier. The behavior of neutron plug events depends # on which vif type we're using and we are working with a stale network # info cache here, so won't rely on waiting for neutron plug events. # vifs_already_plugged=True means "do not wait for neutron plug events" self._create_domain_and_network(context, xml, instance, network_info, block_device_info=block_device_info, vifs_already_plugged=True) self._prepare_pci_devices_for_use( pci_manager.get_instance_pci_devs(instance, 'all')) def _wait_for_reboot(): """Called at an interval until the VM is running again.""" state = self.get_info(instance).state if state == power_state.RUNNING: LOG.info("Instance rebooted successfully.", instance=instance) raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot) timer.start(interval=0.5).wait() def pause(self, instance): """Pause VM instance.""" self._host.get_guest(instance).pause() def unpause(self, instance): """Unpause paused VM instance.""" guest = self._host.get_guest(instance) guest.resume() guest.sync_guest_time() def _clean_shutdown(self, instance, timeout, retry_interval): """Attempt to shutdown the instance gracefully. 
:param instance: The instance to be shutdown :param timeout: How long to wait in seconds for the instance to shutdown :param retry_interval: How often in seconds to signal the instance to shutdown while waiting :returns: True if the shutdown succeeded """ # List of states that represent a shutdown instance SHUTDOWN_STATES = [power_state.SHUTDOWN, power_state.CRASHED] try: guest = self._host.get_guest(instance) except exception.InstanceNotFound: # If the instance has gone then we don't need to # wait for it to shutdown return True state = guest.get_power_state(self._host) if state in SHUTDOWN_STATES: LOG.info("Instance already shutdown.", instance=instance) return True LOG.debug("Shutting down instance from state %s", state, instance=instance) guest.shutdown() retry_countdown = retry_interval for sec in range(timeout): guest = self._host.get_guest(instance) state = guest.get_power_state(self._host) if state in SHUTDOWN_STATES: LOG.info("Instance shutdown successfully after %d seconds.", sec, instance=instance) return True # Note(PhilD): We can't assume that the Guest was able to process # any previous shutdown signal (for example it may # have still been startingup, so within the overall # timeout we re-trigger the shutdown every # retry_interval if retry_countdown == 0: retry_countdown = retry_interval # Instance could shutdown at any time, in which case we # will get an exception when we call shutdown try: LOG.debug("Instance in state %s after %d seconds - " "resending shutdown", state, sec, instance=instance) guest.shutdown() except libvirt.libvirtError: # Assume this is because its now shutdown, so loop # one more time to clean up. LOG.debug("Ignoring libvirt exception from shutdown " "request.", instance=instance) continue else: retry_countdown -= 1 time.sleep(1) LOG.info("Instance failed to shutdown in %d seconds.", timeout, instance=instance) return False def power_off(self, instance, timeout=0, retry_interval=0): """Power off the specified instance.""" if timeout: self._clean_shutdown(instance, timeout, retry_interval) self._destroy(instance) def power_on(self, context, instance, network_info, block_device_info=None): """Power on the specified instance.""" # We use _hard_reboot here to ensure that all backing files, # network, and block device connections, etc. are established # and available before we attempt to start the instance. 
self._hard_reboot(context, instance, network_info, block_device_info) def trigger_crash_dump(self, instance): """Trigger crash dump by injecting an NMI to the specified instance.""" try: self._host.get_guest(instance).inject_nmi() except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_SUPPORT: raise exception.TriggerCrashDumpNotSupported() elif error_code == libvirt.VIR_ERR_OPERATION_INVALID: raise exception.InstanceNotRunning(instance_id=instance.uuid) LOG.exception(_('Error from libvirt while injecting an NMI to ' '%(instance_uuid)s: ' '[Error Code %(error_code)s] %(ex)s'), {'instance_uuid': instance.uuid, 'error_code': error_code, 'ex': ex}) raise def suspend(self, context, instance): """Suspend the specified instance.""" guest = self._host.get_guest(instance) self._detach_pci_devices(guest, pci_manager.get_instance_pci_devs(instance)) self._detach_direct_passthrough_ports(context, instance, guest) self._detach_mediated_devices(guest) guest.save_memory_state() def resume(self, context, instance, network_info, block_device_info=None): """resume the specified instance.""" xml = self._get_existing_domain_xml(instance, network_info, block_device_info) guest = self._create_domain_and_network(context, xml, instance, network_info, block_device_info=block_device_info, vifs_already_plugged=True) self._attach_pci_devices(guest, pci_manager.get_instance_pci_devs(instance)) self._attach_direct_passthrough_ports( context, instance, guest, network_info) timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_running, instance) timer.start(interval=0.5).wait() guest.sync_guest_time() def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): """resume guest state when a host is booted.""" # Check if the instance is running already and avoid doing # anything if it is. try: guest = self._host.get_guest(instance) state = guest.get_power_state(self._host) ignored_states = (power_state.RUNNING, power_state.SUSPENDED, power_state.NOSTATE, power_state.PAUSED) if state in ignored_states: return except (exception.InternalError, exception.InstanceNotFound): pass # Instance is not up and could be in an unknown state. # Be as absolute as possible about getting it back into # a known and running state. self._hard_reboot(context, instance, network_info, block_device_info) def rescue(self, context, instance, network_info, image_meta, rescue_password): """Loads a VM using rescue images. A rescue is normally performed when something goes wrong with the primary images and data needs to be corrected/recovered. Rescuing should not edit or over-ride the original image, only allow for data recovery. 
""" instance_dir = libvirt_utils.get_instance_path(instance) unrescue_xml = self._get_existing_domain_xml(instance, network_info) unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml') libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml) rescue_image_id = None if image_meta.obj_attr_is_set("id"): rescue_image_id = image_meta.id rescue_images = { 'image_id': (rescue_image_id or CONF.libvirt.rescue_image_id or instance.image_ref), 'kernel_id': (CONF.libvirt.rescue_kernel_id or instance.kernel_id), 'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or instance.ramdisk_id), } disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta, rescue=True) injection_info = InjectionInfo(network_info=network_info, admin_pass=rescue_password, files=None) gen_confdrive = functools.partial(self._create_configdrive, context, instance, injection_info, rescue=True) self._create_image(context, instance, disk_info['mapping'], injection_info=injection_info, suffix='.rescue', disk_images=rescue_images) xml = self._get_guest_xml(context, instance, network_info, disk_info, image_meta, rescue=rescue_images) self._destroy(instance) self._create_domain(xml, post_xml_callback=gen_confdrive) def unrescue(self, instance, network_info): """Reboot the VM which is being rescued back into primary images. """ instance_dir = libvirt_utils.get_instance_path(instance) unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml') xml = libvirt_utils.load_file(unrescue_xml_path) guest = self._host.get_guest(instance) # TODO(sahid): We are converting all calls from a # virDomain object to use nova.virt.libvirt.Guest. # We should be able to remove virt_dom at the end. virt_dom = guest._domain self._destroy(instance) self._create_domain(xml, virt_dom) os.unlink(unrescue_xml_path) rescue_files = os.path.join(instance_dir, "*.rescue") for rescue_file in glob.iglob(rescue_files): if os.path.isdir(rescue_file): shutil.rmtree(rescue_file) else: os.unlink(rescue_file) # cleanup rescue volume lvm.remove_volumes([lvmdisk for lvmdisk in self._lvm_disks(instance) if lvmdisk.endswith('.rescue')]) if CONF.libvirt.images_type == 'rbd': filter_fn = lambda disk: (disk.startswith(instance.uuid) and disk.endswith('.rescue')) LibvirtDriver._get_rbd_driver().cleanup_volumes(filter_fn) def poll_rebooting_instances(self, timeout, instances): pass # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, block_device_info=None): disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta, block_device_info) injection_info = InjectionInfo(network_info=network_info, files=injected_files, admin_pass=admin_password) gen_confdrive = functools.partial(self._create_configdrive, context, instance, injection_info) self._create_image(context, instance, disk_info['mapping'], injection_info=injection_info, block_device_info=block_device_info) # Required by Quobyte CI self._ensure_console_log_for_instance(instance) # Does the guest need to be assigned some vGPU mediated devices ? 
mdevs = self._allocate_mdevs(allocations) xml = self._get_guest_xml(context, instance, network_info, disk_info, image_meta, block_device_info=block_device_info, mdevs=mdevs) self._create_domain_and_network( context, xml, instance, network_info, block_device_info=block_device_info, post_xml_callback=gen_confdrive, destroy_disks_on_failure=True) LOG.debug("Instance is running", instance=instance) def _wait_for_boot(): """Called at an interval until the VM is running.""" state = self.get_info(instance).state if state == power_state.RUNNING: LOG.info("Instance spawned successfully.", instance=instance) raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot) timer.start(interval=0.5).wait() def _get_console_output_file(self, instance, console_log): bytes_to_read = MAX_CONSOLE_BYTES log_data = b"" # The last N read bytes i = 0 # in case there is a log rotation (like "virtlogd") path = console_log while bytes_to_read > 0 and os.path.exists(path): read_log_data, remaining = nova.privsep.path.last_bytes( path, bytes_to_read) # We need the log file content in chronological order, # that's why we *prepend* the log data. log_data = read_log_data + log_data # Prep to read the next file in the chain bytes_to_read -= len(read_log_data) path = console_log + "." + str(i) i += 1 if remaining > 0: LOG.info('Truncated console log returned, ' '%d bytes ignored', remaining, instance=instance) return log_data def get_console_output(self, context, instance): guest = self._host.get_guest(instance) xml = guest.get_xml_desc() tree = etree.fromstring(xml) # If the guest has a console logging to a file prefer to use that file_consoles = tree.findall("./devices/console[@type='file']") if file_consoles: for file_console in file_consoles: source_node = file_console.find('./source') if source_node is None: continue path = source_node.get("path") if not path: continue if not os.path.exists(path): LOG.info('Instance is configured with a file console, ' 'but the backing file is not (yet?) present', instance=instance) return "" return self._get_console_output_file(instance, path) # Try 'pty' types pty_consoles = tree.findall("./devices/console[@type='pty']") if pty_consoles: for pty_console in pty_consoles: source_node = pty_console.find('./source') if source_node is None: continue pty = source_node.get("path") if not pty: continue break else: raise exception.ConsoleNotAvailable() else: raise exception.ConsoleNotAvailable() console_log = self._get_console_log_path(instance) data = nova.privsep.libvirt.readpty(pty) # NOTE(markus_z): The virt_types kvm and qemu are the only ones # which create a dedicated file device for the console logging. # Other virt_types like xen, lxc, uml, parallels depend on the # flush of that pty device into the "console.log" file to ensure # that a series of "get_console_output" calls return the complete # content even after rebooting a guest. 
nova.privsep.path.writefile(console_log, 'a+', data) return self._get_console_output_file(instance, console_log) def get_host_ip_addr(self): ips = compute_utils.get_machine_ips() if CONF.my_ip not in ips: LOG.warning('my_ip address (%(my_ip)s) was not found on ' 'any of the interfaces: %(ifaces)s', {'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)}) return CONF.my_ip def get_vnc_console(self, context, instance): def get_vnc_port_for_instance(instance_name): guest = self._host.get_guest(instance) xml = guest.get_xml_desc() xml_dom = etree.fromstring(xml) graphic = xml_dom.find("./devices/graphics[@type='vnc']") if graphic is not None: return graphic.get('port') # NOTE(rmk): We had VNC consoles enabled but the instance in # question is not actually listening for connections. raise exception.ConsoleTypeUnavailable(console_type='vnc') port = get_vnc_port_for_instance(instance.name) host = CONF.vnc.server_proxyclient_address return ctype.ConsoleVNC(host=host, port=port) def get_spice_console(self, context, instance): def get_spice_ports_for_instance(instance_name): guest = self._host.get_guest(instance) xml = guest.get_xml_desc() xml_dom = etree.fromstring(xml) graphic = xml_dom.find("./devices/graphics[@type='spice']") if graphic is not None: return (graphic.get('port'), graphic.get('tlsPort')) # NOTE(rmk): We had Spice consoles enabled but the instance in # question is not actually listening for connections. raise exception.ConsoleTypeUnavailable(console_type='spice') ports = get_spice_ports_for_instance(instance.name) host = CONF.spice.server_proxyclient_address return ctype.ConsoleSpice(host=host, port=ports[0], tlsPort=ports[1]) def get_serial_console(self, context, instance): guest = self._host.get_guest(instance) for hostname, port in self._get_serial_ports_from_guest( guest, mode='bind'): return ctype.ConsoleSerial(host=hostname, port=port) raise exception.ConsoleTypeUnavailable(console_type='serial') @staticmethod def _create_ephemeral(target, ephemeral_size, fs_label, os_type, is_block_dev=False, context=None, specified_fs=None, vm_mode=None): if not is_block_dev: if (CONF.libvirt.virt_type == "parallels" and vm_mode == fields.VMMode.EXE): libvirt_utils.create_ploop_image('expanded', target, '%dG' % ephemeral_size, specified_fs) return libvirt_utils.create_image('raw', target, '%dG' % ephemeral_size) # Run as root only for block devices. disk_api.mkfs(os_type, fs_label, target, run_as_root=is_block_dev, specified_fs=specified_fs) @staticmethod def _create_swap(target, swap_mb, context=None): """Create a swap file of specified size.""" libvirt_utils.create_image('raw', target, '%dM' % swap_mb) nova.privsep.fs.unprivileged_mkfs('swap', target) @staticmethod def _get_console_log_path(instance): return os.path.join(libvirt_utils.get_instance_path(instance), 'console.log') def _ensure_console_log_for_instance(self, instance): # NOTE(mdbooth): Although libvirt will create this file for us # automatically when it starts, it will initially create it with # root ownership and then chown it depending on the configuration of # the domain it is launching. Quobyte CI explicitly disables the # chown by setting dynamic_ownership=0 in libvirt's config. # Consequently when the domain starts it is unable to write to its # console.log. See bug https://bugs.launchpad.net/nova/+bug/1597644 # # To work around this, we create the file manually before starting # the domain so it has the same ownership as Nova. 
This works # for Quobyte CI because it is also configured to run qemu as the same # user as the Nova service. Installations which don't set # dynamic_ownership=0 are not affected because libvirt will always # correctly configure permissions regardless of initial ownership. # # Setting dynamic_ownership=0 is dubious and potentially broken in # more ways than console.log (see comment #22 on the above bug), so # Future Maintainer who finds this code problematic should check to see # if we still support it. console_file = self._get_console_log_path(instance) LOG.debug('Ensure instance console log exists: %s', console_file, instance=instance) try: libvirt_utils.file_open(console_file, 'a').close() # NOTE(sfinucan): We can safely ignore permission issues here and # assume that it is libvirt that has taken ownership of this file. except IOError as ex: if ex.errno != errno.EACCES: raise LOG.debug('Console file already exists: %s.', console_file) @staticmethod def _get_disk_config_image_type(): # TODO(mikal): there is a bug here if images_type has # changed since creation of the instance, but I am pretty # sure that this bug already exists. return 'rbd' if CONF.libvirt.images_type == 'rbd' else 'raw' @staticmethod def _is_booted_from_volume(block_device_info): """Determines whether the VM is booting from volume Determines whether the block device info indicates that the VM is booting from a volume. """ block_device_mapping = driver.block_device_info_get_mapping( block_device_info) return bool(block_device.get_root_bdm(block_device_mapping)) def _inject_data(self, disk, instance, injection_info): """Injects data in a disk image Helper used for injecting data in a disk image file system. :param disk: The disk we're injecting into (an Image object) :param instance: The instance we're injecting into :param injection_info: Injection info """ # Handles the partition need to be used. LOG.debug('Checking root disk injection %s', str(injection_info), instance=instance) target_partition = None if not instance.kernel_id: target_partition = CONF.libvirt.inject_partition if target_partition == 0: target_partition = None if CONF.libvirt.virt_type == 'lxc': target_partition = None # Handles the key injection. if CONF.libvirt.inject_key and instance.get('key_data'): key = str(instance.key_data) else: key = None # Handles the admin password injection. if not CONF.libvirt.inject_password: admin_pass = None else: admin_pass = injection_info.admin_pass # Handles the network injection. 
net = netutils.get_injected_network_template( injection_info.network_info, libvirt_virt_type=CONF.libvirt.virt_type) # Handles the metadata injection metadata = instance.get('metadata') if any((key, net, metadata, admin_pass, injection_info.files)): LOG.debug('Injecting %s', str(injection_info), instance=instance) img_id = instance.image_ref try: disk_api.inject_data(disk.get_model(self._conn), key, net, metadata, admin_pass, injection_info.files, partition=target_partition, mandatory=('files',)) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error('Error injecting data into image ' '%(img_id)s (%(e)s)', {'img_id': img_id, 'e': e}, instance=instance) # NOTE(sileht): many callers of this method assume that this # method doesn't fail if an image already exists but instead # think that it will be reused (ie: (live)-migration/resize) def _create_image(self, context, instance, disk_mapping, injection_info=None, suffix='', disk_images=None, block_device_info=None, fallback_from_host=None, ignore_bdi_for_swap=False): booted_from_volume = self._is_booted_from_volume(block_device_info) def image(fname, image_type=CONF.libvirt.images_type): return self.image_backend.by_name(instance, fname + suffix, image_type) def raw(fname): return image(fname, image_type='raw') # ensure directories exist and are writable fileutils.ensure_tree(libvirt_utils.get_instance_path(instance)) LOG.info('Creating image', instance=instance) inst_type = instance.get_flavor() swap_mb = 0 if 'disk.swap' in disk_mapping: mapping = disk_mapping['disk.swap'] if ignore_bdi_for_swap: # This is a workaround to support legacy swap resizing, # which does not touch swap size specified in bdm, # but works with flavor specified size only. # In this case we follow the legacy logic and ignore block # device info completely. # NOTE(ft): This workaround must be removed when a correct # implementation of resize operation changing sizes in bdms is # developed. Also at that stage we probably may get rid of # the direct usage of flavor swap size here, # leaving the work with bdm only. swap_mb = inst_type['swap'] else: swap = driver.block_device_info_get_swap(block_device_info) if driver.swap_is_usable(swap): swap_mb = swap['swap_size'] elif (inst_type['swap'] > 0 and not block_device.volume_in_mapping( mapping['dev'], block_device_info)): swap_mb = inst_type['swap'] if swap_mb > 0: if (CONF.libvirt.virt_type == "parallels" and instance.vm_mode == fields.VMMode.EXE): msg = _("Swap disk is not supported " "for Virtuozzo container") raise exception.Invalid(msg) if not disk_images: disk_images = {'image_id': instance.image_ref, 'kernel_id': instance.kernel_id, 'ramdisk_id': instance.ramdisk_id} if disk_images['kernel_id']: fname = imagecache.get_cache_fname(disk_images['kernel_id']) raw('kernel').cache(fetch_func=libvirt_utils.fetch_raw_image, context=context, filename=fname, image_id=disk_images['kernel_id']) if disk_images['ramdisk_id']: fname = imagecache.get_cache_fname(disk_images['ramdisk_id']) raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_raw_image, context=context, filename=fname, image_id=disk_images['ramdisk_id']) if CONF.libvirt.virt_type == 'uml': # PONDERING(mikal): can I assume that root is UID zero in every # OS? Probably not. 
uid = pwd.getpwnam('root').pw_uid nova.privsep.path.chown(image('disk').path, uid=uid) self._create_and_inject_local_root(context, instance, booted_from_volume, suffix, disk_images, injection_info, fallback_from_host) # Lookup the filesystem type if required os_type_with_default = nova.privsep.fs.get_fs_type_for_os_type( instance.os_type) # Generate a file extension based on the file system # type and the mkfs commands configured if any file_extension = nova.privsep.fs.get_file_extension_for_os_type( os_type_with_default, CONF.default_ephemeral_format) vm_mode = fields.VMMode.get_from_instance(instance) ephemeral_gb = instance.flavor.ephemeral_gb if 'disk.local' in disk_mapping: disk_image = image('disk.local') fn = functools.partial(self._create_ephemeral, fs_label='ephemeral0', os_type=instance.os_type, is_block_dev=disk_image.is_block_dev, vm_mode=vm_mode) fname = "ephemeral_%s_%s" % (ephemeral_gb, file_extension) size = ephemeral_gb * units.Gi disk_image.cache(fetch_func=fn, context=context, filename=fname, size=size, ephemeral_size=ephemeral_gb) for idx, eph in enumerate(driver.block_device_info_get_ephemerals( block_device_info)): disk_image = image(blockinfo.get_eph_disk(idx)) specified_fs = eph.get('guest_format') if specified_fs and not self.is_supported_fs_format(specified_fs): msg = _("%s format is not supported") % specified_fs raise exception.InvalidBDMFormat(details=msg) fn = functools.partial(self._create_ephemeral, fs_label='ephemeral%d' % idx, os_type=instance.os_type, is_block_dev=disk_image.is_block_dev, vm_mode=vm_mode) size = eph['size'] * units.Gi fname = "ephemeral_%s_%s" % (eph['size'], file_extension) disk_image.cache(fetch_func=fn, context=context, filename=fname, size=size, ephemeral_size=eph['size'], specified_fs=specified_fs) if swap_mb > 0: size = swap_mb * units.Mi image('disk.swap').cache(fetch_func=self._create_swap, context=context, filename="swap_%s" % swap_mb, size=size, swap_mb=swap_mb) def _create_and_inject_local_root(self, context, instance, booted_from_volume, suffix, disk_images, injection_info, fallback_from_host): # File injection only if needed need_inject = (not configdrive.required_by(instance) and injection_info is not None and CONF.libvirt.inject_partition != -2) # NOTE(ndipanov): Even if disk_mapping was passed in, which # currently happens only on rescue - we still don't want to # create a base image. 
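        # For an image-backed root disk the code below either clones the
        # image directly in the backend (when the backend supports it, e.g.
        # rbd) or fetches it into the image cache and creates the instance
        # disk from it; file injection, when enabled, is then performed on
        # the resulting local root disk.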
if not booted_from_volume: root_fname = imagecache.get_cache_fname(disk_images['image_id']) size = instance.flavor.root_gb * units.Gi if size == 0 or suffix == '.rescue': size = None backend = self.image_backend.by_name(instance, 'disk' + suffix, CONF.libvirt.images_type) if instance.task_state == task_states.RESIZE_FINISH: backend.create_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME) if backend.SUPPORTS_CLONE: def clone_fallback_to_fetch(*args, **kwargs): try: backend.clone(context, disk_images['image_id']) except exception.ImageUnacceptable: libvirt_utils.fetch_image(*args, **kwargs) fetch_func = clone_fallback_to_fetch else: fetch_func = libvirt_utils.fetch_image self._try_fetch_image_cache(backend, fetch_func, context, root_fname, disk_images['image_id'], instance, size, fallback_from_host) if need_inject: self._inject_data(backend, instance, injection_info) elif need_inject: LOG.warning('File injection into a boot from volume ' 'instance is not supported', instance=instance) def _create_configdrive(self, context, instance, injection_info, rescue=False): # As this method being called right after the definition of a # domain, but before its actual launch, device metadata will be built # and saved in the instance for it to be used by the config drive and # the metadata service. instance.device_metadata = self._build_device_metadata(context, instance) if configdrive.required_by(instance): LOG.info('Using config drive', instance=instance) name = 'disk.config' if rescue: name += '.rescue' config_disk = self.image_backend.by_name( instance, name, self._get_disk_config_image_type()) # Don't overwrite an existing config drive if not config_disk.exists(): extra_md = {} if injection_info.admin_pass: extra_md['admin_pass'] = injection_info.admin_pass inst_md = instance_metadata.InstanceMetadata( instance, content=injection_info.files, extra_md=extra_md, network_info=injection_info.network_info, request_context=context) cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md) with cdb: # NOTE(mdbooth): We're hardcoding here the path of the # config disk when using the flat backend. This isn't # good, but it's required because we need a local path we # know we can write to in case we're subsequently # importing into rbd. This will be cleaned up when we # replace this with a call to create_from_func, but that # can't happen until we've updated the backends and we # teach them not to cache config disks. This isn't # possible while we're still using cache() under the hood. 
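                    # The config drive is first built as a local file under
                    # the instance directory and then imported into the
                    # configured image backend; when that backend is rbd the
                    # local copy is removed afterwards.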
config_disk_local_path = os.path.join( libvirt_utils.get_instance_path(instance), name) LOG.info('Creating config drive at %(path)s', {'path': config_disk_local_path}, instance=instance) try: cdb.make_drive(config_disk_local_path) except processutils.ProcessExecutionError as e: with excutils.save_and_reraise_exception(): LOG.error('Creating config drive failed with ' 'error: %s', e, instance=instance) try: config_disk.import_file( instance, config_disk_local_path, name) finally: # NOTE(mikal): if the config drive was imported into RBD, # then we no longer need the local copy if CONF.libvirt.images_type == 'rbd': LOG.info('Deleting local config drive %(path)s ' 'because it was imported into RBD.', {'path': config_disk_local_path}, instance=instance) os.unlink(config_disk_local_path) def _prepare_pci_devices_for_use(self, pci_devices): # kvm , qemu support managed mode # In managed mode, the configured device will be automatically # detached from the host OS drivers when the guest is started, # and then re-attached when the guest shuts down. if CONF.libvirt.virt_type != 'xen': # we do manual detach only for xen return try: for dev in pci_devices: libvirt_dev_addr = dev['hypervisor_name'] libvirt_dev = \ self._host.device_lookup_by_name(libvirt_dev_addr) # Note(yjiang5) Spelling for 'dettach' is correct, see # http://libvirt.org/html/libvirt-libvirt.html. libvirt_dev.dettach() # Note(yjiang5): A reset of one PCI device may impact other # devices on the same bus, thus we need two separated loops # to detach and then reset it. for dev in pci_devices: libvirt_dev_addr = dev['hypervisor_name'] libvirt_dev = \ self._host.device_lookup_by_name(libvirt_dev_addr) libvirt_dev.reset() except libvirt.libvirtError as exc: raise exception.PciDevicePrepareFailed(id=dev['id'], instance_uuid= dev['instance_uuid'], reason=six.text_type(exc)) def _detach_pci_devices(self, guest, pci_devs): try: for dev in pci_devs: guest.detach_device(self._get_guest_pci_device(dev), live=True) # after detachDeviceFlags returned, we should check the dom to # ensure the detaching is finished xml = guest.get_xml_desc() xml_doc = etree.fromstring(xml) guest_config = vconfig.LibvirtConfigGuest() guest_config.parse_dom(xml_doc) for hdev in [d for d in guest_config.devices if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]: hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function] dbsf = pci_utils.parse_address(dev.address) if [int(x, 16) for x in hdbsf] ==\ [int(x, 16) for x in dbsf]: raise exception.PciDeviceDetachFailed(reason= "timeout", dev=dev) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: LOG.warning("Instance disappeared while detaching " "a PCI device from it.") else: raise def _attach_pci_devices(self, guest, pci_devs): try: for dev in pci_devs: guest.attach_device(self._get_guest_pci_device(dev)) except libvirt.libvirtError: LOG.error('Attaching PCI devices %(dev)s to %(dom)s failed.', {'dev': pci_devs, 'dom': guest.id}) raise @staticmethod def _has_direct_passthrough_port(network_info): for vif in network_info: if (vif['vnic_type'] in network_model.VNIC_TYPES_DIRECT_PASSTHROUGH): return True return False def _attach_direct_passthrough_ports( self, context, instance, guest, network_info=None): if network_info is None: network_info = instance.info_cache.network_info if network_info is None: return if self._has_direct_passthrough_port(network_info): for vif in network_info: if (vif['vnic_type'] in network_model.VNIC_TYPES_DIRECT_PASSTHROUGH): cfg = 
self.vif_driver.get_config(instance, vif, instance.image_meta, instance.flavor, CONF.libvirt.virt_type, self._host) LOG.debug('Attaching direct passthrough port %(port)s ' 'to %(dom)s', {'port': vif, 'dom': guest.id}, instance=instance) guest.attach_device(cfg) def _detach_direct_passthrough_ports(self, context, instance, guest): network_info = instance.info_cache.network_info if network_info is None: return if self._has_direct_passthrough_port(network_info): # In case of VNIC_TYPES_DIRECT_PASSTHROUGH ports we create # pci request per direct passthrough port. Therefore we can trust # that pci_slot value in the vif is correct. direct_passthrough_pci_addresses = [ vif['profile']['pci_slot'] for vif in network_info if (vif['vnic_type'] in network_model.VNIC_TYPES_DIRECT_PASSTHROUGH and vif['profile'].get('pci_slot') is not None) ] # use detach_pci_devices to avoid failure in case of # multiple guest direct passthrough ports with the same MAC # (protection use-case, ports are on different physical # interfaces) pci_devs = pci_manager.get_instance_pci_devs(instance, 'all') direct_passthrough_pci_addresses = ( [pci_dev for pci_dev in pci_devs if pci_dev.address in direct_passthrough_pci_addresses]) self._detach_pci_devices(guest, direct_passthrough_pci_addresses) def _set_host_enabled(self, enabled, disable_reason=DISABLE_REASON_UNDEFINED): """Enables / Disables the compute service on this host. This doesn't override non-automatic disablement with an automatic setting; thereby permitting operators to keep otherwise healthy hosts out of rotation. """ status_name = {True: 'disabled', False: 'enabled'} disable_service = not enabled ctx = nova_context.get_admin_context() try: service = objects.Service.get_by_compute_host(ctx, CONF.host) if service.disabled != disable_service: # Note(jang): this is a quick fix to stop operator- # disabled compute hosts from re-enabling themselves # automatically. We prefix any automatic reason code # with a fixed string. We only re-enable a host # automatically if we find that string in place. # This should probably be replaced with a separate flag. if not service.disabled or ( service.disabled_reason and service.disabled_reason.startswith(DISABLE_PREFIX)): service.disabled = disable_service service.disabled_reason = ( DISABLE_PREFIX + disable_reason if disable_service and disable_reason else DISABLE_REASON_UNDEFINED) service.save() LOG.debug('Updating compute service status to %s', status_name[disable_service]) else: LOG.debug('Not overriding manual compute service ' 'status with: %s', status_name[disable_service]) except exception.ComputeHostNotFound: LOG.warning('Cannot update service status on host "%s" ' 'since it is not registered.', CONF.host) except Exception: LOG.warning('Cannot update service status on host "%s" ' 'due to an unexpected exception.', CONF.host, exc_info=True) if enabled: mount.get_manager().host_up(self._host) else: mount.get_manager().host_down() def _get_guest_cpu_model_config(self): mode = CONF.libvirt.cpu_mode model = CONF.libvirt.cpu_model extra_flags = CONF.libvirt.cpu_model_extra_flags if (CONF.libvirt.virt_type == "kvm" or CONF.libvirt.virt_type == "qemu"): if mode is None: caps = self._host.get_capabilities() # AArch64 lacks 'host-model' support because neither libvirt # nor QEMU are able to tell what the host CPU model exactly is. # And there is no CPU description code for ARM(64) at this # point. 
# Also worth noting: 'host-passthrough' mode will completely # break live migration, *unless* all the Compute nodes (running # libvirtd) have *identical* CPUs. if caps.host.cpu.arch == fields.Architecture.AARCH64: mode = "host-passthrough" LOG.info('CPU mode "host-passthrough" was chosen. Live ' 'migration can break unless all compute nodes ' 'have identical cpus. AArch64 does not support ' 'other modes.') else: mode = "host-model" if mode == "none": return vconfig.LibvirtConfigGuestCPU() else: if mode is None or mode == "none": return None if ((CONF.libvirt.virt_type != "kvm" and CONF.libvirt.virt_type != "qemu")): msg = _("Config requested an explicit CPU model, but " "the current libvirt hypervisor '%s' does not " "support selecting CPU models") % CONF.libvirt.virt_type raise exception.Invalid(msg) if mode == "custom" and model is None: msg = _("Config requested a custom CPU model, but no " "model name was provided") raise exception.Invalid(msg) elif mode != "custom" and model is not None: msg = _("A CPU model name should not be set when a " "host CPU model is requested") raise exception.Invalid(msg) # FIXME (kchamart): We're intentionally restricting the choices # (in the conf/libvirt.py) for 'extra_flags` to just 'PCID', to # address the immediate guest performance degradation caused by # "Meltdown" CVE fixes on certain Intel CPU models. In a future # patch, we will: # (a) Remove the restriction of choices for 'extra_flags', # allowing to add / remove additional CPU flags, as it will # make way for other useful features. # (b) Remove the below check for "host-model", as it is a # valid configuration to supply additional CPU flags to it. # (c) Revisit and fix the warnings / exception handling for # different combinations of CPU modes and 'extra_flags'. elif ((mode == "host-model" or mode == "host-passthrough") and extra_flags): extra_flags = [] LOG.warning("Setting extra CPU flags is only valid in " "combination with a custom CPU model. Refer " "to the 'nova.conf' documentation for " "'[libvirt]/cpu_model_extra_flags'") LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen, " "with extra flags: '%(extra_flags)s'", {'mode': mode, 'model': (model or ""), 'extra_flags': (extra_flags or "")}) cpu = vconfig.LibvirtConfigGuestCPU() cpu.mode = mode cpu.model = model # NOTE (kchamart): Currently there's no existing way to ask if a # given CPU model + CPU flags combination is supported by KVM & # a specific QEMU binary. However, libvirt runs the 'CPUID' # command upfront -- before even a Nova instance (a QEMU # process) is launched -- to construct CPU models and check # their validity; so we are good there. In the long-term, # upstream libvirt intends to add an additional new API that can # do fine-grained validation of a certain CPU model + CPU flags # against a specific QEMU binary (the libvirt RFE bug for that: # https://bugzilla.redhat.com/show_bug.cgi?id=1559832). 
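        # Illustrative example (the option values below are assumptions,
        # not taken from this module): with a nova.conf containing
        #   [libvirt]
        #   cpu_mode = custom
        #   cpu_model = IvyBridge
        #   cpu_model_extra_flags = pcid
        # the loop below adds a 'pcid' feature entry to the generated
        # guest <cpu> definition.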
for flag in extra_flags: cpu.add_feature(vconfig.LibvirtConfigGuestCPUFeature(flag)) return cpu def _get_guest_cpu_config(self, flavor, image_meta, guest_cpu_numa_config, instance_numa_topology): cpu = self._get_guest_cpu_model_config() if cpu is None: return None topology = hardware.get_best_cpu_topology( flavor, image_meta, numa_topology=instance_numa_topology) cpu.sockets = topology.sockets cpu.cores = topology.cores cpu.threads = topology.threads cpu.numa = guest_cpu_numa_config return cpu def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type, image_type=None): disk_unit = None disk = self.image_backend.by_name(instance, name, image_type) if (name == 'disk.config' and image_type == 'rbd' and not disk.exists()): # This is likely an older config drive that has not been migrated # to rbd yet. Try to fall back on 'flat' image type. # TODO(melwitt): Add online migration of some sort so we can # remove this fall back once we know all config drives are in rbd. # NOTE(vladikr): make sure that the flat image exist, otherwise # the image will be created after the domain definition. flat_disk = self.image_backend.by_name(instance, name, 'flat') if flat_disk.exists(): disk = flat_disk LOG.debug('Config drive not found in RBD, falling back to the ' 'instance directory', instance=instance) disk_info = disk_mapping[name] if 'unit' in disk_mapping and disk_info['bus'] == 'scsi': disk_unit = disk_mapping['unit'] disk_mapping['unit'] += 1 # Increments for the next disk added conf = disk.libvirt_info(disk_info['bus'], disk_info['dev'], disk_info['type'], self.disk_cachemode, inst_type['extra_specs'], self._host.get_version(), disk_unit=disk_unit) return conf def _get_guest_fs_config(self, instance, name, image_type=None): disk = self.image_backend.by_name(instance, name, image_type) return disk.libvirt_fs_info("/", "ploop") def _get_guest_storage_config(self, context, instance, image_meta, disk_info, rescue, block_device_info, inst_type, os_type): devices = [] disk_mapping = disk_info['mapping'] block_device_mapping = driver.block_device_info_get_mapping( block_device_info) mount_rootfs = CONF.libvirt.virt_type == "lxc" scsi_controller = self._get_scsi_controller(image_meta) if scsi_controller and scsi_controller.model == 'virtio-scsi': # The virtio-scsi can handle up to 256 devices but the # optional element "address" must be defined to describe # where the device is placed on the controller (see: # LibvirtConfigGuestDeviceAddressDrive). # # Note about why it's added in disk_mapping: It's not # possible to pass an 'int' by reference in Python, so we # use disk_mapping as container to keep reference of the # unit added and be able to increment it for each disk # added. # # NOTE(jaypipes,melwitt): If this is a boot-from-volume instance, # we need to start the disk mapping unit at 1 since we set the # bootable volume's unit to 0 for the bootable volume. 
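            # For example (hypothetical layout): a boot-from-volume guest
            # with one ephemeral disk and one extra data volume ends up
            # with the boot volume at unit 0 and the remaining disks at
            # units 1 and 2 on the virtio-scsi controller.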
disk_mapping['unit'] = 0 if self._is_booted_from_volume(block_device_info): disk_mapping['unit'] = 1 def _get_ephemeral_devices(): eph_devices = [] for idx, eph in enumerate( driver.block_device_info_get_ephemerals( block_device_info)): diskeph = self._get_guest_disk_config( instance, blockinfo.get_eph_disk(idx), disk_mapping, inst_type) eph_devices.append(diskeph) return eph_devices if mount_rootfs: fs = vconfig.LibvirtConfigGuestFilesys() fs.source_type = "mount" fs.source_dir = os.path.join( libvirt_utils.get_instance_path(instance), 'rootfs') devices.append(fs) elif (os_type == fields.VMMode.EXE and CONF.libvirt.virt_type == "parallels"): if rescue: fsrescue = self._get_guest_fs_config(instance, "disk.rescue") devices.append(fsrescue) fsos = self._get_guest_fs_config(instance, "disk") fsos.target_dir = "/mnt/rescue" devices.append(fsos) else: if 'disk' in disk_mapping: fs = self._get_guest_fs_config(instance, "disk") devices.append(fs) devices = devices + _get_ephemeral_devices() else: if rescue: diskrescue = self._get_guest_disk_config(instance, 'disk.rescue', disk_mapping, inst_type) devices.append(diskrescue) diskos = self._get_guest_disk_config(instance, 'disk', disk_mapping, inst_type) devices.append(diskos) else: if 'disk' in disk_mapping: diskos = self._get_guest_disk_config(instance, 'disk', disk_mapping, inst_type) devices.append(diskos) if 'disk.local' in disk_mapping: disklocal = self._get_guest_disk_config(instance, 'disk.local', disk_mapping, inst_type) devices.append(disklocal) instance.default_ephemeral_device = ( block_device.prepend_dev(disklocal.target_dev)) devices = devices + _get_ephemeral_devices() if 'disk.swap' in disk_mapping: diskswap = self._get_guest_disk_config(instance, 'disk.swap', disk_mapping, inst_type) devices.append(diskswap) instance.default_swap_device = ( block_device.prepend_dev(diskswap.target_dev)) config_name = 'disk.config.rescue' if rescue else 'disk.config' if config_name in disk_mapping: diskconfig = self._get_guest_disk_config( instance, config_name, disk_mapping, inst_type, self._get_disk_config_image_type()) devices.append(diskconfig) for vol in block_device.get_bdms_to_connect(block_device_mapping, mount_rootfs): connection_info = vol['connection_info'] vol_dev = block_device.prepend_dev(vol['mount_device']) info = disk_mapping[vol_dev] self._connect_volume(context, connection_info, instance) if scsi_controller and scsi_controller.model == 'virtio-scsi': # Check if this is the bootable volume when in a # boot-from-volume instance, and if so, ensure the unit # attribute is 0. if vol.get('boot_index') == 0: info['unit'] = 0 else: info['unit'] = disk_mapping['unit'] disk_mapping['unit'] += 1 cfg = self._get_volume_config(connection_info, info) devices.append(cfg) vol['connection_info'] = connection_info vol.save() for d in devices: self._set_cache_mode(d) if scsi_controller: devices.append(scsi_controller) return devices @staticmethod def _get_scsi_controller(image_meta): """Return scsi controller or None based on image meta""" if image_meta.properties.get('hw_scsi_model'): hw_scsi_model = image_meta.properties.hw_scsi_model scsi_controller = vconfig.LibvirtConfigGuestController() scsi_controller.type = 'scsi' scsi_controller.model = hw_scsi_model scsi_controller.index = 0 return scsi_controller def _get_host_sysinfo_serial_hardware(self): """Get a UUID from the host hardware Get a UUID for the host hardware reported by libvirt. 
This is typically from the SMBIOS data, unless it has been overridden in /etc/libvirt/libvirtd.conf """ caps = self._host.get_capabilities() return caps.host.uuid def _get_host_sysinfo_serial_os(self): """Get a UUID from the host operating system Get a UUID for the host operating system. Modern Linux distros based on systemd provide a /etc/machine-id file containing a UUID. This is also provided inside systemd based containers and can be provided by other init systems too, since it is just a plain text file. """ if not os.path.exists("/etc/machine-id"): msg = _("Unable to get host UUID: /etc/machine-id does not exist") raise exception.InternalError(msg) with open("/etc/machine-id") as f: # We want to have '-' in the right place # so we parse & reformat the value lines = f.read().split() if not lines: msg = _("Unable to get host UUID: /etc/machine-id is empty") raise exception.InternalError(msg) return str(uuid.UUID(lines[0])) def _get_host_sysinfo_serial_auto(self): if os.path.exists("/etc/machine-id"): return self._get_host_sysinfo_serial_os() else: return self._get_host_sysinfo_serial_hardware() def _get_guest_config_sysinfo(self, instance): sysinfo = vconfig.LibvirtConfigGuestSysinfo() sysinfo.system_manufacturer = version.vendor_string() sysinfo.system_product = version.product_string() sysinfo.system_version = version.version_string_with_package() sysinfo.system_serial = self._sysinfo_serial_func() sysinfo.system_uuid = instance.uuid sysinfo.system_family = "Virtual Machine" return sysinfo def _get_guest_pci_device(self, pci_device): dbsf = pci_utils.parse_address(pci_device.address) dev = vconfig.LibvirtConfigGuestHostdevPCI() dev.domain, dev.bus, dev.slot, dev.function = dbsf # only kvm support managed mode if CONF.libvirt.virt_type in ('xen', 'parallels',): dev.managed = 'no' if CONF.libvirt.virt_type in ('kvm', 'qemu'): dev.managed = 'yes' return dev def _get_guest_config_meta(self, instance): """Get metadata config for guest.""" meta = vconfig.LibvirtConfigGuestMetaNovaInstance() meta.package = version.version_string_with_package() meta.name = instance.display_name meta.creationTime = time.time() if instance.image_ref not in ("", None): meta.roottype = "image" meta.rootid = instance.image_ref system_meta = instance.system_metadata ometa = vconfig.LibvirtConfigGuestMetaNovaOwner() ometa.userid = instance.user_id ometa.username = system_meta.get('owner_user_name', 'N/A') ometa.projectid = instance.project_id ometa.projectname = system_meta.get('owner_project_name', 'N/A') meta.owner = ometa fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor() flavor = instance.flavor fmeta.name = flavor.name fmeta.memory = flavor.memory_mb fmeta.vcpus = flavor.vcpus fmeta.ephemeral = flavor.ephemeral_gb fmeta.disk = flavor.root_gb fmeta.swap = flavor.swap meta.flavor = fmeta return meta def _machine_type_mappings(self): mappings = {} for mapping in CONF.libvirt.hw_machine_type: host_arch, _, machine_type = mapping.partition('=') mappings[host_arch] = machine_type return mappings def _get_machine_type(self, image_meta, caps): # The underlying machine type can be set as an image attribute, # or otherwise based on some architecture specific defaults mach_type = None if image_meta.properties.get('hw_machine_type') is not None: mach_type = image_meta.properties.hw_machine_type else: # For ARM systems we will default to vexpress-a15 for armv7 # and virt for aarch64 if caps.host.cpu.arch == fields.Architecture.ARMV7: mach_type = "vexpress-a15" if caps.host.cpu.arch == fields.Architecture.AARCH64: mach_type 
= "virt" if caps.host.cpu.arch in (fields.Architecture.S390, fields.Architecture.S390X): mach_type = 's390-ccw-virtio' # If set in the config, use that as the default. if CONF.libvirt.hw_machine_type: mappings = self._machine_type_mappings() mach_type = mappings.get(caps.host.cpu.arch) return mach_type @staticmethod def _create_idmaps(klass, map_strings): idmaps = [] if len(map_strings) > 5: map_strings = map_strings[0:5] LOG.warning("Too many id maps, only included first five.") for map_string in map_strings: try: idmap = klass() values = [int(i) for i in map_string.split(":")] idmap.start = values[0] idmap.target = values[1] idmap.count = values[2] idmaps.append(idmap) except (ValueError, IndexError): LOG.warning("Invalid value for id mapping %s", map_string) return idmaps def _get_guest_idmaps(self): id_maps = [] if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.uid_maps: uid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestUIDMap, CONF.libvirt.uid_maps) id_maps.extend(uid_maps) if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.gid_maps: gid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestGIDMap, CONF.libvirt.gid_maps) id_maps.extend(gid_maps) return id_maps def _update_guest_cputune(self, guest, flavor, virt_type): is_able = self._host.is_cpu_control_policy_capable() cputuning = ['shares', 'period', 'quota'] wants_cputune = any([k for k in cputuning if "quota:cpu_" + k in flavor.extra_specs.keys()]) if wants_cputune and not is_able: raise exception.UnsupportedHostCPUControlPolicy() if not is_able or virt_type not in ('lxc', 'kvm', 'qemu'): return if guest.cputune is None: guest.cputune = vconfig.LibvirtConfigGuestCPUTune() # Setting the default cpu.shares value to be a value # dependent on the number of vcpus guest.cputune.shares = 1024 * guest.vcpus for name in cputuning: key = "quota:cpu_" + name if key in flavor.extra_specs: setattr(guest.cputune, name, int(flavor.extra_specs[key])) def _get_cpu_numa_config_from_instance(self, instance_numa_topology, wants_hugepages): if instance_numa_topology: guest_cpu_numa = vconfig.LibvirtConfigGuestCPUNUMA() for instance_cell in instance_numa_topology.cells: guest_cell = vconfig.LibvirtConfigGuestCPUNUMACell() guest_cell.id = instance_cell.id guest_cell.cpus = instance_cell.cpuset guest_cell.memory = instance_cell.memory * units.Ki # The vhost-user network backend requires file backed # guest memory (ie huge pages) to be marked as shared # access, not private, so an external process can read # and write the pages. # # You can't change the shared vs private flag for an # already running guest, and since we can't predict what # types of NIC may be hotplugged, we have no choice but # to unconditionally turn on the shared flag. 
This has # no real negative functional effect on the guest, so # is a reasonable approach to take if wants_hugepages: guest_cell.memAccess = "shared" guest_cpu_numa.cells.append(guest_cell) return guest_cpu_numa def _has_cpu_policy_support(self): for ver in BAD_LIBVIRT_CPU_POLICY_VERSIONS: if self._host.has_version(ver): ver_ = libvirt_utils.version_to_string(ver) raise exception.CPUPinningNotSupported(reason=_( 'Invalid libvirt version %(version)s') % {'version': ver_}) return True def _wants_hugepages(self, host_topology, instance_topology): """Determine if the guest / host topology implies the use of huge pages for guest RAM backing """ if host_topology is None or instance_topology is None: return False avail_pagesize = [page.size_kb for page in host_topology.cells[0].mempages] avail_pagesize.sort() # Remove smallest page size as that's not classed as a largepage avail_pagesize = avail_pagesize[1:] # See if we have page size set for cell in instance_topology.cells: if (cell.pagesize is not None and cell.pagesize in avail_pagesize): return True return False def _get_cell_pairs(self, guest_cpu_numa_config, host_topology): """Returns the lists of pairs(tuple) of an instance cell and corresponding host cell: [(LibvirtConfigGuestCPUNUMACell, NUMACell), ...] """ cell_pairs = [] for guest_config_cell in guest_cpu_numa_config.cells: for host_cell in host_topology.cells: if guest_config_cell.id == host_cell.id: cell_pairs.append((guest_config_cell, host_cell)) return cell_pairs def _get_pin_cpuset(self, vcpu, object_numa_cell, host_cell): """Returns the config object of LibvirtConfigGuestCPUTuneVCPUPin. Prepares vcpupin config for the guest with the following caveats: a) If there is pinning information in the cell, we pin vcpus to individual CPUs b) Otherwise we float over the whole host NUMA node """ pin_cpuset = vconfig.LibvirtConfigGuestCPUTuneVCPUPin() pin_cpuset.id = vcpu if object_numa_cell.cpu_pinning and self._has_cpu_policy_support(): pin_cpuset.cpuset = set([object_numa_cell.cpu_pinning[vcpu]]) else: pin_cpuset.cpuset = host_cell.cpuset return pin_cpuset def _get_emulatorpin_cpuset(self, vcpu, object_numa_cell, vcpus_rt, emulator_threads_isolated, wants_realtime, pin_cpuset): """Returns a set of cpu_ids to add to the cpuset for emulator threads with the following caveats: a) If emulator threads policy is isolated, we pin emulator threads to one cpu we have reserved for it. b) Otherwise; b1) If realtime IS NOT enabled, the emulator threads are allowed to float cross all the pCPUs associated with the guest vCPUs. b2) If realtime IS enabled, at least 1 vCPU is required to be set aside for non-realtime usage. The emulator threads are allowed to float across the pCPUs that are associated with the non-realtime VCPUs. """ emulatorpin_cpuset = set([]) if emulator_threads_isolated: if object_numa_cell.cpuset_reserved: emulatorpin_cpuset = object_numa_cell.cpuset_reserved elif not wants_realtime or vcpu not in vcpus_rt: emulatorpin_cpuset = pin_cpuset.cpuset return emulatorpin_cpuset def _get_guest_numa_config(self, instance_numa_topology, flavor, allowed_cpus=None, image_meta=None): """Returns the config objects for the guest NUMA specs. Determines the CPUs that the guest can be pinned to if the guest specifies a cell topology and the host supports it. Constructs the libvirt XML config object representing the NUMA topology selected for the guest. 
        Returns a tuple of:

            (cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)

        With the following caveats:

            a) If there is no specified guest NUMA topology, then
               all tuple elements except cpu_set shall be None. cpu_set
               will be populated with the chosen CPUs that the guest
               allowed CPUs fit within, which could be the supplied
               allowed_cpus value if the host doesn't support NUMA
               topologies.

            b) If there is a specified guest NUMA topology, then
               cpu_set will be None and guest_cpu_numa will be the
               LibvirtConfigGuestCPUNUMA object representing the guest's
               NUMA topology. If the host supports NUMA, then
               guest_cpu_tune will contain a LibvirtConfigGuestCPUTune
               object representing the optimized chosen cells that match
               the host capabilities with the instance's requested
               topology. If the host does not support NUMA, then
               guest_cpu_tune and guest_numa_tune will be None.
        """

        if (not self._has_numa_support() and
                instance_numa_topology is not None):
            # We should not get here, since we should have avoided
            # reporting NUMA topology from _get_host_numa_topology
            # in the first place. Just in case of a scheduler
            # mess up though, raise an exception
            raise exception.NUMATopologyUnsupported()

        topology = self._get_host_numa_topology()

        # We have instance NUMA so translate it to the config class
        guest_cpu_numa_config = self._get_cpu_numa_config_from_instance(
            instance_numa_topology,
            self._wants_hugepages(topology, instance_numa_topology))

        if not guest_cpu_numa_config:
            # No NUMA topology defined for instance - let the host kernel deal
            # with the NUMA effects.
            # TODO(ndipanov): Attempt to spread the instance
            # across NUMA nodes and expose the topology to the
            # instance as an optimisation
            return GuestNumaConfig(allowed_cpus, None, None, None)

        if not topology:
            # No NUMA topology defined for host - This will only happen with
            # some libvirt versions and certain platforms.
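            # In this branch the guest NUMA layout is still passed through
            # as guest_cpu_numa_config, but no CPU pinning or NUMA memory
            # tuning is generated, since there is no host topology to pin
            # against.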
return GuestNumaConfig(allowed_cpus, None, guest_cpu_numa_config, None) # Now get configuration from the numa_topology # Init CPUTune configuration guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune() guest_cpu_tune.emulatorpin = ( vconfig.LibvirtConfigGuestCPUTuneEmulatorPin()) guest_cpu_tune.emulatorpin.cpuset = set([]) # Init NUMATune configuration guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune() guest_numa_tune.memory = vconfig.LibvirtConfigGuestNUMATuneMemory() guest_numa_tune.memnodes = [] emulator_threads_isolated = ( instance_numa_topology.emulator_threads_isolated) # Set realtime scheduler for CPUTune vcpus_rt = set([]) wants_realtime = hardware.is_realtime_enabled(flavor) if wants_realtime: if not self._host.has_min_version(MIN_LIBVIRT_REALTIME_VERSION): raise exception.RealtimePolicyNotSupported() vcpus_rt = hardware.vcpus_realtime_topology(flavor, image_meta) vcpusched = vconfig.LibvirtConfigGuestCPUTuneVCPUSched() designer.set_vcpu_realtime_scheduler( vcpusched, vcpus_rt, CONF.libvirt.realtime_scheduler_priority) guest_cpu_tune.vcpusched.append(vcpusched) cell_pairs = self._get_cell_pairs(guest_cpu_numa_config, topology) for guest_node_id, (guest_config_cell, host_cell) in enumerate( cell_pairs): # set NUMATune for the cell tnode = vconfig.LibvirtConfigGuestNUMATuneMemNode() designer.set_numa_memnode(tnode, guest_node_id, host_cell.id) guest_numa_tune.memnodes.append(tnode) guest_numa_tune.memory.nodeset.append(host_cell.id) # set CPUTune for the cell object_numa_cell = instance_numa_topology.cells[guest_node_id] for cpu in guest_config_cell.cpus: pin_cpuset = self._get_pin_cpuset(cpu, object_numa_cell, host_cell) guest_cpu_tune.vcpupin.append(pin_cpuset) emu_pin_cpuset = self._get_emulatorpin_cpuset( cpu, object_numa_cell, vcpus_rt, emulator_threads_isolated, wants_realtime, pin_cpuset) guest_cpu_tune.emulatorpin.cpuset.update(emu_pin_cpuset) # TODO(berrange) When the guest has >1 NUMA node, it will # span multiple host NUMA nodes. By pinning emulator threads # to the union of all nodes, we guarantee there will be # cross-node memory access by the emulator threads when # responding to guest I/O operations. The only way to avoid # this would be to pin emulator threads to a single node and # tell the guest OS to only do I/O from one of its virtual # NUMA nodes. This is not even remotely practical. # # The long term solution is to make use of a new QEMU feature # called "I/O Threads" which will let us configure an explicit # I/O thread for each guest vCPU or guest NUMA node. It is # still TBD how to make use of this feature though, especially # how to associate IO threads with guest devices to eliminate # cross NUMA node traffic. This is an area of investigation # for QEMU community devs. 
# Sort the vcpupin list per vCPU id for human-friendlier XML guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id")) # normalize cell.id for i, (cell, memnode) in enumerate(zip(guest_cpu_numa_config.cells, guest_numa_tune.memnodes)): cell.id = i memnode.cellid = i return GuestNumaConfig(None, guest_cpu_tune, guest_cpu_numa_config, guest_numa_tune) def _get_guest_os_type(self, virt_type): """Returns the guest OS type based on virt type.""" if virt_type == "lxc": ret = fields.VMMode.EXE elif virt_type == "uml": ret = fields.VMMode.UML elif virt_type == "xen": ret = fields.VMMode.XEN else: ret = fields.VMMode.HVM return ret def _set_guest_for_rescue(self, rescue, guest, inst_path, virt_type, root_device_name): if rescue.get('kernel_id'): guest.os_kernel = os.path.join(inst_path, "kernel.rescue") guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE)) if virt_type == "qemu": guest.os_cmdline += " no_timer_check" if rescue.get('ramdisk_id'): guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue") def _set_guest_for_inst_kernel(self, instance, guest, inst_path, virt_type, root_device_name, image_meta): guest.os_kernel = os.path.join(inst_path, "kernel") guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE)) if virt_type == "qemu": guest.os_cmdline += " no_timer_check" if instance.ramdisk_id: guest.os_initrd = os.path.join(inst_path, "ramdisk") # we only support os_command_line with images with an explicit # kernel set and don't want to break nova if there's an # os_command_line property without a specified kernel_id param if image_meta.properties.get("os_command_line"): guest.os_cmdline = image_meta.properties.os_command_line def _set_clock(self, guest, os_type, image_meta, virt_type): # NOTE(mikal): Microsoft Windows expects the clock to be in # "localtime". If the clock is set to UTC, then you can use a # registry key to let windows know, but Microsoft says this is # buggy in http://support.microsoft.com/kb/2687252 clk = vconfig.LibvirtConfigGuestClock() if os_type == 'windows': LOG.info('Configuring timezone for windows instance to localtime') clk.offset = 'localtime' else: clk.offset = 'utc' guest.set_clock(clk) if virt_type == "kvm": self._set_kvm_timers(clk, os_type, image_meta) def _set_kvm_timers(self, clk, os_type, image_meta): # TODO(berrange) One day this should be per-guest # OS type configurable tmpit = vconfig.LibvirtConfigGuestTimer() tmpit.name = "pit" tmpit.tickpolicy = "delay" tmrtc = vconfig.LibvirtConfigGuestTimer() tmrtc.name = "rtc" tmrtc.tickpolicy = "catchup" clk.add_timer(tmpit) clk.add_timer(tmrtc) guestarch = libvirt_utils.get_arch(image_meta) if guestarch in (fields.Architecture.I686, fields.Architecture.X86_64): # NOTE(rfolco): HPET is a hardware timer for x86 arch. # qemu -no-hpet is not supported on non-x86 targets. tmhpet = vconfig.LibvirtConfigGuestTimer() tmhpet.name = "hpet" tmhpet.present = False clk.add_timer(tmhpet) # Provide Windows guests with the paravirtualized hyperv timer source. # This is the windows equiv of kvm-clock, allowing Windows # guests to accurately keep time. 
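        # Roughly speaking (illustrative, not checked against a specific
        # libvirt version), this results in a guest XML timer element along
        # the lines of <timer name='hypervclock' present='yes'/>.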
        if os_type == 'windows':
            tmhyperv = vconfig.LibvirtConfigGuestTimer()
            tmhyperv.name = "hypervclock"
            tmhyperv.present = True
            clk.add_timer(tmhyperv)

    def _set_features(self, guest, os_type, caps, virt_type, image_meta):
        if virt_type == "xen":
            # PAE only makes sense in X86
            if caps.host.cpu.arch in (fields.Architecture.I686,
                                      fields.Architecture.X86_64):
                guest.features.append(vconfig.LibvirtConfigGuestFeaturePAE())

        if (virt_type not in ("lxc", "uml", "parallels", "xen") or
                (virt_type == "xen" and guest.os_type == fields.VMMode.HVM)):
            guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
            guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())

        if (virt_type in ("qemu", "kvm") and
                os_type == 'windows'):
            hv = vconfig.LibvirtConfigGuestFeatureHyperV()
            hv.relaxed = True
            hv.spinlocks = True
            # Increase spinlock retries - value recommended by
            # KVM maintainers who certify Windows guests
            # with Microsoft
            hv.spinlock_retries = 8191
            hv.vapic = True
            guest.features.append(hv)

        if (virt_type in ("qemu", "kvm") and
                image_meta.properties.get('img_hide_hypervisor_id')):
            guest.features.append(
                vconfig.LibvirtConfigGuestFeatureKvmHidden())

    def _check_number_of_serial_console(self, num_ports):
        virt_type = CONF.libvirt.virt_type
        if (virt_type in ("kvm", "qemu") and
                num_ports > ALLOWED_QEMU_SERIAL_PORTS):
            raise exception.SerialPortNumberLimitExceeded(
                allowed=ALLOWED_QEMU_SERIAL_PORTS, virt_type=virt_type)

    def _add_video_driver(self, guest, image_meta, flavor):
        VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga",
                               "xen", "qxl", "virtio")
        video = vconfig.LibvirtConfigGuestVideo()
        # NOTE(ldbragst): The following logic sets the video.type
        # depending on supported defaults given the architecture,
        # virtualization type, and features. The video.type attribute can
        # be overridden by the user with image_meta.properties, which
        # is carried out in the next if statement below this one.
        guestarch = libvirt_utils.get_arch(image_meta)
        if guest.os_type == fields.VMMode.XEN:
            video.type = 'xen'
        elif CONF.libvirt.virt_type == 'parallels':
            video.type = 'vga'
        elif guestarch in (fields.Architecture.PPC,
                           fields.Architecture.PPC64,
                           fields.Architecture.PPC64LE):
            # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default
            # so use 'vga' instead when running on Power hardware.
            video.type = 'vga'
        elif guestarch in (fields.Architecture.AARCH64,):
            # NOTE(kevinz): Only the virtio device type is supported on
            # AARCH64, so use 'virtio' when running on AArch64 hardware.
video.type = 'virtio' elif CONF.spice.enabled: video.type = 'qxl' if image_meta.properties.get('hw_video_model'): video.type = image_meta.properties.hw_video_model if (video.type not in VALID_VIDEO_DEVICES): raise exception.InvalidVideoMode(model=video.type) # Set video memory, only if the flavor's limit is set video_ram = image_meta.properties.get('hw_video_ram', 0) max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0)) if video_ram > max_vram: raise exception.RequestedVRamTooHigh(req_vram=video_ram, max_vram=max_vram) if max_vram and video_ram: video.vram = video_ram * units.Mi / units.Ki guest.add_device(video) def _add_qga_device(self, guest, instance): qga = vconfig.LibvirtConfigGuestChannel() qga.type = "unix" qga.target_name = "org.qemu.guest_agent.0" qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" % ("org.qemu.guest_agent.0", instance.name)) guest.add_device(qga) def _add_rng_device(self, guest, flavor): rng_device = vconfig.LibvirtConfigGuestRng() rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0) period = flavor.extra_specs.get('hw_rng:rate_period', 0) if rate_bytes: rng_device.rate_bytes = int(rate_bytes) rng_device.rate_period = int(period) rng_path = CONF.libvirt.rng_dev_path if (rng_path and not os.path.exists(rng_path)): raise exception.RngDeviceNotExist(path=rng_path) rng_device.backend = rng_path guest.add_device(rng_device) def _set_qemu_guest_agent(self, guest, flavor, instance, image_meta): # Enable qga only if the 'hw_qemu_guest_agent' is equal to yes if image_meta.properties.get('hw_qemu_guest_agent', False): LOG.debug("Qemu guest agent is enabled through image " "metadata", instance=instance) self._add_qga_device(guest, instance) rng_is_virtio = image_meta.properties.get('hw_rng_model') == 'virtio' rng_allowed_str = flavor.extra_specs.get('hw_rng:allowed', '') rng_allowed = strutils.bool_from_string(rng_allowed_str) if rng_is_virtio and rng_allowed: self._add_rng_device(guest, flavor) def _get_guest_memory_backing_config( self, inst_topology, numatune, flavor): wantsmempages = False if inst_topology: for cell in inst_topology.cells: if cell.pagesize: wantsmempages = True break wantsrealtime = hardware.is_realtime_enabled(flavor) membacking = None if wantsmempages: pages = self._get_memory_backing_hugepages_support( inst_topology, numatune) if pages: membacking = vconfig.LibvirtConfigGuestMemoryBacking() membacking.hugepages = pages if wantsrealtime: if not membacking: membacking = vconfig.LibvirtConfigGuestMemoryBacking() membacking.locked = True membacking.sharedpages = False return membacking def _get_memory_backing_hugepages_support(self, inst_topology, numatune): if not self._has_numa_support(): # We should not get here, since we should have avoided # reporting NUMA topology from _get_host_numa_topology # in the first place. Just in case of a scheduler # mess up though, raise an exception raise exception.MemoryPagesUnsupported() host_topology = self._get_host_numa_topology() if host_topology is None: # As above, we should not get here but just in case... raise exception.MemoryPagesUnsupported() # Currently libvirt does not support the smallest # pagesize set as a backend memory. 
# https://bugzilla.redhat.com/show_bug.cgi?id=1173507 avail_pagesize = [page.size_kb for page in host_topology.cells[0].mempages] avail_pagesize.sort() smallest = avail_pagesize[0] pages = [] for guest_cellid, inst_cell in enumerate(inst_topology.cells): if inst_cell.pagesize and inst_cell.pagesize > smallest: for memnode in numatune.memnodes: if guest_cellid == memnode.cellid: page = ( vconfig.LibvirtConfigGuestMemoryBackingPage()) page.nodeset = [guest_cellid] page.size_kb = inst_cell.pagesize pages.append(page) break # Quit early... return pages def _get_flavor(self, ctxt, instance, flavor): if flavor is not None: return flavor return instance.flavor def _has_uefi_support(self): # This means that the host can support uefi booting for guests supported_archs = [fields.Architecture.X86_64, fields.Architecture.AARCH64] caps = self._host.get_capabilities() return ((caps.host.cpu.arch in supported_archs) and os.path.exists(DEFAULT_UEFI_LOADER_PATH[caps.host.cpu.arch])) def _get_supported_perf_events(self): if (len(CONF.libvirt.enabled_perf_events) == 0 or not self._host.has_min_version(MIN_LIBVIRT_PERF_VERSION)): return [] supported_events = [] host_cpu_info = self._get_cpu_info() for event in CONF.libvirt.enabled_perf_events: if self._supported_perf_event(event, host_cpu_info['features']): supported_events.append(event) return supported_events def _supported_perf_event(self, event, cpu_features): libvirt_perf_event_name = LIBVIRT_PERF_EVENT_PREFIX + event.upper() if not hasattr(libvirt, libvirt_perf_event_name): LOG.warning("Libvirt doesn't support event type %s.", event) return False if (event in PERF_EVENTS_CPU_FLAG_MAPPING and PERF_EVENTS_CPU_FLAG_MAPPING[event] not in cpu_features): LOG.warning("Host does not support event type %s.", event) return False return True def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance, image_meta, flavor, root_device_name): if virt_type == "xen": if guest.os_type == fields.VMMode.HVM: guest.os_loader = CONF.libvirt.xen_hvmloader_path else: guest.os_cmdline = CONSOLE elif virt_type in ("kvm", "qemu"): if caps.host.cpu.arch in (fields.Architecture.I686, fields.Architecture.X86_64): guest.sysinfo = self._get_guest_config_sysinfo(instance) guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS() hw_firmware_type = image_meta.properties.get('hw_firmware_type') if caps.host.cpu.arch == fields.Architecture.AARCH64: if not hw_firmware_type: hw_firmware_type = fields.FirmwareType.UEFI if hw_firmware_type == fields.FirmwareType.UEFI: if self._has_uefi_support(): global uefi_logged if not uefi_logged: LOG.warning("uefi support is without some kind of " "functional testing and therefore " "considered experimental.") uefi_logged = True guest.os_loader = DEFAULT_UEFI_LOADER_PATH[ caps.host.cpu.arch] guest.os_loader_type = "pflash" else: raise exception.UEFINotSupported() guest.os_mach_type = self._get_machine_type(image_meta, caps) if image_meta.properties.get('hw_boot_menu') is None: guest.os_bootmenu = strutils.bool_from_string( flavor.extra_specs.get('hw:boot_menu', 'no')) else: guest.os_bootmenu = image_meta.properties.hw_boot_menu elif virt_type == "lxc": guest.os_init_path = "/sbin/init" guest.os_cmdline = CONSOLE elif virt_type == "uml": guest.os_kernel = "/usr/bin/linux" guest.os_root = root_device_name elif virt_type == "parallels": if guest.os_type == fields.VMMode.EXE: guest.os_init_path = "/sbin/init" def _conf_non_lxc_uml(self, virt_type, guest, root_device_name, rescue, instance, inst_path, image_meta, disk_info): if rescue: 
self._set_guest_for_rescue(rescue, guest, inst_path, virt_type, root_device_name) elif instance.kernel_id: self._set_guest_for_inst_kernel(instance, guest, inst_path, virt_type, root_device_name, image_meta) else: guest.os_boot_dev = blockinfo.get_boot_order(disk_info) def _create_consoles(self, virt_type, guest_cfg, instance, flavor, image_meta): # NOTE(markus_z): Beware! Below are so many conditionals that it is # easy to lose track. Use this chart to figure out your case: # # case | is serial | has | is qemu | resulting # | enabled? | virtlogd? | or kvm? | devices # -------------------------------------------------- # 1 | no | no | no | pty* # 2 | no | no | yes | file + pty # 3 | no | yes | no | see case 1 # 4 | no | yes | yes | pty with logd # 5 | yes | no | no | see case 1 # 6 | yes | no | yes | tcp + pty # 7 | yes | yes | no | see case 1 # 8 | yes | yes | yes | tcp with logd # * exception: virt_type "parallels" doesn't create a device if virt_type == 'parallels': pass elif virt_type not in ("qemu", "kvm"): log_path = self._get_console_log_path(instance) self._create_pty_device(guest_cfg, vconfig.LibvirtConfigGuestConsole, log_path=log_path) elif (virt_type in ("qemu", "kvm") and self._is_s390x_guest(image_meta)): self._create_consoles_s390x(guest_cfg, instance, flavor, image_meta) elif virt_type in ("qemu", "kvm"): self._create_consoles_qemu_kvm(guest_cfg, instance, flavor, image_meta) def _is_s390x_guest(self, image_meta): s390x_archs = (fields.Architecture.S390, fields.Architecture.S390X) return libvirt_utils.get_arch(image_meta) in s390x_archs def _create_consoles_qemu_kvm(self, guest_cfg, instance, flavor, image_meta): char_dev_cls = vconfig.LibvirtConfigGuestSerial log_path = self._get_console_log_path(instance) if CONF.serial_console.enabled: if not self._serial_ports_already_defined(instance): num_ports = hardware.get_number_of_serial_ports(flavor, image_meta) self._check_number_of_serial_console(num_ports) self._create_serial_consoles(guest_cfg, num_ports, char_dev_cls, log_path) else: self._create_file_device(guest_cfg, instance, char_dev_cls) self._create_pty_device(guest_cfg, char_dev_cls, log_path=log_path) def _create_consoles_s390x(self, guest_cfg, instance, flavor, image_meta): char_dev_cls = vconfig.LibvirtConfigGuestConsole log_path = self._get_console_log_path(instance) if CONF.serial_console.enabled: if not self._serial_ports_already_defined(instance): num_ports = hardware.get_number_of_serial_ports(flavor, image_meta) self._create_serial_consoles(guest_cfg, num_ports, char_dev_cls, log_path) else: self._create_file_device(guest_cfg, instance, char_dev_cls, "sclplm") self._create_pty_device(guest_cfg, char_dev_cls, "sclp", log_path) def _create_pty_device(self, guest_cfg, char_dev_cls, target_type=None, log_path=None): def _create_base_dev(): consolepty = char_dev_cls() consolepty.target_type = target_type consolepty.type = "pty" return consolepty def _create_logd_dev(): consolepty = _create_base_dev() log = vconfig.LibvirtConfigGuestCharDeviceLog() log.file = log_path consolepty.log = log return consolepty if CONF.serial_console.enabled: if self._is_virtlogd_available(): return else: # NOTE(markus_z): You may wonder why this is necessary and # so do I. I'm certain that this is *not* needed in any # real use case. It is, however, useful if you want to # pypass the Nova API and use "virsh console <guest>" on # an hypervisor, as this CLI command doesn't work with TCP # devices (like the serial console is). 
                # https://bugzilla.redhat.com/show_bug.cgi?id=781467
                # Bypassing the Nova API however is a thing we don't want.
                # Future changes should remove this and fix the unit tests
                # which expect this device to exist.
                guest_cfg.add_device(_create_base_dev())
        else:
            if self._is_virtlogd_available():
                guest_cfg.add_device(_create_logd_dev())
            else:
                guest_cfg.add_device(_create_base_dev())

    def _create_file_device(self, guest_cfg, instance, char_dev_cls,
                            target_type=None):
        if self._is_virtlogd_available():
            return

        consolelog = char_dev_cls()
        consolelog.target_type = target_type
        consolelog.type = "file"
        consolelog.source_path = self._get_console_log_path(instance)
        guest_cfg.add_device(consolelog)

    def _serial_ports_already_defined(self, instance):
        try:
            guest = self._host.get_guest(instance)
            if list(self._get_serial_ports_from_guest(guest)):
                # Serial ports are already configured for the instance,
                # which means we are in the context of a migration.
                return True
        except exception.InstanceNotFound:
            LOG.debug(
                "Instance does not exist yet on libvirt, we can "
                "safely pass on looking for already defined serial "
                "ports in its domain XML", instance=instance)
        return False

    def _create_serial_consoles(self, guest_cfg, num_ports, char_dev_cls,
                                log_path):
        for port in six.moves.range(num_ports):
            console = char_dev_cls()
            console.port = port
            console.type = "tcp"
            console.listen_host = CONF.serial_console.proxyclient_address
            listen_port = serial_console.acquire_port(console.listen_host)
            console.listen_port = listen_port
            # NOTE: only the first serial console gets the boot messages,
            # that's why we attach the logd subdevice only to that.
            if port == 0 and self._is_virtlogd_available():
                log = vconfig.LibvirtConfigGuestCharDeviceLog()
                log.file = log_path
                console.log = log
            guest_cfg.add_device(console)

    def _cpu_config_to_vcpu_model(self, cpu_config, vcpu_model):
        """Update VirtCPUModel object according to libvirt CPU config.

        :param:cpu_config: vconfig.LibvirtConfigGuestCPU presenting the
                           instance's virtual cpu configuration.
        :param:vcpu_model: VirtCPUModel object. A new object will be
                           created if None.

        :return: Updated VirtCPUModel object, or None if cpu_config is None
        """

        if not cpu_config:
            return
        if not vcpu_model:
            vcpu_model = objects.VirtCPUModel()
        vcpu_model.arch = cpu_config.arch
        vcpu_model.vendor = cpu_config.vendor
        vcpu_model.model = cpu_config.model
        vcpu_model.mode = cpu_config.mode
        vcpu_model.match = cpu_config.match

        if cpu_config.sockets:
            vcpu_model.topology = objects.VirtCPUTopology(
                sockets=cpu_config.sockets,
                cores=cpu_config.cores,
                threads=cpu_config.threads)
        else:
            vcpu_model.topology = None

        features = [objects.VirtCPUFeature(
            name=f.name,
            policy=f.policy) for f in cpu_config.features]
        vcpu_model.features = features
        return vcpu_model

    def _vcpu_model_to_cpu_config(self, vcpu_model):
        """Create libvirt CPU config according to VirtCPUModel object.

        :param:vcpu_model: VirtCPUModel object.
        :return: vconfig.LibvirtConfigGuestCPU.
""" cpu_config = vconfig.LibvirtConfigGuestCPU() cpu_config.arch = vcpu_model.arch cpu_config.model = vcpu_model.model cpu_config.mode = vcpu_model.mode cpu_config.match = vcpu_model.match cpu_config.vendor = vcpu_model.vendor if vcpu_model.topology: cpu_config.sockets = vcpu_model.topology.sockets cpu_config.cores = vcpu_model.topology.cores cpu_config.threads = vcpu_model.topology.threads if vcpu_model.features: for f in vcpu_model.features: xf = vconfig.LibvirtConfigGuestCPUFeature() xf.name = f.name xf.policy = f.policy cpu_config.features.add(xf) return cpu_config def _guest_add_pcie_root_ports(self, guest): """Add PCI Express root ports. PCI Express machine can have as many PCIe devices as it has pcie-root-port controllers (slots in virtual motherboard). If we want to have more PCIe slots for hotplug then we need to create whole PCIe structure (libvirt limitation). """ pcieroot = vconfig.LibvirtConfigGuestPCIeRootController() guest.add_device(pcieroot) for x in range(0, CONF.libvirt.num_pcie_ports): pcierootport = vconfig.LibvirtConfigGuestPCIeRootPortController() guest.add_device(pcierootport) def _guest_add_usb_host_keyboard(self, guest): """Add USB Host controller and keyboard for graphical console use. Add USB keyboard as PS/2 support may not be present on non-x86 architectures. """ keyboard = vconfig.LibvirtConfigGuestInput() keyboard.type = "keyboard" keyboard.bus = "usb" guest.add_device(keyboard) usbhost = vconfig.LibvirtConfigGuestUSBHostController() usbhost.index = 0 guest.add_device(usbhost) def _get_guest_config(self, instance, network_info, image_meta, disk_info, rescue=None, block_device_info=None, context=None, mdevs=None): """Get config data for parameters. :param rescue: optional dictionary that should contain the key 'ramdisk_id' if a ramdisk is needed for the rescue image and 'kernel_id' if a kernel is needed for the rescue image. :param mdevs: optional list of mediated devices to assign to the guest. """ flavor = instance.flavor inst_path = libvirt_utils.get_instance_path(instance) disk_mapping = disk_info['mapping'] virt_type = CONF.libvirt.virt_type guest = vconfig.LibvirtConfigGuest() guest.virt_type = virt_type guest.name = instance.name guest.uuid = instance.uuid # We are using default unit for memory: KiB guest.memory = flavor.memory_mb * units.Ki guest.vcpus = flavor.vcpus allowed_cpus = hardware.get_vcpu_pin_set() guest_numa_config = self._get_guest_numa_config( instance.numa_topology, flavor, allowed_cpus, image_meta) guest.cpuset = guest_numa_config.cpuset guest.cputune = guest_numa_config.cputune guest.numatune = guest_numa_config.numatune guest.membacking = self._get_guest_memory_backing_config( instance.numa_topology, guest_numa_config.numatune, flavor) guest.metadata.append(self._get_guest_config_meta(instance)) guest.idmaps = self._get_guest_idmaps() for event in self._supported_perf_events: guest.add_perf_event(event) self._update_guest_cputune(guest, flavor, virt_type) guest.cpu = self._get_guest_cpu_config( flavor, image_meta, guest_numa_config.numaconfig, instance.numa_topology) # Notes(yjiang5): we always sync the instance's vcpu model with # the corresponding config file. 
instance.vcpu_model = self._cpu_config_to_vcpu_model( guest.cpu, instance.vcpu_model) if 'root' in disk_mapping: root_device_name = block_device.prepend_dev( disk_mapping['root']['dev']) else: root_device_name = None if root_device_name: instance.root_device_name = root_device_name guest.os_type = (fields.VMMode.get_from_instance(instance) or self._get_guest_os_type(virt_type)) caps = self._host.get_capabilities() self._configure_guest_by_virt_type(guest, virt_type, caps, instance, image_meta, flavor, root_device_name) if virt_type not in ('lxc', 'uml'): self._conf_non_lxc_uml(virt_type, guest, root_device_name, rescue, instance, inst_path, image_meta, disk_info) self._set_features(guest, instance.os_type, caps, virt_type, image_meta) self._set_clock(guest, instance.os_type, image_meta, virt_type) storage_configs = self._get_guest_storage_config(context, instance, image_meta, disk_info, rescue, block_device_info, flavor, guest.os_type) for config in storage_configs: guest.add_device(config) for vif in network_info: config = self.vif_driver.get_config( instance, vif, image_meta, flavor, virt_type, self._host) guest.add_device(config) self._create_consoles(virt_type, guest, instance, flavor, image_meta) pointer = self._get_guest_pointer_model(guest.os_type, image_meta) if pointer: guest.add_device(pointer) self._guest_add_spice_channel(guest) if self._guest_add_video_device(guest): self._add_video_driver(guest, image_meta, flavor) # We want video == we want graphical console. Some architectures # do not have input devices attached in default configuration. # Let then add USB Host controller and USB keyboard. # x86(-64) and ppc64 have usb host controller and keyboard # s390x does not support USB if caps.host.cpu.arch == fields.Architecture.AARCH64: self._guest_add_usb_host_keyboard(guest) # Qemu guest agent only support 'qemu' and 'kvm' hypervisor if virt_type in ('qemu', 'kvm'): self._set_qemu_guest_agent(guest, flavor, instance, image_meta) # Add PCIe root port controllers for PCI Express machines # but only if their amount is configured if (CONF.libvirt.num_pcie_ports and ((caps.host.cpu.arch == fields.Architecture.AARCH64 and guest.os_mach_type.startswith('virt')) or (caps.host.cpu.arch == fields.Architecture.X86_64 and guest.os_mach_type is not None and 'q35' in guest.os_mach_type))): self._guest_add_pcie_root_ports(guest) self._guest_add_pci_devices(guest, instance) self._guest_add_watchdog_action(guest, flavor, image_meta) self._guest_add_memory_balloon(guest) if mdevs: self._guest_add_mdevs(guest, mdevs) return guest def _guest_add_mdevs(self, guest, chosen_mdevs): for chosen_mdev in chosen_mdevs: mdev = vconfig.LibvirtConfigGuestHostdevMDEV() mdev.uuid = chosen_mdev guest.add_device(mdev) @staticmethod def _guest_add_spice_channel(guest): if (CONF.spice.enabled and CONF.spice.agent_enabled and guest.virt_type not in ('lxc', 'uml', 'xen')): channel = vconfig.LibvirtConfigGuestChannel() channel.type = 'spicevmc' channel.target_name = "com.redhat.spice.0" guest.add_device(channel) @staticmethod def _guest_add_memory_balloon(guest): virt_type = guest.virt_type # Memory balloon device only support 'qemu/kvm' and 'xen' hypervisor if (virt_type in ('xen', 'qemu', 'kvm') and CONF.libvirt.mem_stats_period_seconds > 0): balloon = vconfig.LibvirtConfigMemoryBalloon() if virt_type in ('qemu', 'kvm'): balloon.model = 'virtio' else: balloon.model = 'xen' balloon.period = CONF.libvirt.mem_stats_period_seconds guest.add_device(balloon) @staticmethod def _guest_add_watchdog_action(guest, flavor, 
image_meta): # image meta takes precedence over flavor extra specs; disable the # watchdog action by default watchdog_action = (flavor.extra_specs.get('hw:watchdog_action') or 'disabled') watchdog_action = image_meta.properties.get('hw_watchdog_action', watchdog_action) # NB(sross): currently only actually supported by KVM/QEmu if watchdog_action != 'disabled': if watchdog_action in fields.WatchdogAction.ALL: bark = vconfig.LibvirtConfigGuestWatchdog() bark.action = watchdog_action guest.add_device(bark) else: raise exception.InvalidWatchdogAction(action=watchdog_action) def _guest_add_pci_devices(self, guest, instance): virt_type = guest.virt_type if virt_type in ('xen', 'qemu', 'kvm'): # Get all generic PCI devices (non-SR-IOV). for pci_dev in pci_manager.get_instance_pci_devs(instance): guest.add_device(self._get_guest_pci_device(pci_dev)) else: # PCI devices is only supported for hypervisors # 'xen', 'qemu' and 'kvm'. if pci_manager.get_instance_pci_devs(instance, 'all'): raise exception.PciDeviceUnsupportedHypervisor(type=virt_type) @staticmethod def _guest_add_video_device(guest): # NB some versions of libvirt support both SPICE and VNC # at the same time. We're not trying to second guess which # those versions are. We'll just let libvirt report the # errors appropriately if the user enables both. add_video_driver = False if CONF.vnc.enabled and guest.virt_type not in ('lxc', 'uml'): graphics = vconfig.LibvirtConfigGuestGraphics() graphics.type = "vnc" if CONF.vnc.keymap: graphics.keymap = CONF.vnc.keymap graphics.listen = CONF.vnc.server_listen guest.add_device(graphics) add_video_driver = True if CONF.spice.enabled and guest.virt_type not in ('lxc', 'uml', 'xen'): graphics = vconfig.LibvirtConfigGuestGraphics() graphics.type = "spice" if CONF.spice.keymap: graphics.keymap = CONF.spice.keymap graphics.listen = CONF.spice.server_listen guest.add_device(graphics) add_video_driver = True return add_video_driver def _get_guest_pointer_model(self, os_type, image_meta): pointer_model = image_meta.properties.get( 'hw_pointer_model', CONF.pointer_model) if pointer_model is None and CONF.libvirt.use_usb_tablet: # TODO(sahid): We set pointer_model to keep compatibility # until the next release O*. It means operators can continue # to use the deprecated option "use_usb_tablet" or set a # specific device to use pointer_model = "usbtablet" LOG.warning('The option "use_usb_tablet" has been ' 'deprecated for Newton in favor of the more ' 'generic "pointer_model". Please update ' 'nova.conf to address this change.') if pointer_model == "usbtablet": # We want a tablet if VNC is enabled, or SPICE is enabled and # the SPICE agent is disabled. If the SPICE agent is enabled # it provides a paravirt mouse which drastically reduces # overhead (by eliminating USB polling). if CONF.vnc.enabled or ( CONF.spice.enabled and not CONF.spice.agent_enabled): return self._get_guest_usb_tablet(os_type) else: if CONF.pointer_model or CONF.libvirt.use_usb_tablet: # For backward compatibility We don't want to break # process of booting an instance if host is configured # to use USB tablet without VNC or SPICE and SPICE # agent disable. LOG.warning('USB tablet requested for guests by host ' 'configuration. 
In order to accept this ' 'request VNC should be enabled or SPICE ' 'and SPICE agent disabled on host.') else: raise exception.UnsupportedPointerModelRequested( model="usbtablet") def _get_guest_usb_tablet(self, os_type): tablet = None if os_type == fields.VMMode.HVM: tablet = vconfig.LibvirtConfigGuestInput() tablet.type = "tablet" tablet.bus = "usb" else: if CONF.pointer_model or CONF.libvirt.use_usb_tablet: # For backward compatibility We don't want to break # process of booting an instance if virtual machine mode # is not configured as HVM. LOG.warning('USB tablet requested for guests by host ' 'configuration. In order to accept this ' 'request the machine mode should be ' 'configured as HVM.') else: raise exception.UnsupportedPointerModelRequested( model="usbtablet") return tablet def _get_guest_xml(self, context, instance, network_info, disk_info, image_meta, rescue=None, block_device_info=None, mdevs=None): # NOTE(danms): Stringifying a NetworkInfo will take a lock. Do # this ahead of time so that we don't acquire it while also # holding the logging lock. network_info_str = str(network_info) msg = ('Start _get_guest_xml ' 'network_info=%(network_info)s ' 'disk_info=%(disk_info)s ' 'image_meta=%(image_meta)s rescue=%(rescue)s ' 'block_device_info=%(block_device_info)s' % {'network_info': network_info_str, 'disk_info': disk_info, 'image_meta': image_meta, 'rescue': rescue, 'block_device_info': block_device_info}) # NOTE(mriedem): block_device_info can contain auth_password so we # need to sanitize the password in the message. LOG.debug(strutils.mask_password(msg), instance=instance) conf = self._get_guest_config(instance, network_info, image_meta, disk_info, rescue, block_device_info, context, mdevs) xml = conf.to_xml() LOG.debug('End _get_guest_xml xml=%(xml)s', {'xml': xml}, instance=instance) return xml def get_info(self, instance): """Retrieve information from libvirt for a specific instance. If a libvirt error is encountered during lookup, we might raise a NotFound exception or Error exception depending on how severe the libvirt error is. :param instance: nova.objects.instance.Instance object :returns: An InstanceInfo object """ guest = self._host.get_guest(instance) # Kind of ugly but we need to pass host to get_info as for a # workaround, see libvirt/compat.py return guest.get_info(self._host) def _create_domain_setup_lxc(self, context, instance, image_meta, block_device_info): inst_path = libvirt_utils.get_instance_path(instance) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) root_disk = block_device.get_root_bdm(block_device_mapping) if root_disk: self._connect_volume(context, root_disk['connection_info'], instance) disk_path = root_disk['connection_info']['data']['device_path'] # NOTE(apmelton) - Even though the instance is being booted from a # cinder volume, it is still presented as a local block device. # LocalBlockImage is used here to indicate that the instance's # disk is backed by a local block device. 
image_model = imgmodel.LocalBlockImage(disk_path) else: root_disk = self.image_backend.by_name(instance, 'disk') image_model = root_disk.get_model(self._conn) container_dir = os.path.join(inst_path, 'rootfs') fileutils.ensure_tree(container_dir) rootfs_dev = disk_api.setup_container(image_model, container_dir=container_dir) try: # Save rootfs device to disconnect it when deleting the instance if rootfs_dev: instance.system_metadata['rootfs_device_name'] = rootfs_dev if CONF.libvirt.uid_maps or CONF.libvirt.gid_maps: id_maps = self._get_guest_idmaps() libvirt_utils.chown_for_id_maps(container_dir, id_maps) except Exception: with excutils.save_and_reraise_exception(): self._create_domain_cleanup_lxc(instance) def _create_domain_cleanup_lxc(self, instance): inst_path = libvirt_utils.get_instance_path(instance) container_dir = os.path.join(inst_path, 'rootfs') try: state = self.get_info(instance).state except exception.InstanceNotFound: # The domain may not be present if the instance failed to start state = None if state == power_state.RUNNING: # NOTE(uni): Now the container is running with its own private # mount namespace and so there is no need to keep the container # rootfs mounted in the host namespace LOG.debug('Attempting to unmount container filesystem: %s', container_dir, instance=instance) disk_api.clean_lxc_namespace(container_dir=container_dir) else: disk_api.teardown_container(container_dir=container_dir) @contextlib.contextmanager def _lxc_disk_handler(self, context, instance, image_meta, block_device_info): """Context manager to handle the pre and post instance boot, LXC specific disk operations. An image or a volume path will be prepared and setup to be used by the container, prior to starting it. The disk will be disconnected and unmounted if a container has failed to start. """ if CONF.libvirt.virt_type != 'lxc': yield return self._create_domain_setup_lxc(context, instance, image_meta, block_device_info) try: yield finally: self._create_domain_cleanup_lxc(instance) # TODO(sahid): Consider renaming this to _create_guest. def _create_domain(self, xml=None, domain=None, power_on=True, pause=False, post_xml_callback=None): """Create a domain. Either domain or xml must be passed in. If both are passed, then the domain definition is overwritten from the xml. :returns guest.Guest: Guest just created """ if xml: guest = libvirt_guest.Guest.create(xml, self._host) if post_xml_callback is not None: post_xml_callback() else: guest = libvirt_guest.Guest(domain) if power_on or pause: guest.launch(pause=pause) if not utils.is_neutron(): guest.enable_hairpin() return guest def _neutron_failed_callback(self, event_name, instance): LOG.error('Neutron Reported failure on event ' '%(event)s for instance %(uuid)s', {'event': event_name, 'uuid': instance.uuid}, instance=instance) if CONF.vif_plugging_is_fatal: raise exception.VirtualInterfaceCreateException() def _neutron_failed_live_migration_callback(self, event_name, instance): msg = ('Neutron reported failure during live migration ' 'with %(event)s for instance %(uuid)s' % {'event': event_name, 'uuid': instance.uuid}) raise exception.MigrationError(reason=msg) def _get_neutron_events(self, network_info): # NOTE(danms): We need to collect any VIFs that are currently # down that we expect a down->up event for. Anything that is # already up will not undergo that transition, and for # anything that might be stale (cache-wise) assume it's # already up so we don't block on it. 
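# Standalone sketch (not the driver code) of the filtering described above,
# using hypothetical VIF data: only VIFs that are currently inactive produce
# a network-vif-plugged event to wait on; a VIF without an 'active' key is
# assumed to be up already.
sample_network_info = [
    {'id': 'vif-1', 'active': False},   # down -> expect a plugged event
    {'id': 'vif-2', 'active': True},    # already up -> no event expected
    {'id': 'vif-3'},                    # unknown/stale -> assume up
]
expected_events = [('network-vif-plugged', vif['id'])
                   for vif in sample_network_info
                   if vif.get('active', True) is False]
assert expected_events == [('network-vif-plugged', 'vif-1')]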
return [('network-vif-plugged', vif['id']) for vif in network_info if vif.get('active', True) is False] def _get_neutron_events_for_live_migration(self, network_info): # Neutron should send events to Nova indicating that the VIFs # are successfully plugged on destination host. # TODO(sahid): Currently we only use the mechanism of waiting # for neutron events during live-migration for linux-bridge. return [('network-vif-plugged', vif['id']) for vif in network_info if ( vif.get('type') == network_model.VIF_TYPE_BRIDGE)] def _cleanup_failed_start(self, context, instance, network_info, block_device_info, guest, destroy_disks): try: if guest and guest.is_active(): guest.poweroff() finally: self.cleanup(context, instance, network_info=network_info, block_device_info=block_device_info, destroy_disks=destroy_disks) def _create_domain_and_network(self, context, xml, instance, network_info, block_device_info=None, power_on=True, vifs_already_plugged=False, post_xml_callback=None, destroy_disks_on_failure=False): """Do required network setup and create domain.""" timeout = CONF.vif_plugging_timeout if (self._conn_supports_start_paused and utils.is_neutron() and not vifs_already_plugged and power_on and timeout): events = self._get_neutron_events(network_info) else: events = [] pause = bool(events) guest = None try: with self.virtapi.wait_for_instance_event( instance, events, deadline=timeout, error_callback=self._neutron_failed_callback): self.plug_vifs(instance, network_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) with self._lxc_disk_handler(context, instance, instance.image_meta, block_device_info): guest = self._create_domain( xml, pause=pause, power_on=power_on, post_xml_callback=post_xml_callback) self.firewall_driver.apply_instance_filter(instance, network_info) except exception.VirtualInterfaceCreateException: # Neutron reported failure and we didn't swallow it, so # bail here with excutils.save_and_reraise_exception(): self._cleanup_failed_start(context, instance, network_info, block_device_info, guest, destroy_disks_on_failure) except eventlet.timeout.Timeout: # We never heard from Neutron LOG.warning('Timeout waiting for %(events)s for ' 'instance with vm_state %(vm_state)s and ' 'task_state %(task_state)s.', {'events': events, 'vm_state': instance.vm_state, 'task_state': instance.task_state}, instance=instance) if CONF.vif_plugging_is_fatal: self._cleanup_failed_start(context, instance, network_info, block_device_info, guest, destroy_disks_on_failure) raise exception.VirtualInterfaceCreateException() except Exception: # Any other error, be sure to clean up LOG.error('Failed to start libvirt guest', instance=instance) with excutils.save_and_reraise_exception(): self._cleanup_failed_start(context, instance, network_info, block_device_info, guest, destroy_disks_on_failure) # Resume only if domain has been paused if pause: guest.resume() return guest def _get_vcpu_total(self): """Get available vcpu number of physical computer. :returns: the number of cpu core instances can be used. """ try: total_pcpus = self._host.get_cpu_count() except libvirt.libvirtError: LOG.warning("Cannot get the number of cpu, because this " "function is not implemented for this platform. ") return 0 if not CONF.vcpu_pin_set: return total_pcpus available_ids = hardware.get_vcpu_pin_set() # We get the list of online CPUs on the host and see if the requested # set falls under these. If not, we retain the old behavior. 
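# Standalone sketch (not the driver code) of the validation described above,
# with hypothetical CPU ids: the pinned set must be a subset of the online
# CPUs, and when the online list cannot be fetched the highest pinned id is
# checked against the total CPU count instead.
available_ids = {2, 3, 6, 7}            # hypothetical vcpu_pin_set
online_pcpus = {0, 1, 2, 3, 4, 5, 6, 7}
total_pcpus = 8
assert available_ids <= online_pcpus             # else: invalid pin set
assert sorted(available_ids)[-1] < total_pcpus   # fallback range check
vcpu_total = len(available_ids)                  # value _get_vcpu_total() returns
assert vcpu_total == 4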
online_pcpus = None try: online_pcpus = self._host.get_online_cpus() except libvirt.libvirtError as ex: error_code = ex.get_error_code() err_msg = encodeutils.exception_to_unicode(ex) LOG.warning( "Couldn't retrieve the online CPUs due to a Libvirt " "error: %(error)s with error code: %(error_code)s", {'error': err_msg, 'error_code': error_code}) if online_pcpus: if not (available_ids <= online_pcpus): msg = (_("Invalid vcpu_pin_set config, one or more of the " "specified cpuset is not online. Online cpuset(s): " "%(online)s, requested cpuset(s): %(req)s"), {'online': sorted(online_pcpus), 'req': sorted(available_ids)}) raise exception.Invalid(msg) elif sorted(available_ids)[-1] >= total_pcpus: raise exception.Invalid(_("Invalid vcpu_pin_set config, " "out of hypervisor cpu range.")) return len(available_ids) @staticmethod def _get_local_gb_info(): """Get local storage info of the compute node in GB. :returns: A dict containing: :total: How big the overall usable filesystem is (in gigabytes) :free: How much space is free (in gigabytes) :used: How much space is used (in gigabytes) """ if CONF.libvirt.images_type == 'lvm': info = lvm.get_volume_group_info( CONF.libvirt.images_volume_group) elif CONF.libvirt.images_type == 'rbd': info = LibvirtDriver._get_rbd_driver().get_pool_info() else: info = libvirt_utils.get_fs_info(CONF.instances_path) for (k, v) in info.items(): info[k] = v / units.Gi return info def _get_vcpu_used(self): """Get vcpu usage number of physical computer. :returns: The total number of vcpu(s) that are currently being used. """ total = 0 # Not all libvirt drivers will support the get_vcpus_info() # # For example, LXC does not have a concept of vCPUs, while # QEMU (TCG) traditionally handles all vCPUs in a single # thread. So both will report an exception when the vcpus() # API call is made. In such a case we should report the # guest as having 1 vCPU, since that lets us still do # CPU over commit calculations that apply as the total # guest count scales. # # It is also possible that we might see an exception if # the guest is just in middle of shutting down. Technically # we should report 0 for vCPU usage in this case, but we # we can't reliably distinguish the vcpu not supported # case from the just shutting down case. Thus we don't know # whether to report 1 or 0 for vCPU count. # # Under-reporting vCPUs is bad because it could conceivably # let the scheduler place too many guests on the host. Over- # reporting vCPUs is not a problem as it'll auto-correct on # the next refresh of usage data. # # Thus when getting an exception we always report 1 as the # vCPU count, as the least worst value. for guest in self._host.list_guests(): try: vcpus = guest.get_vcpus_info() total += len(list(vcpus)) except libvirt.libvirtError: total += 1 # NOTE(gtt116): give other tasks a chance. greenthread.sleep(0) return total def _get_supported_vgpu_types(self): if not CONF.devices.enabled_vgpu_types: return [] # TODO(sbauza): Move this check up to compute_manager.init_host if len(CONF.devices.enabled_vgpu_types) > 1: LOG.warning('libvirt only supports one GPU type per compute node,' ' only first type will be used.') requested_types = CONF.devices.enabled_vgpu_types[:1] return requested_types def _get_vgpu_total(self): """Returns the number of total available vGPUs for any GPU type that is enabled with the enabled_vgpu_types CONF option. 
""" requested_types = self._get_supported_vgpu_types() # Bail out early if operator doesn't care about providing vGPUs if not requested_types: return 0 # Filter how many available mdevs we can create for all the supported # types. mdev_capable_devices = self._get_mdev_capable_devices(requested_types) vgpus = 0 for dev in mdev_capable_devices: for _type in dev['types']: vgpus += dev['types'][_type]['availableInstances'] # Count the already created (but possibly not assigned to a guest) # mdevs for all the supported types mediated_devices = self._get_mediated_devices(requested_types) vgpus += len(mediated_devices) return vgpus def _get_instance_capabilities(self): """Get hypervisor instance capabilities Returns a list of tuples that describe instances the hypervisor is capable of hosting. Each tuple consists of the triplet (arch, hypervisor_type, vm_mode). Supported hypervisor_type is filtered by virt_type, a parameter set by operators via `nova.conf`. :returns: List of tuples describing instance capabilities """ caps = self._host.get_capabilities() instance_caps = list() for g in caps.guests: for dt in g.domtype: if dt != CONF.libvirt.virt_type: continue instance_cap = ( fields.Architecture.canonicalize(g.arch), fields.HVType.canonicalize(dt), fields.VMMode.canonicalize(g.ostype)) instance_caps.append(instance_cap) return instance_caps def _get_cpu_info(self): """Get cpuinfo information. Obtains cpu feature from virConnect.getCapabilities. :return: see above description """ caps = self._host.get_capabilities() cpu_info = dict() cpu_info['arch'] = caps.host.cpu.arch cpu_info['model'] = caps.host.cpu.model cpu_info['vendor'] = caps.host.cpu.vendor topology = dict() topology['cells'] = len(getattr(caps.host.topology, 'cells', [1])) topology['sockets'] = caps.host.cpu.sockets topology['cores'] = caps.host.cpu.cores topology['threads'] = caps.host.cpu.threads cpu_info['topology'] = topology features = set() for f in caps.host.cpu.features: features.add(f.name) cpu_info['features'] = features return cpu_info def _get_pcinet_info(self, vf_address): """Returns a dict of NET device.""" devname = pci_utils.get_net_name_by_vf_pci_address(vf_address) if not devname: return virtdev = self._host.device_lookup_by_name(devname) xmlstr = virtdev.XMLDesc(0) cfgdev = vconfig.LibvirtConfigNodeDevice() cfgdev.parse_str(xmlstr) return {'name': cfgdev.name, 'capabilities': cfgdev.pci_capability.features} def _get_pcidev_info(self, devname): """Returns a dict of PCI device.""" def _get_device_type(cfgdev, pci_address): """Get a PCI device's device type. An assignable PCI device can be a normal PCI device, a SR-IOV Physical Function (PF), or a SR-IOV Virtual Function (VF). Only normal PCI devices or SR-IOV VFs are assignable, while SR-IOV PFs are always owned by hypervisor. """ for fun_cap in cfgdev.pci_capability.fun_capability: if fun_cap.type == 'virt_functions': return { 'dev_type': fields.PciDeviceType.SRIOV_PF, } if (fun_cap.type == 'phys_function' and len(fun_cap.device_addrs) != 0): phys_address = "%04x:%02x:%02x.%01x" % ( fun_cap.device_addrs[0][0], fun_cap.device_addrs[0][1], fun_cap.device_addrs[0][2], fun_cap.device_addrs[0][3]) return { 'dev_type': fields.PciDeviceType.SRIOV_VF, 'parent_addr': phys_address, } # Note(moshele): libvirt < 1.3 reported virt_functions capability # only when VFs are enabled. The check below is a workaround # to get the correct report regardless of whether or not any # VFs are enabled for the device. 
if not self._host.has_min_version( MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION): is_physical_function = pci_utils.is_physical_function( *pci_utils.get_pci_address_fields(pci_address)) if is_physical_function: return {'dev_type': fields.PciDeviceType.SRIOV_PF} return {'dev_type': fields.PciDeviceType.STANDARD} def _get_device_capabilities(device, address): """Get PCI VF device's additional capabilities. If a PCI device is a virtual function, this function reads the PCI parent's network capabilities (must be always a NIC device) and appends this information to the device's dictionary. """ if device.get('dev_type') == fields.PciDeviceType.SRIOV_VF: pcinet_info = self._get_pcinet_info(address) if pcinet_info: return {'capabilities': {'network': pcinet_info.get('capabilities')}} return {} virtdev = self._host.device_lookup_by_name(devname) xmlstr = virtdev.XMLDesc(0) cfgdev = vconfig.LibvirtConfigNodeDevice() cfgdev.parse_str(xmlstr) address = "%04x:%02x:%02x.%1x" % ( cfgdev.pci_capability.domain, cfgdev.pci_capability.bus, cfgdev.pci_capability.slot, cfgdev.pci_capability.function) device = { "dev_id": cfgdev.name, "address": address, "product_id": "%04x" % cfgdev.pci_capability.product_id, "vendor_id": "%04x" % cfgdev.pci_capability.vendor_id, } device["numa_node"] = cfgdev.pci_capability.numa_node # requirement by DataBase Model device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device device.update(_get_device_type(cfgdev, address)) device.update(_get_device_capabilities(device, address)) return device def _get_pci_passthrough_devices(self): """Get host PCI devices information. Obtains pci devices information from libvirt, and returns as a JSON string. Each device information is a dictionary, with mandatory keys of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id', 'label' and other optional device specific information. Refer to the objects/pci_device.py for more idea of these keys. :returns: a JSON string containing a list of the assignable PCI devices information """ # Bail early if we know we can't support `listDevices` to avoid # repeated warnings within a periodic task if not getattr(self, '_list_devices_supported', True): return jsonutils.dumps([]) try: dev_names = self._host.list_pci_devices() or [] except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_SUPPORT: self._list_devices_supported = False LOG.warning("URI %(uri)s does not support " "listDevices: %(error)s", {'uri': self._uri(), 'error': encodeutils.exception_to_unicode(ex)}) return jsonutils.dumps([]) else: raise pci_info = [] for name in dev_names: pci_info.append(self._get_pcidev_info(name)) return jsonutils.dumps(pci_info) def _get_mdev_capabilities_for_dev(self, devname, types=None): """Returns a dict of MDEV capable device with the ID as first key and then a list of supported types, each of them being a dict. :param types: Only return those specific types. """ virtdev = self._host.device_lookup_by_name(devname) xmlstr = virtdev.XMLDesc(0) cfgdev = vconfig.LibvirtConfigNodeDevice() cfgdev.parse_str(xmlstr) device = { "dev_id": cfgdev.name, "types": {}, } for mdev_cap in cfgdev.pci_capability.mdev_capability: for cap in mdev_cap.mdev_types: if not types or cap['type'] in types: device["types"].update({cap['type']: { 'availableInstances': cap['availableInstances'], 'name': cap['name'], 'deviceAPI': cap['deviceAPI']}}) return device def _get_mdev_capable_devices(self, types=None): """Get host devices supporting mdev types. 
Obtain devices information from libvirt and returns a list of dictionaries. :param types: Filter only devices supporting those types. """ if not self._host.has_min_version(MIN_LIBVIRT_MDEV_SUPPORT): return [] dev_names = self._host.list_mdev_capable_devices() or [] mdev_capable_devices = [] for name in dev_names: device = self._get_mdev_capabilities_for_dev(name, types) if not device["types"]: continue mdev_capable_devices.append(device) return mdev_capable_devices def _get_mediated_device_information(self, devname): """Returns a dict of a mediated device.""" virtdev = self._host.device_lookup_by_name(devname) xmlstr = virtdev.XMLDesc(0) cfgdev = vconfig.LibvirtConfigNodeDevice() cfgdev.parse_str(xmlstr) device = { "dev_id": cfgdev.name, # name is like mdev_00ead764_fdc0_46b6_8db9_2963f5c815b4 "uuid": str(uuid.UUID(cfgdev.name[5:].replace('_', '-'))), "type": cfgdev.mdev_information.type, "iommu_group": cfgdev.mdev_information.iommu_group, } return device def _get_mediated_devices(self, types=None): """Get host mediated devices. Obtain devices information from libvirt and returns a list of dictionaries. :param types: Filter only devices supporting those types. """ if not self._host.has_min_version(MIN_LIBVIRT_MDEV_SUPPORT): return [] dev_names = self._host.list_mediated_devices() or [] mediated_devices = [] for name in dev_names: device = self._get_mediated_device_information(name) if not types or device["type"] in types: mediated_devices.append(device) return mediated_devices def _get_all_assigned_mediated_devices(self, instance=None): """Lookup all instances from the host and return all the mediated devices that are assigned to a guest. :param instance: Only return mediated devices for that instance. :returns: A dictionary of keys being mediated device UUIDs and their respective values the instance UUID of the guest using it. """ allocated_mdevs = {} if instance: guest = self._host.get_guest(instance) guests = [guest] else: guests = self._host.list_guests(only_running=False) for guest in guests: cfg = guest.get_config() for device in cfg.devices: if isinstance(device, vconfig.LibvirtConfigGuestHostdevMDEV): allocated_mdevs[device.uuid] = guest.uuid return allocated_mdevs @staticmethod def _vgpu_allocations(allocations): """Filtering only the VGPU allocations from a list of allocations. :param allocations: Information about resources allocated to the instance via placement, of the form returned by SchedulerReportClient.get_allocations_for_consumer. """ if not allocations: # If no allocations, there is no vGPU request. return {} RC_VGPU = rc_fields.ResourceClass.VGPU vgpu_allocations = {} for rp in allocations: res = allocations[rp]['resources'] if RC_VGPU in res and res[RC_VGPU] > 0: vgpu_allocations[rp] = {'resources': {RC_VGPU: res[RC_VGPU]}} return vgpu_allocations def _get_existing_mdevs_not_assigned(self, requested_types=None): """Returns the already created mediated devices that are not assigned to a guest yet. :param requested_types: Filter out the result for only mediated devices having those types. """ allocated_mdevs = self._get_all_assigned_mediated_devices() mdevs = self._get_mediated_devices(requested_types) available_mdevs = set([mdev["uuid"] for mdev in mdevs]) - set(allocated_mdevs) return available_mdevs def _create_new_mediated_device(self, requested_types, uuid=None): """Find a physical device that can support a new mediated device and create it. :param requested_types: Filter only capable devices supporting those types. 
:param uuid: The possible mdev UUID we want to create again :returns: the newly created mdev UUID or None if not possible """ # Try to see if we can still create a new mediated device devices = self._get_mdev_capable_devices(requested_types) for device in devices: # For the moment, the libvirt driver only supports one # type per host # TODO(sbauza): Once we support more than one type, make # sure we look at the flavor/trait for the asked type. asked_type = requested_types[0] if device['types'][asked_type]['availableInstances'] > 0: # That physical GPU has enough room for a new mdev dev_name = device['dev_id'] # We need the PCI address, not the libvirt name # The libvirt name is like 'pci_0000_84_00_0' pci_addr = "{}:{}:{}.{}".format(*dev_name[4:].split('_')) chosen_mdev = nova.privsep.libvirt.create_mdev(pci_addr, asked_type, uuid=uuid) return chosen_mdev @utils.synchronized(VGPU_RESOURCE_SEMAPHORE) def _allocate_mdevs(self, allocations): """Returns a list of mediated device UUIDs corresponding to available resources we can assign to the guest(s) corresponding to the allocation requests passed as argument. That method can either find an existing but unassigned mediated device it can allocate, or create a new mediated device from a capable physical device if the latter has enough left capacity. :param allocations: Information about resources allocated to the instance via placement, of the form returned by SchedulerReportClient.get_allocations_for_consumer. That code is supporting Placement API version 1.12 """ vgpu_allocations = self._vgpu_allocations(allocations) if not vgpu_allocations: return # TODO(sbauza): Once we have nested resource providers, find which one # is having the related allocation for the specific VGPU type. # For the moment, we should only have one allocation for # ResourceProvider. # TODO(sbauza): Iterate over all the allocations once we have # nested Resource Providers. For the moment, just take the first. if len(vgpu_allocations) > 1: LOG.warning('More than one allocation was passed over to libvirt ' 'while at the moment libvirt only supports one. Only ' 'the first allocation will be looked up.') alloc = six.next(six.itervalues(vgpu_allocations)) vgpus_asked = alloc['resources'][rc_fields.ResourceClass.VGPU] requested_types = self._get_supported_vgpu_types() # Which mediated devices are created but not assigned to a guest ? mdevs_available = self._get_existing_mdevs_not_assigned( requested_types) chosen_mdevs = [] for c in six.moves.range(vgpus_asked): chosen_mdev = None if mdevs_available: # Take the first available mdev chosen_mdev = mdevs_available.pop() else: chosen_mdev = self._create_new_mediated_device(requested_types) if not chosen_mdev: # If we can't find devices having available VGPUs, just raise raise exception.ComputeResourcesUnavailable( reason='vGPU resource is not available') else: chosen_mdevs.append(chosen_mdev) return chosen_mdevs def _detach_mediated_devices(self, guest): mdevs = guest.get_all_devices( devtype=vconfig.LibvirtConfigGuestHostdevMDEV) for mdev_cfg in mdevs: try: guest.detach_device(mdev_cfg, live=True) except libvirt.libvirtError as ex: error_code = ex.get_error_code() # NOTE(sbauza): There is a pending issue with libvirt that # doesn't allow to hot-unplug mediated devices. Let's # short-circuit the suspend action and set the instance back # to ACTIVE. # TODO(sbauza): Once libvirt supports this, amend the resume() # operation to support reallocating mediated devices. 
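# Standalone sketch (not the driver code) of the libvirt nodedev name to PCI
# address conversion used when creating a new mediated device above, reusing
# the 'pci_0000_84_00_0' example already quoted in the comment.
dev_name = 'pci_0000_84_00_0'
pci_addr = "{}:{}:{}.{}".format(*dev_name[4:].split('_'))
assert pci_addr == '0000:84:00.0'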
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED: reason = _("Suspend is not supported for instances having " "attached vGPUs.") raise exception.InstanceFaultRollback( exception.InstanceSuspendFailure(reason=reason)) else: raise def _has_numa_support(self): # This means that the host can support LibvirtConfigGuestNUMATune # and the nodeset field in LibvirtConfigGuestMemoryBackingPage for ver in BAD_LIBVIRT_NUMA_VERSIONS: if self._host.has_version(ver): if not getattr(self, '_bad_libvirt_numa_version_warn', False): LOG.warning('You are running with libvirt version %s ' 'which is known to have broken NUMA support. ' 'Consider patching or updating libvirt on ' 'this host if you need NUMA support.', libvirt_utils.version_to_string(ver)) self._bad_libvirt_numa_version_warn = True return False caps = self._host.get_capabilities() if (caps.host.cpu.arch in (fields.Architecture.I686, fields.Architecture.X86_64, fields.Architecture.AARCH64) and self._host.has_min_version(hv_type=host.HV_DRIVER_QEMU)): return True elif (caps.host.cpu.arch in (fields.Architecture.PPC64, fields.Architecture.PPC64LE) and self._host.has_min_version(MIN_LIBVIRT_NUMA_VERSION_PPC, hv_type=host.HV_DRIVER_QEMU)): return True return False def _get_host_numa_topology(self): if not self._has_numa_support(): return caps = self._host.get_capabilities() topology = caps.host.topology if topology is None or not topology.cells: return cells = [] allowed_cpus = hardware.get_vcpu_pin_set() online_cpus = self._host.get_online_cpus() if allowed_cpus: allowed_cpus &= online_cpus else: allowed_cpus = online_cpus def _get_reserved_memory_for_cell(self, cell_id, page_size): cell = self._reserved_hugepages.get(cell_id, {}) return cell.get(page_size, 0) for cell in topology.cells: cpuset = set(cpu.id for cpu in cell.cpus) siblings = sorted(map(set, set(tuple(cpu.siblings) if cpu.siblings else () for cpu in cell.cpus) )) cpuset &= allowed_cpus siblings = [sib & allowed_cpus for sib in siblings] # Filter out empty sibling sets that may be left siblings = [sib for sib in siblings if len(sib) > 0] mempages = [ objects.NUMAPagesTopology( size_kb=pages.size, total=pages.total, used=0, reserved=_get_reserved_memory_for_cell( self, cell.id, pages.size)) for pages in cell.mempages] cell = objects.NUMACell(id=cell.id, cpuset=cpuset, memory=cell.memory / units.Ki, cpu_usage=0, memory_usage=0, siblings=siblings, pinned_cpus=set([]), mempages=mempages) cells.append(cell) return objects.NUMATopology(cells=cells) def get_all_volume_usage(self, context, compute_host_bdms): """Return usage info for volumes attached to vms on a given host. 
""" vol_usage = [] for instance_bdms in compute_host_bdms: instance = instance_bdms['instance'] for bdm in instance_bdms['instance_bdms']: mountpoint = bdm['device_name'] if mountpoint.startswith('/dev/'): mountpoint = mountpoint[5:] volume_id = bdm['volume_id'] LOG.debug("Trying to get stats for the volume %s", volume_id, instance=instance) vol_stats = self.block_stats(instance, mountpoint) if vol_stats: stats = dict(volume=volume_id, instance=instance, rd_req=vol_stats[0], rd_bytes=vol_stats[1], wr_req=vol_stats[2], wr_bytes=vol_stats[3]) LOG.debug( "Got volume usage stats for the volume=%(volume)s," " rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, " "wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d", stats, instance=instance) vol_usage.append(stats) return vol_usage def block_stats(self, instance, disk_id): """Note that this function takes an instance name.""" try: guest = self._host.get_guest(instance) # TODO(sahid): We are converting all calls from a # virDomain object to use nova.virt.libvirt.Guest. # We should be able to remove domain at the end. domain = guest._domain return domain.blockStats(disk_id) except libvirt.libvirtError as e: errcode = e.get_error_code() LOG.info('Getting block stats failed, device might have ' 'been detached. Instance=%(instance_name)s ' 'Disk=%(disk)s Code=%(errcode)s Error=%(e)s', {'instance_name': instance.name, 'disk': disk_id, 'errcode': errcode, 'e': e}, instance=instance) except exception.InstanceNotFound: LOG.info('Could not find domain in libvirt for instance %s. ' 'Cannot get block stats for device', instance.name, instance=instance) def get_console_pool_info(self, console_type): # TODO(mdragon): console proxy should be implemented for libvirt, # in case someone wants to use it with kvm or # such. For now return fake data. return {'address': '127.0.0.1', 'username': 'fakeuser', 'password': 'fakepassword'} def refresh_security_group_rules(self, security_group_id): self.firewall_driver.refresh_security_group_rules(security_group_id) def refresh_instance_security_rules(self, instance): self.firewall_driver.refresh_instance_security_rules(instance) def get_inventory(self, nodename): """Return a dict, keyed by resource class, of inventory information for the supplied node. """ disk_gb = int(self._get_local_gb_info()['total']) memory_mb = int(self._host.get_memory_mb_total()) vcpus = self._get_vcpu_total() # NOTE(sbauza): For the moment, the libvirt driver only supports # providing the total number of virtual GPUs for a single GPU type. If # you have multiple physical GPUs, each of them providing multiple GPU # types, libvirt will return the total sum of virtual GPUs # corresponding to the single type passed in enabled_vgpu_types # configuration option. Eg. if you have 2 pGPUs supporting 'nvidia-35', # each of them having 16 available instances, the total here will be # 32. # If one of the 2 pGPUs doesn't support 'nvidia-35', it won't be used. # TODO(sbauza): Use ProviderTree and traits to make a better world. vgpus = self._get_vgpu_total() # NOTE(jaypipes): We leave some fields like allocation_ratio and # reserved out of the returned dicts here because, for now at least, # the RT injects those values into the inventory dict based on the # compute_nodes record values. 
result = { rc_fields.ResourceClass.VCPU: { 'total': vcpus, 'min_unit': 1, 'max_unit': vcpus, 'step_size': 1, }, rc_fields.ResourceClass.MEMORY_MB: { 'total': memory_mb, 'min_unit': 1, 'max_unit': memory_mb, 'step_size': 1, }, rc_fields.ResourceClass.DISK_GB: { 'total': disk_gb, 'min_unit': 1, 'max_unit': disk_gb, 'step_size': 1, }, } if vgpus > 0: # Only provide VGPU resource classes if the driver supports it. result[rc_fields.ResourceClass.VGPU] = { 'total': vgpus, 'min_unit': 1, 'max_unit': vgpus, 'step_size': 1, } return result def get_available_resource(self, nodename): """Retrieve resource information. This method is called when nova-compute launches, and as part of a periodic task that records the results in the DB. :param nodename: unused in this driver :returns: dictionary containing resource info """ disk_info_dict = self._get_local_gb_info() data = {} # NOTE(dprince): calling capabilities before getVersion works around # an initialization issue with some versions of Libvirt (1.0.5.5). # See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116 # See: https://bugs.launchpad.net/nova/+bug/1215593 data["supported_instances"] = self._get_instance_capabilities() data["vcpus"] = self._get_vcpu_total() data["memory_mb"] = self._host.get_memory_mb_total() data["local_gb"] = disk_info_dict['total'] data["vcpus_used"] = self._get_vcpu_used() data["memory_mb_used"] = self._host.get_memory_mb_used() data["local_gb_used"] = disk_info_dict['used'] data["hypervisor_type"] = self._host.get_driver_type() data["hypervisor_version"] = self._host.get_version() data["hypervisor_hostname"] = self._host.get_hostname() # TODO(berrange): why do we bother converting the # libvirt capabilities XML into a special JSON format ? # The data format is different across all the drivers # so we could just return the raw capabilities XML # which 'compare_cpu' could use directly # # That said, arch_filter.py now seems to rely on # the libvirt drivers format which suggests this # data format needs to be standardized across drivers data["cpu_info"] = jsonutils.dumps(self._get_cpu_info()) disk_free_gb = disk_info_dict['free'] disk_over_committed = self._get_disk_over_committed_size_total() available_least = disk_free_gb * units.Gi - disk_over_committed data['disk_available_least'] = available_least / units.Gi data['pci_passthrough_devices'] = \ self._get_pci_passthrough_devices() numa_topology = self._get_host_numa_topology() if numa_topology: data['numa_topology'] = numa_topology._to_json() else: data['numa_topology'] = None return data def check_instance_shared_storage_local(self, context, instance): """Check if instance files located on shared storage. This runs check on the destination host, and then calls back to the source host to check the results. :param context: security context :param instance: nova.objects.instance.Instance object :returns: - tempfile: A dict containing the tempfile info on the destination host - None: 1. If the instance path is not existing. 2. If the image backend is shared block storage type. 
""" if self.image_backend.backend().is_shared_block_storage(): return None dirpath = libvirt_utils.get_instance_path(instance) if not os.path.exists(dirpath): return None fd, tmp_file = tempfile.mkstemp(dir=dirpath) LOG.debug("Creating tmpfile %s to verify with other " "compute node that the instance is on " "the same shared storage.", tmp_file, instance=instance) os.close(fd) return {"filename": tmp_file} def check_instance_shared_storage_remote(self, context, data): return os.path.exists(data['filename']) def check_instance_shared_storage_cleanup(self, context, data): fileutils.delete_if_exists(data["filename"]) def check_can_live_migrate_destination(self, context, instance, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False): """Check if it is possible to execute live migration. This runs checks on the destination host, and then calls back to the source host to check the results. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param block_migration: if true, prepare for block migration :param disk_over_commit: if true, allow disk over commit :returns: a LibvirtLiveMigrateData object """ if disk_over_commit: disk_available_gb = dst_compute_info['local_gb'] else: disk_available_gb = dst_compute_info['disk_available_least'] disk_available_mb = ( (disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb) # Compare CPU if not instance.vcpu_model or not instance.vcpu_model.model: source_cpu_info = src_compute_info['cpu_info'] self._compare_cpu(None, source_cpu_info, instance) else: self._compare_cpu(instance.vcpu_model, None, instance) # Create file on storage, to be checked on source host filename = self._create_shared_storage_test_file(instance) data = objects.LibvirtLiveMigrateData() data.filename = filename data.image_type = CONF.libvirt.images_type data.graphics_listen_addr_vnc = CONF.vnc.server_listen data.graphics_listen_addr_spice = CONF.spice.server_listen if CONF.serial_console.enabled: data.serial_listen_addr = CONF.serial_console.proxyclient_address else: data.serial_listen_addr = None # Notes(eliqiao): block_migration and disk_over_commit are not # nullable, so just don't set them if they are None if block_migration is not None: data.block_migration = block_migration if disk_over_commit is not None: data.disk_over_commit = disk_over_commit data.disk_available_mb = disk_available_mb return data def cleanup_live_migration_destination_check(self, context, dest_check_data): """Do required cleanup on dest host after check_can_live_migrate calls :param context: security context """ filename = dest_check_data.filename self._cleanup_shared_storage_test_file(filename) def check_can_live_migrate_source(self, context, instance, dest_check_data, block_device_info=None): """Check if it is possible to execute live migration. This checks if the live migration can succeed, based on the results from check_can_live_migrate_destination. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param dest_check_data: result of check_can_live_migrate_destination :param block_device_info: result of _get_instance_block_device_info :returns: a LibvirtLiveMigrateData object """ if not isinstance(dest_check_data, migrate_data_obj.LiveMigrateData): md_obj = objects.LibvirtLiveMigrateData() md_obj.from_legacy_dict(dest_check_data) dest_check_data = md_obj # Checking shared storage connectivity # if block migration, instances_path should not be on shared storage. 
source = CONF.host dest_check_data.is_shared_instance_path = ( self._check_shared_storage_test_file( dest_check_data.filename, instance)) dest_check_data.is_shared_block_storage = ( self._is_shared_block_storage(instance, dest_check_data, block_device_info)) if 'block_migration' not in dest_check_data: dest_check_data.block_migration = ( not dest_check_data.is_on_shared_storage()) if dest_check_data.block_migration: # TODO(eliqiao): Once block_migration flag is removed from the API # we can safely remove the if condition if dest_check_data.is_on_shared_storage(): reason = _("Block migration can not be used " "with shared storage.") raise exception.InvalidLocalStorage(reason=reason, path=source) if 'disk_over_commit' in dest_check_data: self._assert_dest_node_has_enough_disk(context, instance, dest_check_data.disk_available_mb, dest_check_data.disk_over_commit, block_device_info) if block_device_info: bdm = block_device_info.get('block_device_mapping') # NOTE(pkoniszewski): libvirt from version 1.2.17 upwards # supports selective block device migration. It means that it # is possible to define subset of block devices to be copied # during migration. If they are not specified - block devices # won't be migrated. However, it does not work when live # migration is tunnelled through libvirt. if bdm and not self._host.has_min_version( MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION): # NOTE(stpierre): if this instance has mapped volumes, # we can't do a block migration, since that will result # in volumes being copied from themselves to themselves, # which is a recipe for disaster. ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION]) msg = (_('Cannot block migrate instance %(uuid)s with' ' mapped volumes. Selective block device' ' migration feature requires libvirt version' ' %(libvirt_ver)s') % {'uuid': instance.uuid, 'libvirt_ver': ver}) LOG.error(msg, instance=instance) raise exception.MigrationPreCheckError(reason=msg) # NOTE(eliqiao): Selective disk migrations are not supported # with tunnelled block migrations so we can block them early. if (bdm and (self._block_migration_flags & libvirt.VIR_MIGRATE_TUNNELLED != 0)): msg = (_('Cannot block migrate instance %(uuid)s with' ' mapped volumes. Selective block device' ' migration is not supported with tunnelled' ' block migrations.') % {'uuid': instance.uuid}) LOG.error(msg, instance=instance) raise exception.MigrationPreCheckError(reason=msg) elif not (dest_check_data.is_shared_block_storage or dest_check_data.is_shared_instance_path): reason = _("Shared storage live-migration requires either shared " "storage or boot-from-volume with no local disks.") raise exception.InvalidSharedStorage(reason=reason, path=source) # NOTE(mikal): include the instance directory name here because it # doesn't yet exist on the destination but we want to force that # same name to be used instance_path = libvirt_utils.get_instance_path(instance, relative=True) dest_check_data.instance_relative_path = instance_path # NOTE(lyarwood): Used to indicate to the dest that the src is capable # of wiring up the encrypted disk configuration for the domain. # Note that this does not require the QEMU and Libvirt versions to # decrypt LUKS to be installed on the source node. Only the Nova # utility code to generate the correct XML is required, so we can # default to True here for all computes >= Queens. 
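# Standalone sketch (not the driver code) summarising the mapped-volume
# prechecks above, with hypothetical inputs; the real checks consult the
# libvirt version and the block migration flags of the host.
has_mapped_volumes = True
supports_selective_block_migration = True   # i.e. libvirt is new enough
tunnelled = False
can_block_migrate = (not has_mapped_volumes or
                     (supports_selective_block_migration and not tunnelled))
assert can_block_migrate is True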
dest_check_data.src_supports_native_luks = True return dest_check_data def _is_shared_block_storage(self, instance, dest_check_data, block_device_info=None): """Check if all block storage of an instance can be shared between source and destination of a live migration. Returns true if the instance is volume backed and has no local disks, or if the image backend is the same on source and destination and the backend shares block storage between compute nodes. :param instance: nova.objects.instance.Instance object :param dest_check_data: dict with boolean fields image_type, is_shared_instance_path, and is_volume_backed """ if (dest_check_data.obj_attr_is_set('image_type') and CONF.libvirt.images_type == dest_check_data.image_type and self.image_backend.backend().is_shared_block_storage()): # NOTE(dgenin): currently true only for RBD image backend return True if (dest_check_data.is_shared_instance_path and self.image_backend.backend().is_file_in_instance_path()): # NOTE(angdraug): file based image backends (Flat, Qcow2) # place block device files under the instance path return True if (dest_check_data.is_volume_backed and not bool(self._get_instance_disk_info(instance, block_device_info))): return True return False def _assert_dest_node_has_enough_disk(self, context, instance, available_mb, disk_over_commit, block_device_info): """Checks if destination has enough disk for block migration.""" # Libvirt supports qcow2 disk format,which is usually compressed # on compute nodes. # Real disk image (compressed) may enlarged to "virtual disk size", # that is specified as the maximum disk size. # (See qemu-img -f path-to-disk) # Scheduler recognizes destination host still has enough disk space # if real disk size < available disk size # if disk_over_commit is True, # otherwise virtual disk size < available disk size. available = 0 if available_mb: available = available_mb * units.Mi disk_infos = self._get_instance_disk_info(instance, block_device_info) necessary = 0 if disk_over_commit: for info in disk_infos: necessary += int(info['disk_size']) else: for info in disk_infos: necessary += int(info['virt_disk_size']) # Check that available disk > necessary disk if (available - necessary) < 0: reason = (_('Unable to migrate %(instance_uuid)s: ' 'Disk of instance is too large(available' ' on destination host:%(available)s ' '< need:%(necessary)s)') % {'instance_uuid': instance.uuid, 'available': available, 'necessary': necessary}) raise exception.MigrationPreCheckError(reason=reason) def _compare_cpu(self, guest_cpu, host_cpu_str, instance): """Check the host is compatible with the requested CPU :param guest_cpu: nova.objects.VirtCPUModel or None :param host_cpu_str: JSON from _get_cpu_info() method If the 'guest_cpu' parameter is not None, this will be validated for migration compatibility with the host. Otherwise the 'host_cpu_str' JSON string will be used for validation. :returns: None. if given cpu info is not compatible to this server, raise exception. """ # NOTE(kchamart): Comparing host to guest CPU model for emulated # guests (<domain type='qemu'>) should not matter -- in this # mode (QEMU "TCG") the CPU is fully emulated in software and no # hardware acceleration, like KVM, is involved. So, skip the CPU # compatibility check for the QEMU domain type, and retain it for # KVM guests. 
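# Standalone sketch (not the driver code) of the free-space check above,
# with hypothetical disk info: with disk_over_commit the real (allocated)
# size is compared, otherwise the full virtual size must fit.
MiB = 1024 * 1024
GiB = 1024 * MiB
disk_infos = [
    {'disk_size': 3 * GiB, 'virt_disk_size': 20 * GiB},  # hypothetical qcow2
]
available = 10 * 1024 * MiB        # i.e. available_mb * units.Mi
disk_over_commit = True
key = 'disk_size' if disk_over_commit else 'virt_disk_size'
necessary = sum(int(info[key]) for info in disk_infos)
assert available - necessary >= 0  # otherwise MigrationPreCheckError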
if CONF.libvirt.virt_type not in ['kvm']: return if guest_cpu is None: info = jsonutils.loads(host_cpu_str) LOG.info('Instance launched has CPU info: %s', host_cpu_str) cpu = vconfig.LibvirtConfigCPU() cpu.arch = info['arch'] cpu.model = info['model'] cpu.vendor = info['vendor'] cpu.sockets = info['topology']['sockets'] cpu.cores = info['topology']['cores'] cpu.threads = info['topology']['threads'] for f in info['features']: cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f)) else: cpu = self._vcpu_model_to_cpu_config(guest_cpu) u = ("http://libvirt.org/html/libvirt-libvirt-host.html#" "virCPUCompareResult") m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s") # unknown character exists in xml, then libvirt complains try: cpu_xml = cpu.to_xml() LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance) ret = self._host.compare_cpu(cpu_xml) except libvirt.libvirtError as e: error_code = e.get_error_code() if error_code == libvirt.VIR_ERR_NO_SUPPORT: LOG.debug("URI %(uri)s does not support cpu comparison. " "It will be proceeded though. Error: %(error)s", {'uri': self._uri(), 'error': e}) return else: LOG.error(m, {'ret': e, 'u': u}) raise exception.MigrationPreCheckError( reason=m % {'ret': e, 'u': u}) if ret <= 0: LOG.error(m, {'ret': ret, 'u': u}) raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u}) def _create_shared_storage_test_file(self, instance): """Makes tmpfile under CONF.instances_path.""" dirpath = CONF.instances_path fd, tmp_file = tempfile.mkstemp(dir=dirpath) LOG.debug("Creating tmpfile %s to notify to other " "compute nodes that they should mount " "the same storage.", tmp_file, instance=instance) os.close(fd) return os.path.basename(tmp_file) def _check_shared_storage_test_file(self, filename, instance): """Confirms existence of the tmpfile under CONF.instances_path. Cannot confirm tmpfile return False. """ # NOTE(tpatzig): if instances_path is a shared volume that is # under heavy IO (many instances on many compute nodes), # then checking the existence of the testfile fails, # just because it takes longer until the client refreshes and new # content gets visible. # os.utime (like touch) on the directory forces the client to refresh. os.utime(CONF.instances_path, None) tmp_file = os.path.join(CONF.instances_path, filename) if not os.path.exists(tmp_file): exists = False else: exists = True LOG.debug('Check if temp file %s exists to indicate shared storage ' 'is being used for migration. Exists? %s', tmp_file, exists, instance=instance) return exists def _cleanup_shared_storage_test_file(self, filename): """Removes existence of the tmpfile under CONF.instances_path.""" tmp_file = os.path.join(CONF.instances_path, filename) os.remove(tmp_file) def ensure_filtering_rules_for_instance(self, instance, network_info): """Ensure that an instance's filtering rules are enabled. When migrating an instance, we need the filtering rules to be configured on the destination host before starting the migration. Also, when restarting the compute service, we need to ensure that filtering rules exist for all running services. 
""" self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) # nwfilters may be defined in a separate thread in the case # of libvirt non-blocking mode, so we wait for completion timeout_count = list(range(CONF.live_migration_retry_count)) while timeout_count: if self.firewall_driver.instance_filter_exists(instance, network_info): break timeout_count.pop() if len(timeout_count) == 0: msg = _('The firewall filter for %s does not exist') raise exception.InternalError(msg % instance.name) greenthread.sleep(1) def filter_defer_apply_on(self): self.firewall_driver.filter_defer_apply_on() def filter_defer_apply_off(self): self.firewall_driver.filter_defer_apply_off() def live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None): """Spawning live_migration operation for distributing high-load. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param dest: destination host :param post_method: post operation method. expected nova.compute.manager._post_live_migration. :param recover_method: recovery method when any exception occurs. expected nova.compute.manager._rollback_live_migration. :param block_migration: if true, do block migration. :param migrate_data: a LibvirtLiveMigrateData object """ # 'dest' will be substituted into 'migration_uri' so ensure # it does't contain any characters that could be used to # exploit the URI accepted by libivrt if not libvirt_utils.is_valid_hostname(dest): raise exception.InvalidHostname(hostname=dest) self._live_migration(context, instance, dest, post_method, recover_method, block_migration, migrate_data) def live_migration_abort(self, instance): """Aborting a running live-migration. :param instance: instance object that is in migration """ guest = self._host.get_guest(instance) dom = guest._domain try: dom.abortJob() except libvirt.libvirtError as e: LOG.error("Failed to cancel migration %s", encodeutils.exception_to_unicode(e), instance=instance) raise def _verify_serial_console_is_disabled(self): if CONF.serial_console.enabled: msg = _('Your destination node does not support' ' retrieving listen addresses. In order' ' for live migration to work properly you' ' must disable serial console.') raise exception.MigrationError(reason=msg) def _live_migration_operation(self, context, instance, dest, block_migration, migrate_data, guest, device_names, bandwidth): """Invoke the live migration operation :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param dest: destination host :param block_migration: if true, do block migration. :param migrate_data: a LibvirtLiveMigrateData object :param guest: the guest domain object :param device_names: list of device names that are being migrated with instance :param bandwidth: MiB/s of bandwidth allowed for the migration at start This method is intended to be run in a background thread and will block that thread until the migration is finished or failed. """ try: if migrate_data.block_migration: migration_flags = self._block_migration_flags else: migration_flags = self._live_migration_flags serial_listen_addr = libvirt_migrate.serial_listen_addr( migrate_data) if not serial_listen_addr: # In this context we want to ensure that serial console is # disabled on source node. 
This is because nova couldn't # retrieve serial listen address from destination node, so we # consider that destination node might have serial console # disabled as well. self._verify_serial_console_is_disabled() # NOTE(aplanas) migrate_uri will have a value only in the # case that `live_migration_inbound_addr` parameter is # set, and we propose a non tunneled migration. migrate_uri = None if ('target_connect_addr' in migrate_data and migrate_data.target_connect_addr is not None): dest = migrate_data.target_connect_addr if (migration_flags & libvirt.VIR_MIGRATE_TUNNELLED == 0): migrate_uri = self._migrate_uri(dest) params = None new_xml_str = None if CONF.libvirt.virt_type != "parallels": new_xml_str = libvirt_migrate.get_updated_guest_xml( # TODO(sahid): It's not a really good idea to pass # the method _get_volume_config and we should to find # a way to avoid this in future. guest, migrate_data, self._get_volume_config) if self._host.has_min_version( MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION): params = { 'destination_xml': new_xml_str, 'migrate_disks': device_names, } # NOTE(pkoniszewski): Because of precheck which blocks # tunnelled block live migration with mapped volumes we # can safely remove migrate_disks when tunnelling is on. # Otherwise we will block all tunnelled block migrations, # even when an instance does not have volumes mapped. # This is because selective disk migration is not # supported in tunnelled block live migration. Also we # cannot fallback to migrateToURI2 in this case because of # bug #1398999 if (migration_flags & libvirt.VIR_MIGRATE_TUNNELLED != 0): params.pop('migrate_disks') # TODO(sahid): This should be in # post_live_migration_at_source but no way to retrieve # ports acquired on the host for the guest at this # step. Since the domain is going to be removed from # libvird on source host after migration, we backup the # serial ports to release them if all went well. serial_ports = [] if CONF.serial_console.enabled: serial_ports = list(self._get_serial_ports_from_guest(guest)) guest.migrate(self._live_migration_uri(dest), migrate_uri=migrate_uri, flags=migration_flags, params=params, domain_xml=new_xml_str, bandwidth=bandwidth) for hostname, port in serial_ports: serial_console.release_port(host=hostname, port=port) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error("Live Migration failure: %s", e, instance=instance) # If 'migrateToURI' fails we don't know what state the # VM instances on each host are in. Possibilities include # # 1. src==running, dst==none # # Migration failed & rolled back, or never started # # 2. src==running, dst==paused # # Migration started but is still ongoing # # 3. src==paused, dst==paused # # Migration data transfer completed, but switchover # is still ongoing, or failed # # 4. src==paused, dst==running # # Migration data transfer completed, switchover # happened but cleanup on source failed # # 5. src==none, dst==running # # Migration fully succeeded. # # Libvirt will aim to complete any migration operation # or roll it back. So even if the migrateToURI call has # returned an error, if the migration was not finished # libvirt should clean up. # # So we take the error raise here with a pinch of salt # and rely on the domain job info status to figure out # what really happened to the VM, which is a much more # reliable indicator. # # In particular we need to try very hard to ensure that # Nova does not "forget" about the guest. 
ie leaving it # running on a different host to the one recorded in # the database, as that would be a serious resource leak LOG.debug("Migration operation thread has finished", instance=instance) def _live_migration_copy_disk_paths(self, context, instance, guest): '''Get list of disks to copy during migration :param context: security context :param instance: the instance being migrated :param guest: the Guest instance being migrated Get the list of disks to copy during migration. :returns: a list of local source paths and a list of device names to copy ''' disk_paths = [] device_names = [] block_devices = [] # TODO(pkoniszewski): Remove version check when we bump min libvirt # version to >= 1.2.17. if (self._block_migration_flags & libvirt.VIR_MIGRATE_TUNNELLED == 0 and self._host.has_min_version( MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION)): bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = driver.get_block_device_info(instance, bdm_list) block_device_mappings = driver.block_device_info_get_mapping( block_device_info) for bdm in block_device_mappings: device_name = str(bdm['mount_device'].rsplit('/', 1)[1]) block_devices.append(device_name) for dev in guest.get_all_disks(): if dev.readonly or dev.shareable: continue if dev.source_type not in ["file", "block"]: continue if dev.target_dev in block_devices: continue disk_paths.append(dev.source_path) device_names.append(dev.target_dev) return (disk_paths, device_names) def _live_migration_data_gb(self, instance, disk_paths): '''Calculate total amount of data to be transferred :param instance: the nova.objects.Instance being migrated :param disk_paths: list of disk paths that are being migrated with instance Calculates the total amount of data that needs to be transferred during the live migration. The actual amount copied will be larger than this, due to the guest OS continuing to dirty RAM while the migration is taking place. So this value represents the minimal data size possible. 
:returns: data size to be copied in GB ''' ram_gb = instance.flavor.memory_mb * units.Mi / units.Gi if ram_gb < 2: ram_gb = 2 disk_gb = 0 for path in disk_paths: try: size = os.stat(path).st_size size_gb = (size / units.Gi) if size_gb < 2: size_gb = 2 disk_gb += size_gb except OSError as e: LOG.warning("Unable to stat %(disk)s: %(ex)s", {'disk': path, 'ex': e}) # Ignore error since we don't want to break # the migration monitoring thread operation return ram_gb + disk_gb def _get_migration_flags(self, is_block_migration): if is_block_migration: return self._block_migration_flags return self._live_migration_flags def _live_migration_monitor(self, context, instance, guest, dest, post_method, recover_method, block_migration, migrate_data, finish_event, disk_paths): on_migration_failure = deque() data_gb = self._live_migration_data_gb(instance, disk_paths) downtime_steps = list(libvirt_migrate.downtime_steps(data_gb)) migration = migrate_data.migration curdowntime = None migration_flags = self._get_migration_flags( migrate_data.block_migration) n = 0 start = time.time() progress_time = start progress_watermark = None previous_data_remaining = -1 is_post_copy_enabled = self._is_post_copy_enabled(migration_flags) while True: info = guest.get_job_info() if info.type == libvirt.VIR_DOMAIN_JOB_NONE: # Either still running, or failed or completed, # lets untangle the mess if not finish_event.ready(): LOG.debug("Operation thread is still running", instance=instance) else: info.type = libvirt_migrate.find_job_type(guest, instance) LOG.debug("Fixed incorrect job type to be %d", info.type, instance=instance) if info.type == libvirt.VIR_DOMAIN_JOB_NONE: # Migration is not yet started LOG.debug("Migration not running yet", instance=instance) elif info.type == libvirt.VIR_DOMAIN_JOB_UNBOUNDED: # Migration is still running # # This is where we wire up calls to change live # migration status. eg change max downtime, cancel # the operation, change max bandwidth libvirt_migrate.run_tasks(guest, instance, self.active_migrations, on_migration_failure, migration, is_post_copy_enabled) now = time.time() elapsed = now - start if ((progress_watermark is None) or (progress_watermark == 0) or (progress_watermark > info.data_remaining)): progress_watermark = info.data_remaining progress_time = now progress_timeout = CONF.libvirt.live_migration_progress_timeout completion_timeout = int( CONF.libvirt.live_migration_completion_timeout * data_gb) if libvirt_migrate.should_abort(instance, now, progress_time, progress_timeout, elapsed, completion_timeout, migration.status): try: guest.abort_job() except libvirt.libvirtError as e: LOG.warning("Failed to abort migration %s", encodeutils.exception_to_unicode(e), instance=instance) self._clear_empty_migration(instance) raise if (is_post_copy_enabled and libvirt_migrate.should_switch_to_postcopy( info.memory_iteration, info.data_remaining, previous_data_remaining, migration.status)): libvirt_migrate.trigger_postcopy_switch(guest, instance, migration) previous_data_remaining = info.data_remaining curdowntime = libvirt_migrate.update_downtime( guest, instance, curdowntime, downtime_steps, elapsed) # We loop every 500ms, so don't log on every # iteration to avoid spamming logs for long # running migrations. Just once every 5 secs # is sufficient for developers to debug problems. # We log once every 30 seconds at info to help # admins see slow running migration operations # when debug logs are off. 
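# Standalone sketch (not the driver code) of the data-size estimate above,
# with hypothetical flavor and disk numbers: RAM and every copied disk are
# clamped to a minimum of 2 GB, and the completion timeout scales linearly
# with the total. The 800 seconds-per-GB figure is a hypothetical config
# value, not necessarily the default.
GiB = 1024 ** 3
ram_gb = max(2, (512 * 1024 ** 2) / GiB)   # 512 MB flavor -> clamped to 2
disk_sizes = [1 * GiB, 6 * GiB]            # hypothetical local disks
disk_gb = sum(max(2, size / GiB) for size in disk_sizes)
data_gb = ram_gb + disk_gb                 # 2 + (2 + 6) = 10
completion_timeout = int(800 * data_gb)    # CONF value * data_gb, as above
assert data_gb == 10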
if (n % 10) == 0: # Ignoring memory_processed, as due to repeated # dirtying of data, this can be way larger than # memory_total. Best to just look at what's # remaining to copy and ignore what's done already # # TODO(berrange) perhaps we could include disk # transfer stats in the progress too, but it # might make memory info more obscure as large # disk sizes might dwarf memory size remaining = 100 if info.memory_total != 0: remaining = round(info.memory_remaining * 100 / info.memory_total) libvirt_migrate.save_stats(instance, migration, info, remaining) lg = LOG.debug if (n % 60) == 0: lg = LOG.info lg("Migration running for %(secs)d secs, " "memory %(remaining)d%% remaining; " "(bytes processed=%(processed_memory)d, " "remaining=%(remaining_memory)d, " "total=%(total_memory)d)", {"secs": n / 2, "remaining": remaining, "processed_memory": info.memory_processed, "remaining_memory": info.memory_remaining, "total_memory": info.memory_total}, instance=instance) if info.data_remaining > progress_watermark: lg("Data remaining %(remaining)d bytes, " "low watermark %(watermark)d bytes " "%(last)d seconds ago", {"remaining": info.data_remaining, "watermark": progress_watermark, "last": (now - progress_time)}, instance=instance) n = n + 1 elif info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED: # Migration is all done LOG.info("Migration operation has completed", instance=instance) post_method(context, instance, dest, block_migration, migrate_data) break elif info.type == libvirt.VIR_DOMAIN_JOB_FAILED: # Migration did not succeed LOG.error("Migration operation has aborted", instance=instance) libvirt_migrate.run_recover_tasks(self._host, guest, instance, on_migration_failure) recover_method(context, instance, dest, migrate_data) break elif info.type == libvirt.VIR_DOMAIN_JOB_CANCELLED: # Migration was stopped by admin LOG.warning("Migration operation was cancelled", instance=instance) libvirt_migrate.run_recover_tasks(self._host, guest, instance, on_migration_failure) recover_method(context, instance, dest, migrate_data, migration_status='cancelled') break else: LOG.warning("Unexpected migration job type: %d", info.type, instance=instance) time.sleep(0.5) self._clear_empty_migration(instance) def _clear_empty_migration(self, instance): try: del self.active_migrations[instance.uuid] except KeyError: LOG.warning("There are no records in active migrations " "for instance", instance=instance) def _live_migration(self, context, instance, dest, post_method, recover_method, block_migration, migrate_data): """Do live migration. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param dest: destination host :param post_method: post operation method. expected nova.compute.manager._post_live_migration. :param recover_method: recovery method when any exception occurs. expected nova.compute.manager._rollback_live_migration. :param block_migration: if true, do block migration. 
:param migrate_data: a LibvirtLiveMigrateData object This fires off a new thread to run the blocking migration operation, and then this thread monitors the progress of migration and controls its operation """ guest = self._host.get_guest(instance) disk_paths = [] device_names = [] if (migrate_data.block_migration and CONF.libvirt.virt_type != "parallels"): disk_paths, device_names = self._live_migration_copy_disk_paths( context, instance, guest) deadline = CONF.vif_plugging_timeout if utils.is_neutron() and deadline: # We don't generate events if CONF.vif_plugging_timeout=0 # meaning that the operator disabled using them. # In case of Linux Bridge, the agent is waiting for new # TAP devices on destination node. They are going to be # created by libvirt at the very beginning of the # live-migration process. Then receiving the events from # Neutron will ensure that everything is configured # correctly. events = self._get_neutron_events_for_live_migration( instance.get_network_info()) else: # TODO(sahid): This 'is_neutron()' condition should be # removed when nova-network will be erased from the tree # (Rocky). events = [] if events: # We start migration with the minimum bandwidth # speed. Depending on the VIF type (see: # _get_neutron_events_for_live_migration) we will wait for # Neutron to send events that confirm network is setup or # directly configure QEMU to use the maximun BW allowed. bandwidth = MIN_MIGRATION_SPEED_BW else: bandwidth = CONF.libvirt.live_migration_bandwidth try: error_cb = self._neutron_failed_live_migration_callback with self.virtapi.wait_for_instance_event(instance, events, deadline=deadline, error_callback=error_cb): opthread = utils.spawn(self._live_migration_operation, context, instance, dest, block_migration, migrate_data, guest, device_names, bandwidth) except eventlet.timeout.Timeout: msg = ('Timeout waiting for VIF plugging events, ' 'canceling migration') raise exception.MigrationError(reason=msg) else: if utils.is_neutron() and events: LOG.debug('VIF events received, continuing migration ' 'with max bandwidth configured: %d', CONF.libvirt.live_migration_bandwidth, instance=instance) # Configure QEMU to use the maximum bandwidth allowed. 
guest.migrate_configure_max_speed( CONF.libvirt.live_migration_bandwidth) finish_event = eventlet.event.Event() self.active_migrations[instance.uuid] = deque() def thread_finished(thread, event): LOG.debug("Migration operation thread notification", instance=instance) event.send() opthread.link(thread_finished, finish_event) # Let eventlet schedule the new thread right away time.sleep(0) try: LOG.debug("Starting monitoring of live migration", instance=instance) self._live_migration_monitor(context, instance, guest, dest, post_method, recover_method, block_migration, migrate_data, finish_event, disk_paths) except Exception as ex: LOG.warning("Error monitoring migration: %(ex)s", {"ex": ex}, instance=instance, exc_info=True) raise finally: LOG.debug("Live migration monitoring is all done", instance=instance) def _is_post_copy_enabled(self, migration_flags): if self._is_post_copy_available(): if (migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0: return True return False def live_migration_force_complete(self, instance): try: self.active_migrations[instance.uuid].append('force-complete') except KeyError: raise exception.NoActiveMigrationForInstance( instance_id=instance.uuid) def _try_fetch_image(self, context, path, image_id, instance, fallback_from_host=None): try: libvirt_utils.fetch_image(context, path, image_id) except exception.ImageNotFound: if not fallback_from_host: raise LOG.debug("Image %(image_id)s doesn't exist anymore on " "image service, attempting to copy image " "from %(host)s", {'image_id': image_id, 'host': fallback_from_host}) libvirt_utils.copy_image(src=path, dest=path, host=fallback_from_host, receive=True) def _fetch_instance_kernel_ramdisk(self, context, instance, fallback_from_host=None): """Download kernel and ramdisk for instance in instance directory.""" instance_dir = libvirt_utils.get_instance_path(instance) if instance.kernel_id: kernel_path = os.path.join(instance_dir, 'kernel') # NOTE(dsanders): only fetch image if it's not available at # kernel_path. This also avoids ImageNotFound exception if # the image has been deleted from glance if not os.path.exists(kernel_path): self._try_fetch_image(context, kernel_path, instance.kernel_id, instance, fallback_from_host) if instance.ramdisk_id: ramdisk_path = os.path.join(instance_dir, 'ramdisk') # NOTE(dsanders): only fetch image if it's not available at # ramdisk_path. This also avoids ImageNotFound exception if # the image has been deleted from glance if not os.path.exists(ramdisk_path): self._try_fetch_image(context, ramdisk_path, instance.ramdisk_id, instance, fallback_from_host) def rollback_live_migration_at_destination(self, context, instance, network_info, block_device_info, destroy_disks=True, migrate_data=None): """Clean up destination node after a failed live migration.""" try: self.destroy(context, instance, network_info, block_device_info, destroy_disks) finally: # NOTE(gcb): Failed block live migration may leave instance # directory at destination node, ensure it is always deleted. is_shared_instance_path = True if migrate_data: is_shared_instance_path = migrate_data.is_shared_instance_path if (migrate_data.obj_attr_is_set("serial_listen_ports") and migrate_data.serial_listen_ports): # Releases serial ports reserved. 
for port in migrate_data.serial_listen_ports: serial_console.release_port( host=migrate_data.serial_listen_addr, port=port) if not is_shared_instance_path: instance_dir = libvirt_utils.get_instance_path_at_destination( instance, migrate_data) if os.path.exists(instance_dir): shutil.rmtree(instance_dir) def pre_live_migration(self, context, instance, block_device_info, network_info, disk_info, migrate_data): """Preparation live migration.""" if disk_info is not None: disk_info = jsonutils.loads(disk_info) LOG.debug('migrate_data in pre_live_migration: %s', migrate_data, instance=instance) is_shared_block_storage = migrate_data.is_shared_block_storage is_shared_instance_path = migrate_data.is_shared_instance_path is_block_migration = migrate_data.block_migration if not is_shared_instance_path: instance_dir = libvirt_utils.get_instance_path_at_destination( instance, migrate_data) if os.path.exists(instance_dir): raise exception.DestinationDiskExists(path=instance_dir) LOG.debug('Creating instance directory: %s', instance_dir, instance=instance) os.mkdir(instance_dir) # Recreate the disk.info file and in doing so stop the # imagebackend from recreating it incorrectly by inspecting the # contents of each file when using the Raw backend. if disk_info: image_disk_info = {} for info in disk_info: image_file = os.path.basename(info['path']) image_path = os.path.join(instance_dir, image_file) image_disk_info[image_path] = info['type'] LOG.debug('Creating disk.info with the contents: %s', image_disk_info, instance=instance) image_disk_info_path = os.path.join(instance_dir, 'disk.info') libvirt_utils.write_to_file(image_disk_info_path, jsonutils.dumps(image_disk_info)) if not is_shared_block_storage: # Ensure images and backing files are present. LOG.debug('Checking to make sure images and backing files are ' 'present before live migration.', instance=instance) self._create_images_and_backing( context, instance, instance_dir, disk_info, fallback_from_host=instance.host) if (configdrive.required_by(instance) and CONF.config_drive_format == 'iso9660'): # NOTE(pkoniszewski): Due to a bug in libvirt iso config # drive needs to be copied to destination prior to # migration when instance path is not shared and block # storage is not shared. Files that are already present # on destination are excluded from a list of files that # need to be copied to destination. If we don't do that # live migration will fail on copying iso config drive to # destination and writing to read-only device. # Please see bug/1246201 for more details. src = "%s:%s/disk.config" % (instance.host, instance_dir) self._remotefs.copy_file(src, instance_dir) if not is_block_migration: # NOTE(angdraug): when block storage is shared between source # and destination and instance path isn't (e.g. volume backed # or rbd backed instance), instance path on destination has to # be prepared # Required by Quobyte CI self._ensure_console_log_for_instance(instance) # if image has kernel and ramdisk, just download # following normal way. self._fetch_instance_kernel_ramdisk(context, instance) # Establishing connection to volume server. block_device_mapping = driver.block_device_info_get_mapping( block_device_info) if len(block_device_mapping): LOG.debug('Connecting volumes before live migration.', instance=instance) for bdm in block_device_mapping: connection_info = bdm['connection_info'] # NOTE(lyarwood): Handle the P to Q LM during upgrade use case # where an instance has encrypted volumes attached using the # os-brick encryptors. 
Do not attempt to attach the encrypted # volume using native LUKS decryption on the destionation. src_native_luks = False if migrate_data.obj_attr_is_set('src_supports_native_luks'): src_native_luks = migrate_data.src_supports_native_luks dest_native_luks = self._is_native_luks_available() allow_native_luks = src_native_luks and dest_native_luks self._connect_volume(context, connection_info, instance, allow_native_luks=allow_native_luks) # We call plug_vifs before the compute manager calls # ensure_filtering_rules_for_instance, to ensure bridge is set up # Retry operation is necessary because continuously request comes, # concurrent request occurs to iptables, then it complains. LOG.debug('Plugging VIFs before live migration.', instance=instance) max_retry = CONF.live_migration_retry_count for cnt in range(max_retry): try: self.plug_vifs(instance, network_info) break except processutils.ProcessExecutionError: if cnt == max_retry - 1: raise else: LOG.warning('plug_vifs() failed %(cnt)d. Retry up to ' '%(max_retry)d.', {'cnt': cnt, 'max_retry': max_retry}, instance=instance) greenthread.sleep(1) # Store server_listen and latest disk device info if not migrate_data: migrate_data = objects.LibvirtLiveMigrateData(bdms=[]) else: migrate_data.bdms = [] # Store live_migration_inbound_addr migrate_data.target_connect_addr = \ CONF.libvirt.live_migration_inbound_addr migrate_data.supported_perf_events = self._supported_perf_events migrate_data.serial_listen_ports = [] if CONF.serial_console.enabled: num_ports = hardware.get_number_of_serial_ports( instance.flavor, instance.image_meta) for port in six.moves.range(num_ports): migrate_data.serial_listen_ports.append( serial_console.acquire_port( migrate_data.serial_listen_addr)) for vol in block_device_mapping: connection_info = vol['connection_info'] if connection_info.get('serial'): disk_info = blockinfo.get_info_from_bdm( instance, CONF.libvirt.virt_type, instance.image_meta, vol) bdmi = objects.LibvirtLiveMigrateBDMInfo() bdmi.serial = connection_info['serial'] bdmi.connection_info = connection_info bdmi.bus = disk_info['bus'] bdmi.dev = disk_info['dev'] bdmi.type = disk_info['type'] bdmi.format = disk_info.get('format') bdmi.boot_index = disk_info.get('boot_index') volume_secret = self._host.find_secret('volume', vol.volume_id) if volume_secret: bdmi.encryption_secret_uuid = volume_secret.UUIDString() migrate_data.bdms.append(bdmi) return migrate_data def _try_fetch_image_cache(self, image, fetch_func, context, filename, image_id, instance, size, fallback_from_host=None): try: image.cache(fetch_func=fetch_func, context=context, filename=filename, image_id=image_id, size=size) except exception.ImageNotFound: if not fallback_from_host: raise LOG.debug("Image %(image_id)s doesn't exist anymore " "on image service, attempting to copy " "image from %(host)s", {'image_id': image_id, 'host': fallback_from_host}, instance=instance) def copy_from_host(target): libvirt_utils.copy_image(src=target, dest=target, host=fallback_from_host, receive=True) image.cache(fetch_func=copy_from_host, filename=filename) def _create_images_and_backing(self, context, instance, instance_dir, disk_info, fallback_from_host=None): """:param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. 
:param instance_dir: instance path to use, calculated externally to handle block migrating an instance with an old style instance path :param disk_info: disk info specified in _get_instance_disk_info_from_config (list of dicts) :param fallback_from_host: host where we can retrieve images if the glance images are not available. """ # Virtuozzo containers don't use backing file if (CONF.libvirt.virt_type == "parallels" and instance.vm_mode == fields.VMMode.EXE): return if not disk_info: disk_info = [] for info in disk_info: base = os.path.basename(info['path']) # Get image type and create empty disk image, and # create backing file in case of qcow2. instance_disk = os.path.join(instance_dir, base) if not info['backing_file'] and not os.path.exists(instance_disk): libvirt_utils.create_image(info['type'], instance_disk, info['virt_disk_size']) elif info['backing_file']: # Creating backing file follows same way as spawning instances. cache_name = os.path.basename(info['backing_file']) disk = self.image_backend.by_name(instance, instance_disk, CONF.libvirt.images_type) if cache_name.startswith('ephemeral'): # The argument 'size' is used by image.cache to # validate disk size retrieved from cache against # the instance disk size (should always return OK) # and ephemeral_size is used by _create_ephemeral # to build the image if the disk is not already # cached. disk.cache( fetch_func=self._create_ephemeral, fs_label=cache_name, os_type=instance.os_type, filename=cache_name, size=info['virt_disk_size'], ephemeral_size=info['virt_disk_size'] / units.Gi) elif cache_name.startswith('swap'): inst_type = instance.get_flavor() swap_mb = inst_type.swap disk.cache(fetch_func=self._create_swap, filename="swap_%s" % swap_mb, size=swap_mb * units.Mi, swap_mb=swap_mb) else: self._try_fetch_image_cache(disk, libvirt_utils.fetch_image, context, cache_name, instance.image_ref, instance, info['virt_disk_size'], fallback_from_host) # if disk has kernel and ramdisk, just download # following normal way. self._fetch_instance_kernel_ramdisk( context, instance, fallback_from_host=fallback_from_host) def post_live_migration(self, context, instance, block_device_info, migrate_data=None): # Disconnect from volume server block_device_mapping = driver.block_device_info_get_mapping( block_device_info) volume_api = self._volume_api for vol in block_device_mapping: volume_id = vol['connection_info']['serial'] if vol['attachment_id'] is None: # Cinder v2 api flow: Retrieve connection info from Cinder's # initialize_connection API. The info returned will be # accurate for the source server. connector = self.get_volume_connector(instance) connection_info = volume_api.initialize_connection( context, volume_id, connector) else: # cinder v3.44 api flow: Retrieve the connection_info for # the old attachment from cinder. old_attachment_id = \ migrate_data.old_vol_attachment_ids[volume_id] old_attachment = volume_api.attachment_get( context, old_attachment_id) connection_info = old_attachment['connection_info'] # TODO(leeantho) The following multipath_id logic is temporary # and will be removed in the future once os-brick is updated # to handle multipath for drivers in a more efficient way. # For now this logic is needed to ensure the connection info # data is correct. # Pull out multipath_id from the bdm information. The # multipath_id can be placed into the connection info # because it is based off of the volume and will be the # same on the source and destination hosts. 
if 'multipath_id' in vol['connection_info']['data']: multipath_id = vol['connection_info']['data']['multipath_id'] connection_info['data']['multipath_id'] = multipath_id self._disconnect_volume(context, connection_info, instance) def post_live_migration_at_source(self, context, instance, network_info): """Unplug VIFs from networks at source. :param context: security context :param instance: instance object reference :param network_info: instance network information """ self.unplug_vifs(instance, network_info) def post_live_migration_at_destination(self, context, instance, network_info, block_migration=False, block_device_info=None): """Post operation of live migration at destination host. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param network_info: instance network information :param block_migration: if true, post operation of block_migration. """ # The source node set the VIR_MIGRATE_PERSIST_DEST flag when live # migrating so the guest xml should already be persisted on the # destination host, so just perform a sanity check to make sure it # made it as expected. self._host.get_guest(instance) def _get_instance_disk_info_from_config(self, guest_config, block_device_info): """Get the non-volume disk information from the domain xml :param LibvirtConfigGuest guest_config: the libvirt domain config for the instance :param dict block_device_info: block device info for BDMs :returns disk_info: list of dicts with keys: * 'type': the disk type (str) * 'path': the disk path (str) * 'virt_disk_size': the virtual disk size (int) * 'backing_file': backing file of a disk image (str) * 'disk_size': physical disk size (int) * 'over_committed_disk_size': virt_disk_size - disk_size or 0 """ block_device_mapping = driver.block_device_info_get_mapping( block_device_info) volume_devices = set() for vol in block_device_mapping: disk_dev = vol['mount_device'].rpartition("/")[2] volume_devices.add(disk_dev) disk_info = [] if (guest_config.virt_type == 'parallels' and guest_config.os_type == fields.VMMode.EXE): node_type = 'filesystem' else: node_type = 'disk' for device in guest_config.devices: if device.root_name != node_type: continue disk_type = device.source_type if device.root_name == 'filesystem': target = device.target_dir if device.source_type == 'file': path = device.source_file elif device.source_type == 'block': path = device.source_dev else: path = None else: target = device.target_dev path = device.source_path if not path: LOG.debug('skipping disk for %s as it does not have a path', guest_config.name) continue if disk_type not in ['file', 'block']: LOG.debug('skipping disk because it looks like a volume', path) continue if target in volume_devices: LOG.debug('skipping disk %(path)s (%(target)s) as it is a ' 'volume', {'path': path, 'target': target}) continue if device.root_name == 'filesystem': driver_type = device.driver_type else: driver_type = device.driver_format # get the real disk size or # raise a localized error if image is unavailable if disk_type == 'file': if driver_type == 'ploop': dk_size = 0 for dirpath, dirnames, filenames in os.walk(path): for f in filenames: fp = os.path.join(dirpath, f) dk_size += os.path.getsize(fp) else: dk_size = disk_api.get_allocated_disk_size(path) elif disk_type == 'block' and block_device_info: dk_size = lvm.get_volume_size(path) else: LOG.debug('skipping disk %(path)s (%(target)s) - unable to ' 'determine if volume', {'path': path, 'target': target}) continue if 
driver_type in ("qcow2", "ploop"): backing_file = libvirt_utils.get_disk_backing_file(path) virt_size = disk_api.get_disk_size(path) over_commit_size = int(virt_size) - dk_size else: backing_file = "" virt_size = dk_size over_commit_size = 0 disk_info.append({'type': driver_type, 'path': path, 'virt_disk_size': virt_size, 'backing_file': backing_file, 'disk_size': dk_size, 'over_committed_disk_size': over_commit_size}) return disk_info def _get_instance_disk_info(self, instance, block_device_info): try: guest = self._host.get_guest(instance) config = guest.get_config() except libvirt.libvirtError as ex: error_code = ex.get_error_code() LOG.warning('Error from libvirt while getting description of ' '%(instance_name)s: [Error Code %(error_code)s] ' '%(ex)s', {'instance_name': instance.name, 'error_code': error_code, 'ex': encodeutils.exception_to_unicode(ex)}, instance=instance) raise exception.InstanceNotFound(instance_id=instance.uuid) return self._get_instance_disk_info_from_config(config, block_device_info) def get_instance_disk_info(self, instance, block_device_info=None): return jsonutils.dumps( self._get_instance_disk_info(instance, block_device_info)) def _get_disk_over_committed_size_total(self): """Return total over committed disk size for all instances.""" # Disk size that all instance uses : virtual_size - disk_size disk_over_committed_size = 0 instance_domains = self._host.list_instance_domains(only_running=False) if not instance_domains: return disk_over_committed_size # Get all instance uuids instance_uuids = [dom.UUIDString() for dom in instance_domains] ctx = nova_context.get_admin_context() # Get instance object list by uuid filter filters = {'uuid': instance_uuids} # NOTE(ankit): objects.InstanceList.get_by_filters method is # getting called twice one is here and another in the # _update_available_resource method of resource_tracker. Since # _update_available_resource method is synchronized, there is a # possibility the instances list retrieved here to calculate # disk_over_committed_size would differ to the list you would get # in _update_available_resource method for calculating usages based # on instance utilization. local_instance_list = objects.InstanceList.get_by_filters( ctx, filters, use_slave=True) # Convert instance list to dictionary with instance uuid as key. 
local_instances = {inst.uuid: inst for inst in local_instance_list} # Get bdms by instance uuids bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid( ctx, instance_uuids) for dom in instance_domains: try: guest = libvirt_guest.Guest(dom) config = guest.get_config() block_device_info = None if guest.uuid in local_instances \ and (bdms and guest.uuid in bdms): # Get block device info for instance block_device_info = driver.get_block_device_info( local_instances[guest.uuid], bdms[guest.uuid]) disk_infos = self._get_instance_disk_info_from_config( config, block_device_info) if not disk_infos: continue for info in disk_infos: disk_over_committed_size += int( info['over_committed_disk_size']) except libvirt.libvirtError as ex: error_code = ex.get_error_code() LOG.warning( 'Error from libvirt while getting description of ' '%(instance_name)s: [Error Code %(error_code)s] %(ex)s', {'instance_name': guest.name, 'error_code': error_code, 'ex': encodeutils.exception_to_unicode(ex)}) except OSError as e: if e.errno in (errno.ENOENT, errno.ESTALE): LOG.warning('Periodic task is updating the host stat, ' 'it is trying to get disk %(i_name)s, ' 'but disk file was removed by concurrent ' 'operations such as resize.', {'i_name': guest.name}) elif e.errno == errno.EACCES: LOG.warning('Periodic task is updating the host stat, ' 'it is trying to get disk %(i_name)s, ' 'but access is denied. It is most likely ' 'due to a VM that exists on the compute ' 'node but is not managed by Nova.', {'i_name': guest.name}) else: raise except exception.VolumeBDMPathNotFound as e: LOG.warning('Periodic task is updating the host stats, ' 'it is trying to get disk info for %(i_name)s, ' 'but the backing volume block device was removed ' 'by concurrent operations such as resize. ' 'Error: %(error)s', {'i_name': guest.name, 'error': e}) except exception.DiskNotFound: with excutils.save_and_reraise_exception() as err_ctxt: # If the instance is undergoing a task state transition, # like moving to another host or is being deleted, we # should ignore this instance and move on. if guest.uuid in local_instances: inst = local_instances[guest.uuid] if inst.task_state is not None: LOG.info('Periodic task is updating the host ' 'stats; it is trying to get disk info ' 'for %(i_name)s, but the backing disk ' 'was removed by a concurrent operation ' '(task_state=%(task_state)s)', {'i_name': guest.name, 'task_state': inst.task_state}, instance=inst) err_ctxt.reraise = False # NOTE(gtt116): give other tasks a chance. 
greenthread.sleep(0) return disk_over_committed_size def unfilter_instance(self, instance, network_info): """See comments of same method in firewall_driver.""" self.firewall_driver.unfilter_instance(instance, network_info=network_info) def get_available_nodes(self, refresh=False): return [self._host.get_hostname()] def get_host_cpu_stats(self): """Return the current CPU state of the host.""" return self._host.get_cpu_stats() def get_host_uptime(self): """Returns the result of calling "uptime".""" out, err = utils.execute('env', 'LANG=C', 'uptime') return out def manage_image_cache(self, context, all_instances): """Manage the local cache of images.""" self.image_cache_manager.update(context, all_instances) def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize, shared_storage=False): """Used only for cleanup in case migrate_disk_and_power_off fails.""" try: if os.path.exists(inst_base_resize): shutil.rmtree(inst_base, ignore_errors=True) utils.execute('mv', inst_base_resize, inst_base) if not shared_storage: self._remotefs.remove_dir(dest, inst_base) except Exception: pass def _is_storage_shared_with(self, dest, inst_base): # NOTE (rmk): There are two methods of determining whether we are # on the same filesystem: the source and dest IP are the # same, or we create a file on the dest system via SSH # and check whether the source system can also see it. # NOTE (drwahl): Actually, there is a 3rd way: if images_type is rbd, # it will always be shared storage if CONF.libvirt.images_type == 'rbd': return True shared_storage = (dest == self.get_host_ip_addr()) if not shared_storage: tmp_file = uuidutils.generate_uuid(dashed=False) + '.tmp' tmp_path = os.path.join(inst_base, tmp_file) try: self._remotefs.create_file(dest, tmp_path) if os.path.exists(tmp_path): shared_storage = True os.unlink(tmp_path) else: self._remotefs.remove_file(dest, tmp_path) except Exception: pass return shared_storage def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None, timeout=0, retry_interval=0): LOG.debug("Starting migrate_disk_and_power_off", instance=instance) ephemerals = driver.block_device_info_get_ephemerals(block_device_info) # get_bdm_ephemeral_disk_size() will return 0 if the new # instance's requested block device mapping contain no # ephemeral devices. However, we still want to check if # the original instance's ephemeral_gb property was set and # ensure that the new requested flavor ephemeral size is greater eph_size = (block_device.get_bdm_ephemeral_disk_size(ephemerals) or instance.flavor.ephemeral_gb) # Checks if the migration needs a disk resize down. root_down = flavor.root_gb < instance.flavor.root_gb ephemeral_down = flavor.ephemeral_gb < eph_size booted_from_volume = self._is_booted_from_volume(block_device_info) if (root_down and not booted_from_volume) or ephemeral_down: reason = _("Unable to resize disk down.") raise exception.InstanceFaultRollback( exception.ResizeError(reason=reason)) # NOTE(dgenin): Migration is not implemented for LVM backed instances. if CONF.libvirt.images_type == 'lvm' and not booted_from_volume: reason = _("Migration is not supported for LVM backed instances") raise exception.InstanceFaultRollback( exception.MigrationPreCheckError(reason=reason)) # copy disks to destination # rename instance dir to +_resize at first for using # shared storage for instance dir (eg. NFS). 
inst_base = libvirt_utils.get_instance_path(instance) inst_base_resize = inst_base + "_resize" shared_storage = self._is_storage_shared_with(dest, inst_base) # try to create the directory on the remote compute node # if this fails we pass the exception up the stack so we can catch # failures here earlier if not shared_storage: try: self._remotefs.create_dir(dest, inst_base) except processutils.ProcessExecutionError as e: reason = _("not able to execute ssh command: %s") % e raise exception.InstanceFaultRollback( exception.ResizeError(reason=reason)) self.power_off(instance, timeout, retry_interval) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] self._disconnect_volume(context, connection_info, instance) disk_info = self._get_instance_disk_info(instance, block_device_info) try: utils.execute('mv', inst_base, inst_base_resize) # if we are migrating the instance with shared storage then # create the directory. If it is a remote node the directory # has already been created if shared_storage: dest = None fileutils.ensure_tree(inst_base) on_execute = lambda process: \ self.job_tracker.add_job(instance, process.pid) on_completion = lambda process: \ self.job_tracker.remove_job(instance, process.pid) for info in disk_info: # assume inst_base == dirname(info['path']) img_path = info['path'] fname = os.path.basename(img_path) from_path = os.path.join(inst_base_resize, fname) # We will not copy over the swap disk here, and rely on # finish_migration to re-create it for us. This is ok because # the OS is shut down, and as recreating a swap disk is very # cheap it is more efficient than copying either locally or # over the network. This also means we don't have to resize it. if fname == 'disk.swap': continue compression = info['type'] not in NO_COMPRESSION_TYPES libvirt_utils.copy_image(from_path, img_path, host=dest, on_execute=on_execute, on_completion=on_completion, compression=compression) # Ensure disk.info is written to the new path to avoid disks being # reinspected and potentially changing format. 
src_disk_info_path = os.path.join(inst_base_resize, 'disk.info') if os.path.exists(src_disk_info_path): dst_disk_info_path = os.path.join(inst_base, 'disk.info') libvirt_utils.copy_image(src_disk_info_path, dst_disk_info_path, host=dest, on_execute=on_execute, on_completion=on_completion) except Exception: with excutils.save_and_reraise_exception(): self._cleanup_remote_migration(dest, inst_base, inst_base_resize, shared_storage) return jsonutils.dumps(disk_info) def _wait_for_running(self, instance): state = self.get_info(instance).state if state == power_state.RUNNING: LOG.info("Instance running successfully.", instance=instance) raise loopingcall.LoopingCallDone() @staticmethod def _disk_raw_to_qcow2(path): """Converts a raw disk to qcow2.""" path_qcow = path + '_qcow' utils.execute('qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', path, path_qcow) utils.execute('mv', path_qcow, path) @staticmethod def _disk_qcow2_to_raw(path): """Converts a qcow2 disk to raw.""" path_raw = path + '_raw' utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw', path, path_raw) utils.execute('mv', path_raw, path) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info=None, power_on=True): LOG.debug("Starting finish_migration", instance=instance) block_disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta, block_device_info) # assume _create_image does nothing if a target file exists. # NOTE: This has the intended side-effect of fetching a missing # backing file. self._create_image(context, instance, block_disk_info['mapping'], block_device_info=block_device_info, ignore_bdi_for_swap=True, fallback_from_host=migration.source_compute) # Required by Quobyte CI self._ensure_console_log_for_instance(instance) gen_confdrive = functools.partial( self._create_configdrive, context, instance, InjectionInfo(admin_pass=None, network_info=network_info, files=None)) # Convert raw disks to qcow2 if migrating to host which uses # qcow2 from host which uses raw. disk_info = jsonutils.loads(disk_info) for info in disk_info: path = info['path'] disk_name = os.path.basename(path) # NOTE(mdbooth): The code below looks wrong, but is actually # required to prevent a security hole when migrating from a host # with use_cow_images=False to one with use_cow_images=True. # Imagebackend uses use_cow_images to select between the # atrociously-named-Raw and Qcow2 backends. The Qcow2 backend # writes to disk.info, but does not read it as it assumes qcow2. # Therefore if we don't convert raw to qcow2 here, a raw disk will # be incorrectly assumed to be qcow2, which is a severe security # flaw. The reverse is not true, because the atrociously-named-Raw # backend supports both qcow2 and raw disks, and will choose # appropriately between them as long as disk.info exists and is # correctly populated, which it is because Qcow2 writes to # disk.info. # # In general, we do not yet support format conversion during # migration. For example: # * Converting from use_cow_images=True to use_cow_images=False # isn't handled. This isn't a security bug, but is almost # certainly buggy in other cases, as the 'Raw' backend doesn't # expect a backing file. # * Converting to/from lvm and rbd backends is not supported. # # This behaviour is inconsistent, and therefore undesirable for # users. 
It is tightly-coupled to implementation quirks of 2 # out of 5 backends in imagebackend and defends against a severe # security flaw which is not at all obvious without deep analysis, # and is therefore undesirable to developers. We should aim to # remove it. This will not be possible, though, until we can # represent the storage layout of a specific instance # independent of the default configuration of the local compute # host. # Config disks are hard-coded to be raw even when # use_cow_images=True (see _get_disk_config_image_type),so don't # need to be converted. if (disk_name != 'disk.config' and info['type'] == 'raw' and CONF.use_cow_images): self._disk_raw_to_qcow2(info['path']) xml = self._get_guest_xml(context, instance, network_info, block_disk_info, image_meta, block_device_info=block_device_info) # NOTE(mriedem): vifs_already_plugged=True here, regardless of whether # or not we've migrated to another host, because we unplug VIFs locally # and the status change in the port might go undetected by the neutron # L2 agent (or neutron server) so neutron may not know that the VIF was # unplugged in the first place and never send an event. guest = self._create_domain_and_network(context, xml, instance, network_info, block_device_info=block_device_info, power_on=power_on, vifs_already_plugged=True, post_xml_callback=gen_confdrive) if power_on: timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_running, instance) timer.start(interval=0.5).wait() # Sync guest time after migration. guest.sync_guest_time() LOG.debug("finish_migration finished successfully.", instance=instance) def _cleanup_failed_migration(self, inst_base): """Make sure that a failed migrate doesn't prevent us from rolling back in a revert. """ try: shutil.rmtree(inst_base) except OSError as e: if e.errno != errno.ENOENT: raise def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): LOG.debug("Starting finish_revert_migration", instance=instance) inst_base = libvirt_utils.get_instance_path(instance) inst_base_resize = inst_base + "_resize" # NOTE(danms): if we're recovering from a failed migration, # make sure we don't have a left-over same-host base directory # that would conflict. Also, don't fail on the rename if the # failure happened early. if os.path.exists(inst_base_resize): self._cleanup_failed_migration(inst_base) utils.execute('mv', inst_base_resize, inst_base) root_disk = self.image_backend.by_name(instance, 'disk') # Once we rollback, the snapshot is no longer needed, so remove it # TODO(nic): Remove the try/except/finally in a future release # To avoid any upgrade issues surrounding instances being in pending # resize state when the software is updated, this portion of the # method logs exceptions rather than failing on them. Once it can be # reasonably assumed that no such instances exist in the wild # anymore, the try/except/finally should be removed, # and ignore_errors should be set back to False (the default) so # that problems throw errors, like they should. 
if root_disk.exists(): try: root_disk.rollback_to_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME) except exception.SnapshotNotFound: LOG.warning("Failed to rollback snapshot (%s)", libvirt_utils.RESIZE_SNAPSHOT_NAME) finally: root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, instance.image_meta, block_device_info) xml = self._get_guest_xml(context, instance, network_info, disk_info, instance.image_meta, block_device_info=block_device_info) self._create_domain_and_network(context, xml, instance, network_info, block_device_info=block_device_info, power_on=power_on, vifs_already_plugged=True) if power_on: timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_running, instance) timer.start(interval=0.5).wait() LOG.debug("finish_revert_migration finished successfully.", instance=instance) def confirm_migration(self, context, migration, instance, network_info): """Confirms a resize, destroying the source VM.""" self._cleanup_resize(context, instance, network_info) @staticmethod def _get_io_devices(xml_doc): """get the list of io devices from the xml document.""" result = {"volumes": [], "ifaces": []} try: doc = etree.fromstring(xml_doc) except Exception: return result blocks = [('./devices/disk', 'volumes'), ('./devices/interface', 'ifaces')] for block, key in blocks: section = doc.findall(block) for node in section: for child in node.getchildren(): if child.tag == 'target' and child.get('dev'): result[key].append(child.get('dev')) return result def get_diagnostics(self, instance): guest = self._host.get_guest(instance) # TODO(sahid): We are converting all calls from a # virDomain object to use nova.virt.libvirt.Guest. # We should be able to remove domain at the end. 
domain = guest._domain output = {} # get cpu time, might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt try: for vcpu in guest.get_vcpus_info(): output["cpu" + str(vcpu.id) + "_time"] = vcpu.time except libvirt.libvirtError: pass # get io status xml = guest.get_xml_desc() dom_io = LibvirtDriver._get_io_devices(xml) for guest_disk in dom_io["volumes"]: try: # blockStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt stats = domain.blockStats(guest_disk) output[guest_disk + "_read_req"] = stats[0] output[guest_disk + "_read"] = stats[1] output[guest_disk + "_write_req"] = stats[2] output[guest_disk + "_write"] = stats[3] output[guest_disk + "_errors"] = stats[4] except libvirt.libvirtError: pass for interface in dom_io["ifaces"]: try: # interfaceStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt stats = domain.interfaceStats(interface) output[interface + "_rx"] = stats[0] output[interface + "_rx_packets"] = stats[1] output[interface + "_rx_errors"] = stats[2] output[interface + "_rx_drop"] = stats[3] output[interface + "_tx"] = stats[4] output[interface + "_tx_packets"] = stats[5] output[interface + "_tx_errors"] = stats[6] output[interface + "_tx_drop"] = stats[7] except libvirt.libvirtError: pass output["memory"] = domain.maxMemory() # memoryStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt try: mem = domain.memoryStats() for key in mem.keys(): output["memory-" + key] = mem[key] except (libvirt.libvirtError, AttributeError): pass return output def get_instance_diagnostics(self, instance): guest = self._host.get_guest(instance) # TODO(sahid): We are converting all calls from a # virDomain object to use nova.virt.libvirt.Guest. # We should be able to remove domain at the end. domain = guest._domain xml = guest.get_xml_desc() xml_doc = etree.fromstring(xml) # TODO(sahid): Needs to use get_info but more changes have to # be done since a mapping STATE_MAP LIBVIRT_POWER_STATE is # needed. 
(state, max_mem, mem, num_cpu, cpu_time) = \ guest._get_domain_info(self._host) config_drive = configdrive.required_by(instance) launched_at = timeutils.normalize_time(instance.launched_at) uptime = timeutils.delta_seconds(launched_at, timeutils.utcnow()) diags = diagnostics_obj.Diagnostics(state=power_state.STATE_MAP[state], driver='libvirt', config_drive=config_drive, hypervisor=CONF.libvirt.virt_type, hypervisor_os='linux', uptime=uptime) diags.memory_details = diagnostics_obj.MemoryDiagnostics( maximum=max_mem / units.Mi, used=mem / units.Mi) # get cpu time, might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt try: for vcpu in guest.get_vcpus_info(): diags.add_cpu(id=vcpu.id, time=vcpu.time) except libvirt.libvirtError: pass # get io status dom_io = LibvirtDriver._get_io_devices(xml) for guest_disk in dom_io["volumes"]: try: # blockStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt stats = domain.blockStats(guest_disk) diags.add_disk(read_bytes=stats[1], read_requests=stats[0], write_bytes=stats[3], write_requests=stats[2], errors_count=stats[4]) except libvirt.libvirtError: pass for interface in dom_io["ifaces"]: try: # interfaceStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt stats = domain.interfaceStats(interface) diags.add_nic(rx_octets=stats[0], rx_errors=stats[2], rx_drop=stats[3], rx_packets=stats[1], tx_octets=stats[4], tx_errors=stats[6], tx_drop=stats[7], tx_packets=stats[5]) except libvirt.libvirtError: pass # Update mac addresses of interface if stats have been reported if diags.nic_details: nodes = xml_doc.findall('./devices/interface/mac') for index, node in enumerate(nodes): diags.nic_details[index].mac_address = node.get('address') return diags @staticmethod def _prepare_device_bus(dev): """Determines the device bus and its hypervisor assigned address """ bus = None address = (dev.device_addr.format_address() if dev.device_addr else None) if isinstance(dev.device_addr, vconfig.LibvirtConfigGuestDeviceAddressPCI): bus = objects.PCIDeviceBus() elif isinstance(dev, vconfig.LibvirtConfigGuestDisk): if dev.target_bus == 'scsi': bus = objects.SCSIDeviceBus() elif dev.target_bus == 'ide': bus = objects.IDEDeviceBus() elif dev.target_bus == 'usb': bus = objects.USBDeviceBus() if address is not None and bus is not None: bus.address = address return bus def _build_device_metadata(self, context, instance): """Builds a metadata object for instance devices, that maps the user provided tag to the hypervisor assigned device address. """ def _get_device_name(bdm): return block_device.strip_dev(bdm.device_name) network_info = instance.info_cache.network_info vlans_by_mac = netutils.get_cached_vifs_with_vlan(network_info) vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context, instance.uuid) vifs_to_expose = {vif.address: vif for vif in vifs if ('tag' in vif and vif.tag) or vlans_by_mac.get(vif.address)} # TODO(mriedem): We should be able to avoid the DB query here by using # block_device_info['block_device_mapping'] which is passed into most # methods that call this function. 
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) tagged_bdms = {_get_device_name(bdm): bdm for bdm in bdms if bdm.tag} devices = [] guest = self._host.get_guest(instance) xml = guest.get_xml_desc() xml_dom = etree.fromstring(xml) guest_config = vconfig.LibvirtConfigGuest() guest_config.parse_dom(xml_dom) for dev in guest_config.devices: # Build network interfaces related metadata if isinstance(dev, vconfig.LibvirtConfigGuestInterface): vif = vifs_to_expose.get(dev.mac_addr) if not vif: continue bus = self._prepare_device_bus(dev) device = objects.NetworkInterfaceMetadata(mac=vif.address) if 'tag' in vif and vif.tag: device.tags = [vif.tag] if bus: device.bus = bus vlan = vlans_by_mac.get(vif.address) if vlan: device.vlan = int(vlan) devices.append(device) # Build disks related metadata if isinstance(dev, vconfig.LibvirtConfigGuestDisk): bdm = tagged_bdms.get(dev.target_dev) if not bdm: continue bus = self._prepare_device_bus(dev) device = objects.DiskMetadata(tags=[bdm.tag]) # NOTE(artom) Setting the serial (which corresponds to # volume_id in BlockDeviceMapping) in DiskMetadata allows us to # find the disks's BlockDeviceMapping object when we detach the # volume and want to clean up its metadata. device.serial = bdm.volume_id if bus: device.bus = bus devices.append(device) if devices: dev_meta = objects.InstanceDeviceMetadata(devices=devices) return dev_meta def instance_on_disk(self, instance): # ensure directories exist and are writable instance_path = libvirt_utils.get_instance_path(instance) LOG.debug('Checking instance files accessibility %s', instance_path, instance=instance) shared_instance_path = os.access(instance_path, os.W_OK) # NOTE(flwang): For shared block storage scenario, the file system is # not really shared by the two hosts, but the volume of evacuated # instance is reachable. shared_block_storage = (self.image_backend.backend(). is_shared_block_storage()) return shared_instance_path or shared_block_storage def inject_network_info(self, instance, nw_info): self.firewall_driver.setup_basic_filtering(instance, nw_info) def delete_instance_files(self, instance): target = libvirt_utils.get_instance_path(instance) # A resize may be in progress target_resize = target + '_resize' # Other threads may attempt to rename the path, so renaming the path # to target + '_del' (because it is atomic) and iterating through # twice in the unlikely event that a concurrent rename occurs between # the two rename attempts in this method. In general this method # should be fairly thread-safe without these additional checks, since # other operations involving renames are not permitted when the task # state is not None and the task state should be set to something # other than None by the time this method is invoked. target_del = target + '_del' for i in range(2): try: utils.execute('mv', target, target_del) break except Exception: pass try: utils.execute('mv', target_resize, target_del) break except Exception: pass # Either the target or target_resize path may still exist if all # rename attempts failed. remaining_path = None for p in (target, target_resize): if os.path.exists(p): remaining_path = p break # A previous delete attempt may have been interrupted, so target_del # may exist even if all rename attempts during the present method # invocation failed due to the absence of both target and # target_resize. 
if not remaining_path and os.path.exists(target_del): self.job_tracker.terminate_jobs(instance) LOG.info('Deleting instance files %s', target_del, instance=instance) remaining_path = target_del try: shutil.rmtree(target_del) except OSError as e: LOG.error('Failed to cleanup directory %(target)s: %(e)s', {'target': target_del, 'e': e}, instance=instance) # It is possible that the delete failed, if so don't mark the instance # as cleaned. if remaining_path and os.path.exists(remaining_path): LOG.info('Deletion of %s failed', remaining_path, instance=instance) return False LOG.info('Deletion of %s complete', target_del, instance=instance) return True @property def need_legacy_block_device_info(self): return False def default_root_device_name(self, instance, image_meta, root_bdm): disk_bus = blockinfo.get_disk_bus_for_device_type( instance, CONF.libvirt.virt_type, image_meta, "disk") cdrom_bus = blockinfo.get_disk_bus_for_device_type( instance, CONF.libvirt.virt_type, image_meta, "cdrom") root_info = blockinfo.get_root_info( instance, CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus, cdrom_bus) return block_device.prepend_dev(root_info['dev']) def default_device_names_for_instance(self, instance, root_device_name, *block_device_lists): block_device_mapping = list(itertools.chain(*block_device_lists)) # NOTE(ndipanov): Null out the device names so that blockinfo code # will assign them for bdm in block_device_mapping: if bdm.device_name is not None: LOG.warning( "Ignoring supplied device name: %(device_name)s. " "Libvirt can't honour user-supplied dev names", {'device_name': bdm.device_name}, instance=instance) bdm.device_name = None block_device_info = driver.get_block_device_info(instance, block_device_mapping) blockinfo.default_device_names(CONF.libvirt.virt_type, nova_context.get_admin_context(), instance, block_device_info, instance.image_meta) def get_device_name_for_instance(self, instance, bdms, block_device_obj): block_device_info = driver.get_block_device_info(instance, bdms) instance_info = blockinfo.get_disk_info( CONF.libvirt.virt_type, instance, instance.image_meta, block_device_info=block_device_info) suggested_dev_name = block_device_obj.device_name if suggested_dev_name is not None: LOG.warning( 'Ignoring supplied device name: %(suggested_dev)s', {'suggested_dev': suggested_dev_name}, instance=instance) # NOTE(ndipanov): get_info_from_bdm will generate the new device name # only when it's actually not set on the bd object block_device_obj.device_name = None disk_info = blockinfo.get_info_from_bdm( instance, CONF.libvirt.virt_type, instance.image_meta, block_device_obj, mapping=instance_info['mapping']) return block_device.prepend_dev(disk_info['dev']) def is_supported_fs_format(self, fs_type): return fs_type in [nova.privsep.fs.FS_FORMAT_EXT2, nova.privsep.fs.FS_FORMAT_EXT3, nova.privsep.fs.FS_FORMAT_EXT4, nova.privsep.fs.FS_FORMAT_XFS]
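# --- Illustrative sketch (not part of the driver above) ---
# _live_migration_data_gb() above estimates the minimum amount of data a
# live migration has to move: guest RAM in GiB (never reported as less
# than 2) plus each copied disk's on-disk size in GiB (each also floored
# at 2), with unreadable disks skipped. The standalone helper below
# reproduces that arithmetic; the function name and the example numbers
# are assumptions for demonstration only.
import os

_MiB = 1024 ** 2
_GiB = 1024 ** 3

def _estimate_live_migration_gb(memory_mb, disk_paths):
    # RAM contribution, floored at 2 GiB.
    data_gb = max(memory_mb * _MiB / _GiB, 2)
    for path in disk_paths:
        try:
            # Per-disk contribution, also floored at 2 GiB.
            data_gb += max(os.stat(path).st_size / _GiB, 2)
        except OSError:
            # Mirror the driver: ignore disks that cannot be stat'd so the
            # monitoring thread keeps running.
            continue
    return data_gb

# e.g. a 4096 MB guest with a single 20 GiB disk image is reported as
# roughly 4 + 20 = 24 GB; the real transfer is larger because the guest
# keeps dirtying RAM while the migration runs.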
py
1a439d0d31548c10a31d2e90f4e57946cb6e7d57
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.

Provides a way to track which RPC commands are exercised during
testing.
"""

import os

REFERENCE_FILENAME = 'rpc_interface.txt'


class AuthServiceProxyWrapper():
    """
    An object that wraps AuthServiceProxy to record specific RPC calls.
    """
    def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
        """
        Kwargs:
            auth_service_proxy_instance (AuthServiceProxy): the instance
                being wrapped.
            coverage_logfile (str): if specified, write each service_name
                out to a file when called.
        """
        self.auth_service_proxy_instance = auth_service_proxy_instance
        self.coverage_logfile = coverage_logfile

    def __getattr__(self, name):
        return_val = getattr(self.auth_service_proxy_instance, name)
        if not isinstance(return_val, type(self.auth_service_proxy_instance)):
            # If proxy getattr returned an unwrapped value, do the same here.
            return return_val
        return AuthServiceProxyWrapper(return_val, self.coverage_logfile)

    def __call__(self, *args, **kwargs):
        """
        Delegates to AuthServiceProxy, then writes the particular RPC method
        called to a file.
        """
        return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
        self._log_call()
        return return_val

    def _log_call(self):
        rpc_method = self.auth_service_proxy_instance._service_name

        if self.coverage_logfile:
            with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
                f.write("%s\n" % rpc_method)

    def __truediv__(self, relative_uri):
        return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
                                       self.coverage_logfile)

    def get_request(self, *args, **kwargs):
        self._log_call()
        return self.auth_service_proxy_instance.get_request(*args)


def get_filename(dirname, n_node):
    """
    Get a filename unique to the test process ID and node.

    This file will contain a list of RPC commands covered.
    """
    pid = str(os.getpid())
    return os.path.join(
        dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))


def write_all_rpc_commands(dirname, node):
    """
    Write out a list of all RPC functions available in `bitwin24-cli` for
    coverage comparison. This will only happen once per coverage
    directory.

    Args:
        dirname (str): temporary test dir
        node (AuthServiceProxy): client

    Returns:
        bool. if the RPC interface file was written.
    """
    filename = os.path.join(dirname, REFERENCE_FILENAME)

    if os.path.isfile(filename):
        return False

    help_output = node.help().split('\n')
    commands = set()

    for line in help_output:
        line = line.strip()

        # Ignore blanks and headers
        if line and not line.startswith('='):
            commands.add("%s\n" % line.split()[0])

    with open(filename, 'w', encoding='utf8') as f:
        f.writelines(list(commands))

    return True
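# --- Usage sketch (illustrative only, not part of the module above) ---
# Wrapping a proxy-like object records every RPC method name in the log
# file returned by get_filename(). DummyProxy below is a stand-in for the
# real AuthServiceProxy from the test framework so the example stays
# self-contained; it is an assumption, not the framework's API.
if __name__ == '__main__':
    import tempfile

    class DummyProxy:
        def __init__(self, service_name=None):
            self._service_name = service_name

        def __getattr__(self, name):
            # Mimic AuthServiceProxy: attribute access yields a callable
            # proxy for that RPC method.
            return DummyProxy(service_name=name)

        def __call__(self, *args, **kwargs):
            return None

    logfile = get_filename(tempfile.gettempdir(), 0)
    node = AuthServiceProxyWrapper(DummyProxy(), coverage_logfile=logfile)
    node.getblockcount()   # appends "getblockcount" to the log
    node.getblockhash(0)   # appends "getblockhash" to the log
    print("RPC coverage written to", logfile)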
py
1a439d5dfb5bee4dfa305e9f218b7515f8cf23d5
from django import forms

from fobi.base import FormFieldPlugin, form_element_plugin_registry

from .forms import LocationZipHomeForm


class LocationZipHomePlugin(FormFieldPlugin):
    """LocationZipHomePlugin."""

    uid = "location_zip_home"
    name = "What is the zip code of your home?"
    form = LocationZipHomeForm
    group = "Intercept"  # Group to which the plugin belongs to

    def get_form_field_instances(self, request=None, form_entry=None,
                                 form_element_entries=None, **kwargs):
        field_kwargs = {
            'required': self.data.required,
            'label': self.data.label,
            'widget': forms.widgets.NumberInput(attrs={}),
        }
        return [(self.data.name, forms.IntegerField, field_kwargs)]


form_element_plugin_registry.register(LocationZipHomePlugin)
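# --- Illustrative sketch (assumption, not part of the plugin above) ---
# fobi expands the (name, field_class, field_kwargs) tuple returned by
# get_form_field_instances() into a plain Django form field. Built by hand
# with the values this plugin supplies, the equivalent field would look
# roughly like the helper below (demonstration only; real values come from
# self.data at runtime).
def _example_equivalent_field():
    return forms.IntegerField(
        required=True,
        label="What is the zip code of your home?",
        widget=forms.widgets.NumberInput(attrs={}),
    )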
py
1a439df30ba191769485609e99f3b514764a0810
import numpy
import numpy as nx
import numpy.linalg as linalg
from numpy import inf, nan
import math

def apply_distortion(x,k):
    if len(k) < 5:
        newk = [0]*5
        for i in range(len(k)):
            newk[i] = k[i]
        k = newk
    m,n = x.shape

    # Add distortion
    r2 = x[0,:]**2 + x[1,:]**2
    r4 = r2**2
    r6 = r2**3

    # Radial distortion:
    cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6
    #print 'cdist',cdist

    xd1 = x * nx.dot(nx.ones((2,1)),cdist[nx.newaxis,:])
    #print 'xd1',xd1
    coeff = nx.dot(nx.reshape( nx.transpose(nx.array([ cdist, cdist])),(2*n,1)),
                   nx.ones((1,3)))
    #print 'coeff',coeff

    # tangential distortion:
    a1 = 2*x[0,:]*x[1,:]
    a2 = r2 + 2*x[0,:]**2
    a3 = r2 + 2*x[1,:]**2

    delta_x = nx.array([ k[2]*a1 + k[3]*a2,
                         k[2]*a3 + k[3]*a1])
    if 0:
        aa = nx.dot((2*k[2]*x[1,:] + 6*k[3]*x[0,:])[:,nx.newaxis],
                    nx.ones((1,3)))
        bb = nx.dot((2*k[2]*x[0,:] + 2*k[3]*x[1,:])[:,nx.newaxis],
                    nx.ones((1,3)))
        cc = nx.dot((6*k[2]*x[1,:] + 2*k[3]*x[0,:])[:,nx.newaxis],
                    nx.ones((1,3)))
        #print 'aa',aa
        #print 'bb',bb
        #print 'cc',cc

    #print 'xd1.shape',xd1.shape
    #print 'delta_x.shape',delta_x.shape
    xd = xd1 + delta_x
    #print 'xd/1e3',xd/1e3
    return xd

class CachedUndistorter:
    def __init__(self):
        self.vals = {}

    def compute_indexes_for_val(self,val):
        R,f,c,k,alpha,KK_new,nr,nc = val
        if R is None:
            R = nx.array([[1,0,0],[0,1,0],[0,0,1]])
        else:
            R = nx.asarray(R)
        if f is None:
            f = (1,1)
        else:
            f = nx.asarray(f)
        if c is None:
            c = (0,0)
        else:
            c = nx.asarray(c)
        if k is None:
            k = (0,0,0,0,0)
        else:
            k = nx.asarray(k)
        if alpha is None:
            alpha = 0
        if KK_new is None:
            if alpha != 0:
                raise ValueError('I guess KK_new is wrong in this case, but I am not sure '
                                 '- the 2nd column, 1st row should be f[0]*alpha')
            KK_new = nx.array([[ f[0], 0,    c[0]],
                               [ 0,    f[1], c[1]],
                               [ 0,    0,    1]])
        else:
            KK_new = nx.asarray(KK_new)

        mx, my = nx.meshgrid( nx.arange(nc), nx.arange(nr) )
        px = nx.reshape( mx, (nc*nr,) )
        py = nx.reshape( my, (nc*nr,) )

##        A = linalg.inverse(KK_new)
##        b = nx.array( [px,
##                       py,
##                       nx.ones(px.shape)])
        rays = nx.dot(linalg.inv(KK_new),nx.array( [px,
                                                    py,
                                                    nx.ones(px.shape)]))
        #print 'rays',rays

        # Rotation: (or affine transformation):
        rays2 = nx.dot(nx.transpose(R),rays)
        #print 'rays2',rays2

        x = nx.array( [ rays2[0,:]/rays2[2,:],
                        rays2[1,:]/rays2[2,] ] )
        #print 'x',x

        # Add distortion
        xd = apply_distortion(x, k)
        #print 'xd',xd

        # Reconvert in pixels:
        px2 = f[0]*(xd[0,:]+alpha*xd[1,:])+c[0]
        py2 = f[1]*xd[1,:]+c[1]
        #print 'px2',px2
        #print 'py2',py2

        # Interpolate between the closest pixels:
        px_0 = nx.floor(px2)
        py_0 = nx.floor(py2)
        if 0:
            py_1 = py_0 + 1;

        tmpA = nx.where((px_0>=0) & (px_0 <= (nc-2)) &
                        (py_0 >= 0) & (py_0 <= (nr-2)))
        if type(tmpA)==tuple: # numarray behavior
            good_points = tmpA[0]
        else: # numpy behavior
            good_points = tmpA

        px2 = px2[good_points]
        py2 = py2[good_points]
        px_0 = px_0[good_points]
        py_0 = py_0[good_points]

        alpha_x = px2 - px_0
        #print 'alpha_x',alpha_x
        alpha_y = py2 - py_0
        a1 = (1 - alpha_y)*(1 - alpha_x)
        a2 = (1 - alpha_y)*alpha_x
        a3 = alpha_y * (1 - alpha_x)
        a4 = alpha_y * alpha_x
        #print 'a2',a2

        ind_lu = (px_0 * nr + py_0).astype(nx.int_)
        ind_ru = ((px_0 + 1) * nr + py_0).astype(nx.int_)
        ind_ld = (px_0 * nr + (py_0 + 1)).astype(nx.int_)
        ind_rd = ((px_0 + 1) * nr + (py_0 + 1)).astype(nx.int_)

        ind_new = ((px[good_points])*nr + py[good_points]).astype(nx.int_)
        indexes = ind_new, ind_lu, ind_ru, ind_ld, ind_rd, a1, a2, a3, a4
        return indexes

    def rect(self,I,R=None,f=None,c=None,k=None,alpha=None,KK_new=None):
        """
        arguments:
        I is image

        optional arguments:
        R is 3x3 rotation (of affine transformation) matrix, defaults to eye(3)
        f is focal length (horizontal and vertical),
defaults to (1,1) c is image center (horizontal and vertical), defaults to (0,0) k is nonlinear parameters, defaults to (0,0,0,0,0) """ I = nx.asarray(I) nr, nc = I.shape # must be 2D (grayscale) image # get cached value val = R,f,c,k,alpha,KK_new,nr,nc if val in self.vals: # in cache? indexes = self.vals[val] # get from cache else: indexes = self.compute_indexes_for_val(val) self.vals[val] = indexes # cache for next time ind_new, ind_lu, ind_ru, ind_ld, ind_rd, a1, a2, a3, a4 = indexes # put image into matlab uni-dimensional index format Ir = nx.ravel(nx.transpose(I)) Irec = 255.0*nx.ones((nr*nc,)) Irec[ind_new] = a1 * Ir[ind_lu] + a2 * Ir[ind_ru] + a3 * Ir[ind_ld] + a4 * Ir[ind_rd] # convert to uint8 format Irec = Irec.astype(nx.uint8) # convert matlab unidimensional format into numpy format Irec = nx.reshape(Irec,(nc,nr)) Irec = nx.transpose(Irec) return Irec _cache = CachedUndistorter() rect = _cache.rect def reference_rect(I,R=None,f=None,c=None,k=None,alpha=None,KK_new=None): # original version, translated from MATLAB. superseded by above """ arguments: I is image optional arguments: R is 3x3 rotation (of affine transformation) matrix, defaults to eye(3) f is focal length (horizontal and vertical), defaults to (1,1) c is image center (horizontal and vertical), defaults to (0,0) k is nonlinear parameters, defaults to (0,0,0,0,0) """ I = nx.asarray(I) if R is None: R = nx.array([[1,0,0],[0,1,0],[0,0,1]]) else: R = nx.asarray(R) if f is None: f = (1,1) else: f = nx.asarray(f) if c is None: c = (0,0) else: c = nx.asarray(c) if k is None: k = (0,0,0,0,0) else: k = nx.asarray(k) if KK_new is None: KK_new = nx.array([[ f[0], 0, c[0]], [ 0, f[1], c[1]], [ 0, 0, 1]]) else: KK_new = nx.asarray(KK_new) if alpha is None: alpha = 0 # Note: R is the motion of the points in space # So: X2 = R*X where X: coord in the old reference frame, X2: coord in the new ref frame. 
nr, nc = I.shape # must be 2D (grayscale) image # put I in matlab uni-dimensional index format I = I.copy() I = nx.transpose(I).copy() I.ravel() Irec = 255.0*nx.ones((nr*nc,)) mx, my = nx.meshgrid( nx.arange(nc), nx.arange(nr) ) px = nx.reshape( mx, (nc*nr,) ) py = nx.reshape( my, (nc*nr,) ) ## A = linalg.inverse(KK_new) ## b = nx.array( [px, ## py, ## nx.ones(px.shape)]) rays = nx.dot(linalg.inverse(KK_new),nx.array( [px, py, nx.ones(px.shape)])) #print 'rays',rays # Rotation: (or affine transformation): rays2 = nx.dot(nx.transpose(R),rays) #print 'rays2',rays2 x = nx.array( [ rays2[0,:]/rays2[2,:], rays2[1,:]/rays2[2,] ] ) #print 'x',x # Add distortion xd = apply_distortion(x, k) #print 'xd',xd # Reconvert in pixels: px2 = f[0]*(xd[0,:]+alpha*xd[1,:])+c[0] py2 = f[1]*xd[1,:]+c[1] #print 'px2',px2 #print 'py2',py2 # Interpolate between the closest pixels: px_0 = nx.floor(px2) py_0 = nx.floor(py2) if 0: py_1 = py_0 + 1; tmpA= nx.where((px_0>=0) & (px_0 <= (nc-2)) & (py_0 >= 0) & (py_0 <= (nr-2))) if type(tmpA)==tuple: # numarray behavior good_points = tmpA[0] else: # numpy behavior good_points = tmpA px2 = px2[good_points] py2 = py2[good_points] px_0 = px_0[good_points] py_0 = py_0[good_points] alpha_x = px2 - px_0 #print 'alpha_x',alpha_x alpha_y = py2 - py_0 a1 = (1 - alpha_y)*(1 - alpha_x) a2 = (1 - alpha_y)*alpha_x a3 = alpha_y * (1 - alpha_x) a4 = alpha_y * alpha_x #print 'a2',a2 ind_lu = (px_0 * nr + py_0).astype(nx.Int) ind_ru = ((px_0 + 1) * nr + py_0).astype(nx.Int) ind_ld = (px_0 * nr + (py_0 + 1)).astype(nx.Int) ind_rd = ((px_0 + 1) * nr + (py_0 + 1)).astype(nx.Int) ind_new = ((px[good_points])*nr + py[good_points]).astype(nx.Int) Ir = nx.ravel(I) Irec[ind_new] = a1 * Ir[ind_lu] + a2 * Ir[ind_ru] + a3 * Ir[ind_ld] + a4 * Ir[ind_rd] # convert matlab unidimensional format into numarray format Irec = nx.reshape(Irec,(nc,nr)) Irec = nx.transpose(Irec) return Irec def undistort( reconstructor, distorted_image, cam_id ): intrin = reconstructor.get_intrinsic_linear(cam_id) k = reconstructor.get_intrinsic_nonlinear(cam_id) f = intrin[0,0], intrin[1,1] # focal length c = intrin[0,2], intrin[1,2] # camera center im = rect(distorted_image, f=f, c=c, k=k) # perform the undistortion im = im.astype(nx.uint8) return im
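
# A minimal usage sketch (not part of the original module): it exercises the
# cached `rect` wrapper above on a synthetic grayscale image. The focal
# lengths, principal point, and distortion coefficients below are illustrative
# assumptions, not calibration output from any real camera.
if __name__ == '__main__':
    demo = (nx.arange(240 * 320) % 256).astype(nx.uint8).reshape(240, 320)
    out = rect(demo,
               f=(300.0, 300.0),               # assumed focal lengths (pixels)
               c=(160.0, 120.0),               # assumed principal point
               k=(-0.2, 0.05, 0.0, 0.0, 0.0))  # assumed distortion coefficients
    print(out.shape, out.dtype)                # (240, 320) uint8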
py
1a439e52b8b9eca60d26ff69006d64bb6480e5b6
""" Test message and address parsing/formatting functions. """ from email.header import Header from email.headerregistry import Address from email.message import EmailMessage, Message import pytest from hypothesis import example, given from hypothesis.strategies import emails from aiosmtplib.email import ( extract_recipients, extract_sender, flatten_message, parse_address, quote_address, ) @pytest.mark.parametrize( "address, expected_address", ( ('"A.Smith" <[email protected]>', "[email protected]"), ("Pepé Le Pew <pé[email protected]>", "pé[email protected]"), ("<[email protected]>", "[email protected]"), ("B. Smith <[email protected]", "[email protected]"), ), ids=("quotes", "nonascii", "newtld", "missing_end_<"), ) def test_parse_address_with_display_names(address, expected_address): parsed_address = parse_address(address) assert parsed_address == expected_address @given(emails()) @example("email@[123.123.123.123]") @example("[email protected]") def test_parse_address(email): assert parse_address(email) == email @pytest.mark.parametrize( "address, expected_address", ( ('"A.Smith" <[email protected]>', "<[email protected]>"), ("Pepé Le Pew <pé[email protected]>", "<pé[email protected]>"), ("<[email protected]>", "<[email protected]>"), ("email@[123.123.123.123]", "<email@[123.123.123.123]>"), ("[email protected]", "<[email protected]>"), ("B. Smith <[email protected]", "<[email protected]>"), ), ids=("quotes", "nonascii", "newtld", "ipaddr", "underscores", "missing_end_quote"), ) def test_quote_address_with_display_names(address, expected_address): quoted_address = quote_address(address) assert quoted_address == expected_address @given(emails()) @example("email@[123.123.123.123]") @example("[email protected]") def test_quote_address(email): assert quote_address(email) == "<{}>".format(email) def test_flatten_message(): message = EmailMessage() message["To"] = "[email protected]" message["Subject"] = "Hello, World." message["From"] = "[email protected]" message.set_content("This is a test") flat_message = flatten_message(message) expected_message = b"""To: [email protected]\r Subject: Hello, World.\r From: [email protected]\r Content-Type: text/plain; charset="utf-8"\r Content-Transfer-Encoding: 7bit\r MIME-Version: 1.0\r \r This is a test\r """ assert flat_message == expected_message @pytest.mark.parametrize( "utf8, cte_type, expected_chunk", ( (False, "7bit", b"=?utf-8?q?=C3=A5lice?="), (True, "7bit", b"From: \xc3\[email protected]"), (False, "8bit", b"=?utf-8?q?=C3=A5lice?="), (True, "8bit", b"\xc3\[email protected]"), ), ids=("ascii-7bit", "utf8-7bit", "ascii-8bit", "utf8-8bit"), ) def test_flatten_message_utf8_options(utf8, cte_type, expected_chunk): message = EmailMessage() message["From"] = "å[email protected]" flat_message = flatten_message(message, utf8=utf8, cte_type=cte_type) assert expected_chunk in flat_message def test_flatten_message_removes_bcc_from_message_text(): message = EmailMessage() message["Bcc"] = "[email protected]" flat_message = flatten_message(message) assert flat_message == b"\r\n" # empty message def test_flatten_resent_message(): message = EmailMessage() message["To"] = "[email protected]" message["Cc"] = "[email protected]" message["Bcc"] = "[email protected]" message["Subject"] = "Hello, World." 
message["From"] = "[email protected]" message.set_content("This is a test") message["Resent-Date"] = "Mon, 20 Nov 2017 21:04:27 -0000" message["Resent-To"] = "[email protected]" message["Resent-Cc"] = "[email protected]" message["Resent-Bcc"] = "[email protected]" message["Resent-Subject"] = "Fwd: Hello, World." message["Resent-From"] = "[email protected]" flat_message = flatten_message(message) expected_message = b"""To: [email protected]\r Cc: [email protected]\r Subject: Hello, World.\r From: [email protected]\r Content-Type: text/plain; charset="utf-8"\r Content-Transfer-Encoding: 7bit\r MIME-Version: 1.0\r Resent-Date: Mon, 20 Nov 2017 21:04:27 -0000\r Resent-To: [email protected]\r Resent-Cc: [email protected]\r Resent-Subject: Fwd: Hello, World.\r Resent-From: [email protected]\r \r This is a test\r """ assert flat_message == expected_message @pytest.mark.parametrize( "mime_to_header,mime_cc_header,compat32_to_header," "compat32_cc_header,expected_recipients", ( ( "Alice Smith <[email protected]>, [email protected]", "Bob <[email protected]>", "Alice Smith <[email protected]>, [email protected]", "Bob <[email protected]>", ["[email protected]", "[email protected]", "[email protected]"], ), ( Address(display_name="Alice Smith", username="alice", domain="example.com"), Address(display_name="Bob", username="Bob", domain="example.com"), Header("Alice Smith <[email protected]>"), Header("Bob <[email protected]>"), ["[email protected]", "[email protected]"], ), ( Address(display_name="ålice Smith", username="ålice", domain="example.com"), Address(display_name="Bøb", username="Bøb", domain="example.com"), Header("ålice Smith <å[email protected]>"), Header("Bøb <Bø[email protected]>"), ["å[email protected]", "Bø[email protected]"], ), ( Address(display_name="ålice Smith", username="alice", domain="example.com"), Address(display_name="Bøb", username="Bob", domain="example.com"), Header("ålice Smith <[email protected]>"), Header("Bøb <[email protected]>"), ["[email protected]", "[email protected]"], ), ), ids=("str", "ascii", "utf8_address", "utf8_display_name"), ) def test_extract_recipients( mime_to_header, mime_cc_header, compat32_to_header, compat32_cc_header, expected_recipients, ): mime_message = EmailMessage() mime_message["To"] = mime_to_header mime_message["Cc"] = mime_cc_header mime_recipients = extract_recipients(mime_message) assert mime_recipients == expected_recipients compat32_message = Message() compat32_message["To"] = compat32_to_header compat32_message["Cc"] = compat32_cc_header compat32_recipients = extract_recipients(compat32_message) assert compat32_recipients == expected_recipients def test_extract_recipients_includes_bcc(): message = EmailMessage() message["Bcc"] = "[email protected]" recipients = extract_recipients(message) assert recipients == [message["Bcc"]] def test_extract_recipients_invalid_email(): message = EmailMessage() message["Cc"] = "me" recipients = extract_recipients(message) assert recipients == ["me"] def test_extract_recipients_with_iterable_of_strings(): message = EmailMessage() message["To"] = ("[email protected]", "you") recipients = extract_recipients(message) assert recipients == ["[email protected]", "you"] def test_extract_recipients_resent_message(): message = EmailMessage() message["To"] = "[email protected]" message["Cc"] = "[email protected]" message["Bcc"] = "[email protected]" message["Resent-Date"] = "Mon, 20 Nov 2017 21:04:27 -0000" message["Resent-To"] = "[email protected]" message["Resent-Cc"] = "[email protected]" 
message["Resent-Bcc"] = "[email protected]" recipients = extract_recipients(message) assert message["Resent-To"] in recipients assert message["Resent-Cc"] in recipients assert message["Resent-Bcc"] in recipients assert message["To"] not in recipients assert message["Cc"] not in recipients assert message["Bcc"] not in recipients def test_extract_recipients_valueerror_on_multiple_resent_message(): message = EmailMessage() message["Resent-Date"] = "Mon, 20 Nov 2016 21:04:27 -0000" message["Resent-Date"] = "Mon, 20 Nov 2017 21:04:27 -0000" with pytest.raises(ValueError): extract_recipients(message) @pytest.mark.parametrize( "mime_header,compat32_header,expected_sender", ( ( "Alice Smith <[email protected]>", "Alice Smith <[email protected]>", "[email protected]", ), ( Address(display_name="Alice Smith", username="alice", domain="example.com"), Header("Alice Smith <[email protected]>"), "[email protected]", ), ( Address(display_name="ålice Smith", username="ålice", domain="example.com"), Header("ålice Smith <å[email protected]>", "utf-8"), "å[email protected]", ), ( Address(display_name="ålice Smith", username="alice", domain="example.com"), Header("ålice Smith <[email protected]>", "utf-8"), "[email protected]", ), ), ids=("str", "ascii", "utf8_address", "utf8_display_name"), ) def test_extract_sender(mime_header, compat32_header, expected_sender): mime_message = EmailMessage() mime_message["From"] = mime_header mime_sender = extract_sender(mime_message) assert mime_sender == expected_sender compat32_message = Message() compat32_message["From"] = compat32_header compat32_sender = extract_sender(compat32_message) assert compat32_sender == expected_sender def test_extract_sender_prefers_sender_header(): message = EmailMessage() message["From"] = "[email protected]" message["Sender"] = "[email protected]" sender = extract_sender(message) assert sender != message["From"] assert sender == message["Sender"] def test_extract_sender_resent_message(): message = EmailMessage() message["From"] = "[email protected]" message["Resent-Date"] = "Mon, 20 Nov 2017 21:04:27 -0000" message["Resent-From"] = "[email protected]" sender = extract_sender(message) assert sender == message["Resent-From"] assert sender != message["From"] def test_extract_sender_valueerror_on_multiple_resent_message(): message = EmailMessage() message["Resent-Date"] = "Mon, 20 Nov 2016 21:04:27 -0000" message["Resent-Date"] = "Mon, 20 Nov 2017 21:04:27 -0000" with pytest.raises(ValueError): extract_sender(message)
py
1a439f30d72c105ebc71bfa5345a1d6b3a06c739
from .basic_operations import process_result
py
1a43a01c39f2fbc0a889ec8f5cfcf3d9972ec05b
#!/usr/bin/env python3 # Copyright (c) 2015-2018 The Syscoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test behavior of -maxuploadtarget. * Verify that getdata requests for old blocks (>1week) are dropped if uploadtarget has been reached. * Verify that getdata requests for recent blocks are respected even if uploadtarget has been reached. * Verify that the upload counters are reset after 24 hours. """ from collections import defaultdict import time from test_framework.messages import CInv, msg_getdata from test_framework.mininode import P2PInterface from test_framework.test_framework import SyscoinTestFramework from test_framework.util import assert_equal, mine_large_block class TestP2PConn(P2PInterface): def __init__(self): super().__init__() self.block_receive_map = defaultdict(int) def on_inv(self, message): pass def on_block(self, message): message.block.calc_sha256() self.block_receive_map[message.block.sha256] += 1 class MaxUploadTest(SyscoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [["-maxuploadtarget=800"]] # Cache for utxos, as the listunspent may take a long time later in the test self.utxo_cache = [] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # Before we connect anything, we first set the time on the node # to be in the past, otherwise things break because the CNode # time counters can't be reset backward after initialization old_time = int(time.time() - 2*60*60*24*7) self.nodes[0].setmocktime(old_time) # Generate some old blocks self.nodes[0].generate(130) # p2p_conns[0] will only request old blocks # p2p_conns[1] will only request new blocks # p2p_conns[2] will test resetting the counters p2p_conns = [] for _ in range(3): p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn())) # Now mine a big block mine_large_block(self.nodes[0], self.utxo_cache) # Store the hash; we'll request this later big_old_block = self.nodes[0].getbestblockhash() old_block_size = self.nodes[0].getblock(big_old_block, True)['size'] big_old_block = int(big_old_block, 16) # Advance to two days ago self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24) # Mine one more block, so that the prior block looks old mine_large_block(self.nodes[0], self.utxo_cache) # We'll be requesting this new block too big_new_block = self.nodes[0].getbestblockhash() big_new_block = int(big_new_block, 16) # p2p_conns[0] will test what happens if we just keep requesting the # the same big old block too many times (expect: disconnect) getdata_request = msg_getdata() getdata_request.inv.append(CInv(2, big_old_block)) max_bytes_per_day = 800*1024*1024 daily_buffer = 144 * 4000000 max_bytes_available = max_bytes_per_day - daily_buffer success_count = max_bytes_available // old_block_size # 576MB will be reserved for relaying new blocks, so expect this to # succeed for ~235 tries. for i in range(success_count): p2p_conns[0].send_message(getdata_request) p2p_conns[0].sync_with_ping() assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1) assert_equal(len(self.nodes[0].getpeerinfo()), 3) # At most a couple more tries should succeed (depending on how long # the test has been running so far). 
for i in range(3): p2p_conns[0].send_message(getdata_request) p2p_conns[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) self.log.info("Peer 0 disconnected after downloading old block too many times") # Requesting the current block on p2p_conns[1] should succeed indefinitely, # even when over the max upload target. # We'll try 800 times getdata_request.inv = [CInv(2, big_new_block)] for i in range(800): p2p_conns[1].send_message(getdata_request) p2p_conns[1].sync_with_ping() assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1) self.log.info("Peer 1 able to repeatedly download new block") # But if p2p_conns[1] tries for an old block, it gets disconnected too. getdata_request.inv = [CInv(2, big_old_block)] p2p_conns[1].send_message(getdata_request) p2p_conns[1].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 1) self.log.info("Peer 1 disconnected after trying to download old block") self.log.info("Advancing system time on node to clear counters...") # If we advance the time by 24 hours, then the counters should reset, # and p2p_conns[2] should be able to retrieve the old block. self.nodes[0].setmocktime(int(time.time())) p2p_conns[2].sync_with_ping() p2p_conns[2].send_message(getdata_request) p2p_conns[2].sync_with_ping() assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1) self.log.info("Peer 2 able to download old block") self.nodes[0].disconnect_p2ps() #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 self.log.info("Restarting nodes with -whitelist=127.0.0.1") self.stop_node(0) self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"]) # Reconnect to self.nodes[0] self.nodes[0].add_p2p_connection(TestP2PConn()) #retrieve 20 blocks which should be enough to break the 1MB limit getdata_request.inv = [CInv(2, big_new_block)] for i in range(20): self.nodes[0].p2p.send_message(getdata_request) self.nodes[0].p2p.sync_with_ping() assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1) getdata_request.inv = [CInv(2, big_old_block)] self.nodes[0].p2p.send_and_ping(getdata_request) assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist self.log.info("Peer still connected after trying to download old block (whitelisted)") if __name__ == '__main__': MaxUploadTest().main()
py
1a43a0aefd507c4c8efd7adadd18eb9c95e353c5
import argparse

CARRY_TEMPLATE = r"""module top (
    input clk,
    input carry_fabric,
    output out
);
    wire [{depth}-1:0] a;
    wire [{depth}-1:0] b;
    wire [{depth}-1:0] c;

    LFSR #(.POLY(5)) a_src(
        .clk(clk),
        .out(a)
    );
    LFSR #(.POLY(9)) b_src(
        .clk(clk),
        .out(b)
    );

    assign carry = {carry};
    assign c = a + b + carry;
    assign out = c[{depth}-1];
endmodule

module LFSR (
    input clk,
    output [{depth}-1:0] out
);
    parameter POLY = 1;

    reg [{depth}-1:0] r;
    assign out = r;

    wire f;
    assign f = ^(POLY & ~r);

    always @( posedge clk)
        r <= {{r[{depth}-1:1], ~f}};
endmodule"""


def main():
    parser = argparse.ArgumentParser(description="Generates top.v for carry stress test.")
    parser.add_argument('--init', choices=['0', '1', 'fabric'], required=True)
    parser.add_argument('--carry_depth', type=int, required=True)

    args = parser.parse_args()

    carry = args.init
    if args.init == 'fabric':
        carry = 'carry_fabric'

    print(CARRY_TEMPLATE.format(
        carry=carry,
        depth=args.carry_depth,
    ))


if __name__ == '__main__':
    main()
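
# A hedged usage note (not part of the original script): the generator is
# meant to be driven from the command line, e.g.
#
#   python generate_top.py --init fabric --carry_depth 64 > top.v
#   python generate_top.py --init 0 --carry_depth 16 > top.v
#
# where "generate_top.py" is only a placeholder name for this file. The
# template can also be rendered directly, bypassing argparse:
#
#   verilog = CARRY_TEMPLATE.format(carry='carry_fabric', depth=16)
#   open('top.v', 'w').write(verilog)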
py
1a43a1834869cedf80b0c60c23b4d637184fea57
import discord
import discord.utils
import discord.ext
import re
import emoji

from redbot.core import commands, Config, bank, checks


class April(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message: discord.Message):
        if message.guild is None:
            return
        valid = True
        valid2 = True
        msg = message
        author = message.author
        valid_user = isinstance(author, discord.Member) and not author.bot
        if not valid_user:
            return
        channels = [926112975813750796, 927783621794877460, 926113551968526376,
                    926113675419471972, 927518938919735326, 927518938919735326,
                    927539973459169302, 928689945080627201, 930531314363424808]
        if message.channel.id in channels:
            if len(message.attachments) == 0:
                x = re.search(r'^<a.*:|<:.*>$', msg.content)
                if not x:
                    valid = False
                else:
                    valid = True
                    x = re.search(r'>*\s[^\s]*\s<', msg.content)
                    if x:
                        valid = False
                if valid == False:
                    for symbol in msg.content:
                        if symbol not in emoji.UNICODE_EMOJI['en']:
                            valid2 = False
                        else:
                            i = msg.content.replace(symbol, '')
                            x = re.search(r'^\s*<:.*>\s*$', str(i))
                            if x:
                                valid = True
                                valid2 = True
                if valid == False and valid2 == False:
                    try:
                        await message.delete()
                    except discord.HTTPException:
                        pass

    @commands.Cog.listener()
    async def on_message_edit(self, _prior, message):
        await self.on_message(message)
py
1a43a1cd7a72140f420ba0fa88306a1d3da6233d
from chill import *

source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/2041.c')

procedure('kernel_fdtd_2d')

loop(0)

known(' nx > 1 ')
known(' ny > 1 ')

tile(1,2,16,2)
tile(1,4,32,4)
tile(2,2,16,2)
tile(2,4,32,4)
tile(3,2,16,2)
tile(3,4,32,4)
py
1a43a27c9ce78afbfe1ea6685329e64044454485
#!/usr/bin/env python
"""
A script that runs clang-format on changes detected via git. It will
report if running clang-format generated any changes.

In CI, the script considers it a failure if running clang-format makes a
change. In the pre-commit hook, the user is prompted to apply any clang-format
changes. Running tools/clang_format.py manually with no arguments should
replicate the pre-commit hook behavior.

Only files that are in CLANG_FORMAT_WHITELIST are checked.
"""
import subprocess
import os
import argparse
import difflib
import re


# Whitelist of directories to check. All files that in that directory
# (recursively) will be checked.
CLANG_FORMAT_WHITELIST = ["torch/csrc/jit/", "test/cpp/jit/"]

CPP_FILE_REGEX = re.compile("^.*\\.(h|cpp|cc|c|hpp)$")


def parse_args():
    parser = argparse.ArgumentParser(
        description="Execute clang-format on your working copy changes."
    )
    parser.add_argument(
        "-d",
        "--diff",
        default="HEAD",
        help="Git revision to diff against to get changes",
    )
    parser.add_argument(
        "--accept-changes",
        action="store_true",
        default=False,
        help=(
            "If true, apply whatever changes clang-format creates. "
            "Otherwise, just print the changes and exit"
        ),
    )
    parser.add_argument(
        "--check-all",
        action="store_true",
        default=False,
        help="If true, check all whitelisted files instead of just working copy changes",
    )
    parser.add_argument("--verbose", "-v", action="store_true", default=False)
    return parser.parse_args()


def get_whitelisted_files():
    """
    Parse CLANG_FORMAT_WHITELIST and resolve all directories.
    Returns the set of whitelist cpp source files.
    """
    matches = []
    for dir in CLANG_FORMAT_WHITELIST:
        for root, dirnames, filenames in os.walk(dir):
            for filename in filenames:
                if CPP_FILE_REGEX.match(filename):
                    matches.append(os.path.join(root, filename))
    return set(matches)


def get_changed_files(rev):
    """
    Get all changed files between the working tree and `rev`
    """
    changed_files = (
        subprocess.check_output(
            ["git", "diff-index", "--diff-filter=AMU", "--name-only", rev]
        )
        .decode()
        .split("\n")
    )
    return set(changed_files)


def get_diffs(files):
    """
    Run clang-format on all `files` and report if it changed anything.
    Returns a mapping of filename => diff generator
    """
    name_to_diffs = {}
    for f in files:
        formatted_text = subprocess.check_output(["clang-format", f]).decode()
        with open(f) as orig:
            orig_text = orig.read()
            if formatted_text != orig_text:
                orig_lines = orig_text.split("\n")
                formatted_lines = formatted_text.split("\n")
                diff = difflib.unified_diff(
                    orig_lines, formatted_lines, "original", "formatted"
                )
                name_to_diffs[f] = diff

    return name_to_diffs


def main():
    args = parse_args()

    whitelisted_files = get_whitelisted_files()

    if args.check_all:
        files_to_check = whitelisted_files
    else:
        changed_files = get_changed_files(args.diff)
        files_to_check = changed_files & whitelisted_files

    if args.verbose:
        print("Running clang-format on whitelisted files: ")
        for f in files_to_check:
            print(f)

    name_to_diffs = get_diffs(files_to_check)

    if len(name_to_diffs) == 0:
        return

    if args.accept_changes:
        # run clang-format on the necessary files
        args = ["clang-format", "-i"]
        args.extend(name_to_diffs.keys())
        subprocess.check_output(args)

        # add the changes so they will be committed
        args = ["git", "add"]
        args.extend(name_to_diffs.keys())
        subprocess.check_output(args)
    else:
        print("ERROR: Running clang-format created changes: ")
        for name, diff in name_to_diffs.items():
            print("In ", name)
            for line in diff:
                print(line)
            print("\n")


if __name__ == "__main__":
    main()
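
# A hedged usage note (not part of the original script), assuming it is
# invoked as tools/clang_format.py as the module docstring describes:
#
#   python tools/clang_format.py                     # diff working copy vs HEAD
#   python tools/clang_format.py --diff origin/master -v
#   python tools/clang_format.py --check-all         # scan every whitelisted file
#   python tools/clang_format.py --accept-changes    # apply and `git add` the fixes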
py
1a43a3b712e2b068ff63047dc81d56b86a66d656
# -*- coding: utf-8 -*- ''' Control the state system on the minion. State Caching ------------- When a highstate is called, the minion automatically caches a copy of the last high data. If you then run a highstate with cache=True it will use that cached highdata and won't hit the fileserver except for ``salt://`` links in the states themselves. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import shutil import sys import tarfile import tempfile import time # Import salt libs import salt.config import salt.payload import salt.state import salt.utils.args import salt.utils.data import salt.utils.event import salt.utils.files import salt.utils.functools import salt.utils.hashutils import salt.utils.jid import salt.utils.json import salt.utils.platform import salt.utils.state import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.defaults.exitcodes from salt.exceptions import CommandExecutionError, SaltInvocationError from salt.runners.state import orchestrate as _orchestrate from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six import msgpack __proxyenabled__ = ['*'] __outputter__ = { 'sls': 'highstate', 'sls_id': 'highstate', 'pkg': 'highstate', 'top': 'highstate', 'single': 'highstate', 'highstate': 'highstate', 'template': 'highstate', 'template_str': 'highstate', 'apply_': 'highstate', 'request': 'highstate', 'check_request': 'highstate', 'run_request': 'highstate', } __func_alias__ = { 'apply_': 'apply' } log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'state' def __virtual__(): ''' Set the virtualname ''' # Update global namespace with functions that are cloned in this module global _orchestrate _orchestrate = salt.utils.functools.namespaced_function(_orchestrate, globals()) return __virtualname__ def _filter_running(runnings): ''' Filter out the result: True + no changes data ''' ret = dict((tag, value) for tag, value in six.iteritems(runnings) if not value['result'] or value['changes']) return ret def _set_retcode(ret, highstate=None): ''' Set the return code based on the data back from the state system ''' # Set default retcode to 0 __context__['retcode'] = salt.defaults.exitcodes.EX_OK if isinstance(ret, list): __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return if not __utils__['state.check_result'](ret, highstate=highstate): __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_FAILURE def _get_pillar_errors(kwargs, pillar=None): ''' Checks all pillars (external and internal) for errors. Return an error message, if anywhere or None. 
:param kwargs: dictionary of options :param pillar: external pillar :return: None or an error message ''' return None if kwargs.get('force') else (pillar or {}).get('_errors', __pillar__.get('_errors')) or None def _wait(jid): ''' Wait for all previously started state jobs to finish running ''' if jid is None: jid = salt.utils.jid.gen_jid(__opts__) states = _prior_running_states(jid) while states: time.sleep(1) states = _prior_running_states(jid) def _snapper_pre(opts, jid): ''' Create a snapper pre snapshot ''' snapper_pre = None try: if not opts['test'] and __opts__.get('snapper_states'): # Run the snapper pre snapshot snapper_pre = __salt__['snapper.create_snapshot']( config=__opts__.get('snapper_states_config', 'root'), snapshot_type='pre', description='Salt State run for jid {0}'.format(jid), __pub_jid=jid) except Exception: log.error('Failed to create snapper pre snapshot for jid: %s', jid) return snapper_pre def _snapper_post(opts, jid, pre_num): ''' Create the post states snapshot ''' try: if not opts['test'] and __opts__.get('snapper_states') and pre_num: # Run the snapper pre snapshot __salt__['snapper.create_snapshot']( config=__opts__.get('snapper_states_config', 'root'), snapshot_type='post', pre_number=pre_num, description='Salt State run for jid {0}'.format(jid), __pub_jid=jid) except Exception: log.error('Failed to create snapper pre snapshot for jid: %s', jid) def _get_pause(jid, state_id=None): ''' Return the pause information for a given jid ''' pause_dir = os.path.join(__opts__['cachedir'], 'state_pause') pause_path = os.path.join(pause_dir, jid) if not os.path.exists(pause_dir): try: os.makedirs(pause_dir) except OSError: # File created in the gap pass data = {} if state_id is not None: if state_id not in data: data[state_id] = {} if os.path.exists(pause_path): with salt.utils.files.fopen(pause_path, 'rb') as fp_: data = msgpack.loads(fp_.read()) return data, pause_path def get_pauses(jid=None): ''' Get a report on all of the currently paused state runs and pause run settings. Optionally send in a jid if you only desire to see a single pause data set. ''' ret = {} active = __salt__['saltutil.is_running']('state.*') pause_dir = os.path.join(__opts__['cachedir'], 'state_pause') if not os.path.exists(pause_dir): return ret if jid is None: jids = os.listdir(pause_dir) elif isinstance(jid, list): jids = salt.utils.data.stringify(jid) else: jids = [six.text_type(jid)] for scan_jid in jids: is_active = False for active_data in active: if active_data['jid'] == scan_jid: is_active = True if not is_active: try: pause_path = os.path.join(pause_dir, scan_jid) os.remove(pause_path) except OSError: # Already gone pass continue data, pause_path = _get_pause(scan_jid) ret[scan_jid] = data return ret def soft_kill(jid, state_id=None): ''' Set up a state run to die before executing the given state id, this instructs a running state to safely exit at a given state id. This needs to pass in the jid of the running state. If a state_id is not passed then the jid referenced will be safely exited at the beginning of the next state run. The given state id is the id got a given state execution, so given a state that looks like this: .. code-block:: yaml vim: pkg.installed: [] The state_id to pass to `soft_kill` is `vim` CLI Examples: .. 
code-block:: bash salt '*' state.soft_kill 20171130110407769519 salt '*' state.soft_kill 20171130110407769519 vim ''' jid = six.text_type(jid) if state_id is None: state_id = '__all__' data, pause_path = _get_pause(jid, state_id) data[state_id]['kill'] = True with salt.utils.files.fopen(pause_path, 'wb') as fp_: fp_.write(msgpack.dumps(data)) def pause(jid, state_id=None, duration=None): ''' Set up a state id pause, this instructs a running state to pause at a given state id. This needs to pass in the jid of the running state and can optionally pass in a duration in seconds. If a state_id is not passed then the jid referenced will be paused at the beginning of the next state run. The given state id is the id got a given state execution, so given a state that looks like this: .. code-block:: yaml vim: pkg.installed: [] The state_id to pass to `pause` is `vim` CLI Examples: .. code-block:: bash salt '*' state.pause 20171130110407769519 salt '*' state.pause 20171130110407769519 vim salt '*' state.pause 20171130110407769519 vim 20 ''' jid = six.text_type(jid) if state_id is None: state_id = '__all__' data, pause_path = _get_pause(jid, state_id) if duration: data[state_id]['duration'] = int(duration) with salt.utils.files.fopen(pause_path, 'wb') as fp_: fp_.write(msgpack.dumps(data)) def resume(jid, state_id=None): ''' Remove a pause from a jid, allowing it to continue. If the state_id is not specified then the a general pause will be resumed. The given state_id is the id got a given state execution, so given a state that looks like this: .. code-block:: yaml vim: pkg.installed: [] The state_id to pass to `rm_pause` is `vim` CLI Examples: .. code-block:: bash salt '*' state.resume 20171130110407769519 salt '*' state.resume 20171130110407769519 vim ''' jid = six.text_type(jid) if state_id is None: state_id = '__all__' data, pause_path = _get_pause(jid, state_id) if state_id in data: data.pop(state_id) if state_id == '__all__': data = {} with salt.utils.files.fopen(pause_path, 'wb') as fp_: fp_.write(msgpack.dumps(data)) def orchestrate(mods, saltenv='base', test=None, exclude=None, pillar=None, pillarenv=None): ''' .. versionadded:: 2016.11.0 Execute the orchestrate runner from a masterless minion. .. seealso:: More Orchestrate documentation * :ref:`Full Orchestrate Tutorial <orchestrate-runner>` * :py:mod:`Docs for the ``salt`` state module <salt.states.saltmod>` CLI Examples: .. code-block:: bash salt-call --local state.orchestrate webserver salt-call --local state.orchestrate webserver saltenv=dev test=True salt-call --local state.orchestrate webserver saltenv=dev pillarenv=aws ''' return _orchestrate(mods=mods, saltenv=saltenv, test=test, exclude=exclude, pillar=pillar, pillarenv=pillarenv) def running(concurrent=False): ''' Return a list of strings that contain state return data if a state function is already running. This function is used to prevent multiple state calls from being run at the same time. CLI Example: .. code-block:: bash salt '*' state.running ''' ret = [] if concurrent: return ret active = __salt__['saltutil.is_running']('state.*') for data in active: err = ( 'The function "{0}" is running as PID {1} and was started at ' '{2} with jid {3}' ).format( data['fun'], data['pid'], salt.utils.jid.jid_to_time(data['jid']), data['jid'], ) ret.append(err) return ret def _prior_running_states(jid): ''' Return a list of dicts of prior calls to state functions. This function is used to queue state calls so only one is run at a time. 
''' ret = [] active = __salt__['saltutil.is_running']('state.*') for data in active: try: data_jid = int(data['jid']) except ValueError: continue if data_jid < int(jid): ret.append(data) return ret def _check_queue(queue, kwargs): ''' Utility function to queue the state run if requested and to check for conflicts in currently running states ''' if queue: _wait(kwargs.get('__pub_jid')) else: conflict = running(concurrent=kwargs.get('concurrent', False)) if conflict: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return conflict def _get_initial_pillar(opts): return __pillar__ if __opts__.get('__cli', None) == 'salt-call' \ and opts['pillarenv'] == __opts__['pillarenv'] \ else None def low(data, queue=False, **kwargs): ''' Execute a single low data call This function is mostly intended for testing the state system and is not likely to be needed in everyday usage. CLI Example: .. code-block:: bash salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}' ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict try: st_ = salt.state.State(__opts__, proxy=__proxy__) except NameError: st_ = salt.state.State(__opts__) err = st_.verify_data(data) if err: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return err ret = st_.call(data) if isinstance(ret, list): __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR if __utils__['state.check_result'](ret): __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_FAILURE return ret def _get_test_value(test=None, **kwargs): ''' Determine the correct value for the test flag. ''' ret = True if test is None: if salt.utils.args.test_mode(test=test, **kwargs): ret = True elif __salt__['config.get']('test', omit_opts=True) is True: ret = True else: ret = __opts__.get('test', None) else: ret = test return ret def high(data, test=None, queue=False, **kwargs): ''' Execute the compound calls stored in a single set of high data This function is mostly intended for testing the state system and is not likely to be needed in everyday usage. CLI Example: .. code-block:: bash salt '*' state.high '{"vim": {"pkg": ["installed"]}}' ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' ) try: st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, proxy=__proxy__, context=__context__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, initial_pillar=_get_initial_pillar(opts)) ret = st_.call_high(data) _set_retcode(ret, highstate=data) return ret def template(tem, queue=False, **kwargs): ''' Execute the information stored in a template file on the minion. This function does not ask a master for a SLS file to render but instead directly processes the file at the provided path on the minion. CLI Example: .. code-block:: bash salt '*' state.template '<Path to template on the minion>' ''' if 'env' in kwargs: # "env" is not supported; Use "saltenv". 
kwargs.pop('env') conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) try: st_ = salt.state.HighState(opts, context=__context__, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, context=__context__, initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE raise CommandExecutionError('Pillar failed to render', info=errors) if not tem.endswith('.sls'): tem = '{sls}.sls'.format(sls=tem) high_state, errors = st_.render_state(tem, kwargs.get('saltenv', ''), '', None, local=True) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return errors ret = st_.state.call_high(high_state) _set_retcode(ret, highstate=high_state) return ret def template_str(tem, queue=False, **kwargs): ''' Execute the information stored in a string from an sls template CLI Example: .. code-block:: bash salt '*' state.template_str '<Template String>' ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) try: st_ = salt.state.State(opts, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.State(opts, initial_pillar=_get_initial_pillar(opts)) ret = st_.call_template_str(tem) _set_retcode(ret) return ret def apply_(mods=None, **kwargs): ''' .. versionadded:: 2015.5.0 This function will call :mod:`state.highstate <salt.modules.state.highstate>` or :mod:`state.sls <salt.modules.state.sls>` based on the arguments passed to this function. It exists as a more intuitive way of applying states. .. rubric:: APPLYING ALL STATES CONFIGURED IN TOP.SLS (A.K.A. :ref:`HIGHSTATE <running-highstate>`) To apply all configured states, simply run ``state.apply``: .. code-block:: bash salt '*' state.apply The following additional arguments are also accepted when applying all states configured in top.sls: test Run states in test-only (dry-run) mode mock The mock option allows for the state run to execute without actually calling any states. This then returns a mocked return which will show the requisite ordering as well as fully validate the state run. .. versionadded:: 2015.8.4 pillar Custom Pillar values, passed as a dictionary of key-value pairs .. code-block:: bash salt '*' state.apply stuff pillar='{"foo": "bar"}' .. note:: Values passed this way will override Pillar values set via ``pillar_roots`` or an external Pillar source. exclude Exclude specific states from execution. Accepts a list of sls names, a comma-separated string of sls names, or a list of dictionaries containing ``sls`` or ``id`` keys. Glob-patterns may be used to match multiple states. .. code-block:: bash salt '*' state.apply exclude=bar,baz salt '*' state.apply exclude=foo* salt '*' state.apply exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]" queue : False Instead of failing immediately when another state run is in progress, queue the new state run to begin running once the other has finished. This option starts a new thread for each queued state run, so use this option sparingly. localconfig Optionally, instead of using the minion config, load minion opts from the file specified by this argument, and then merge them with the options from the minion config. 
This functionality allows for specific states to be run with their own custom minion configuration, including different pillars, file_roots, etc. .. code-block:: bash salt '*' state.apply localconfig=/path/to/minion.yml .. rubric:: APPLYING INDIVIDUAL SLS FILES (A.K.A. :py:func:`STATE.SLS <salt.modules.state.sls>`) To apply individual SLS files, pass them as a comma-separated list: .. code-block:: bash # Run the states configured in salt://stuff.sls (or salt://stuff/init.sls) salt '*' state.apply stuff # Run the states configured in salt://stuff.sls (or salt://stuff/init.sls) # and salt://pkgs.sls (or salt://pkgs/init.sls). salt '*' state.apply stuff,pkgs # Run the states configured in a more deeply nested directory such as salt://my/organized/stuff.sls (or salt://my/organized/stuff/init.sls) salt '*' state.apply my.organized.stuff The following additional arguments are also accepted when applying individual SLS files: test Run states in test-only (dry-run) mode mock The mock option allows for the state run to execute without actually calling any states. This then returns a mocked return which will show the requisite ordering as well as fully validate the state run. .. versionadded:: 2015.8.4 pillar Custom Pillar values, passed as a dictionary of key-value pairs .. code-block:: bash salt '*' state.apply stuff pillar='{"foo": "bar"}' .. note:: Values passed this way will override Pillar values set via ``pillar_roots`` or an external Pillar source. queue : False Instead of failing immediately when another state run is in progress, queue the new state run to begin running once the other has finished. This option starts a new thread for each queued state run, so use this option sparingly. concurrent : False Execute state runs concurrently instead of serially .. warning:: This flag is potentially dangerous. It is designed for use when multiple state runs can safely be run at the same time. Do *not* use this flag for performance optimization. saltenv Specify a salt fileserver environment to be used when applying states .. versionchanged:: 0.17.0 Argument name changed from ``env`` to ``saltenv`` .. versionchanged:: 2014.7.0 If no saltenv is specified, the minion config will be checked for an ``environment`` parameter and if found, it will be used. If none is found, ``base`` will be used. In prior releases, the minion config was not checked and ``base`` would always be assumed when the saltenv was not explicitly set. pillarenv Specify a Pillar environment to be used when applying states. This can also be set in the minion config file using the :conf_minion:`pillarenv` option. When neither the :conf_minion:`pillarenv` minion config option nor this CLI argument is used, all Pillar environments will be merged together. localconfig Optionally, instead of using the minion config, load minion opts from the file specified by this argument, and then merge them with the options from the minion config. This functionality allows for specific states to be run with their own custom minion configuration, including different pillars, file_roots, etc. .. code-block:: bash salt '*' state.apply stuff localconfig=/path/to/minion.yml sync_mods If specified, the desired custom module types will be synced prior to running the SLS files: .. code-block:: bash salt '*' state.apply stuff sync_mods=states,modules salt '*' state.apply stuff sync_mods=all .. note:: This option is ignored when no SLS files are specified, as a :ref:`highstate <running-highstate>` automatically syncs all custom module types. .. 
versionadded:: 2017.7.8,2018.3.3,2019.2.0 ''' if mods: return sls(mods, **kwargs) return highstate(**kwargs) def request(mods=None, **kwargs): ''' .. versionadded:: 2015.5.0 Request that the local admin execute a state run via `salt-call state.run_request`. All arguments match those of state.apply. CLI Example: .. code-block:: bash salt '*' state.request salt '*' state.request stuff salt '*' state.request stuff,pkgs ''' kwargs['test'] = True ret = apply_(mods, **kwargs) notify_path = os.path.join(__opts__['cachedir'], 'req_state.p') serial = salt.payload.Serial(__opts__) req = check_request() req.update({kwargs.get('name', 'default'): { 'test_run': ret, 'mods': mods, 'kwargs': kwargs } }) with salt.utils.files.set_umask(0o077): try: if salt.utils.platform.is_windows(): # Make sure cache file isn't read-only __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path)) with salt.utils.files.fopen(notify_path, 'w+b') as fp_: serial.dump(req, fp_) except (IOError, OSError): log.error( 'Unable to write state request file %s. Check permission.', notify_path ) return ret def check_request(name=None): ''' .. versionadded:: 2015.5.0 Return the state request information, if any CLI Example: .. code-block:: bash salt '*' state.check_request ''' notify_path = os.path.join(__opts__['cachedir'], 'req_state.p') serial = salt.payload.Serial(__opts__) if os.path.isfile(notify_path): with salt.utils.files.fopen(notify_path, 'rb') as fp_: req = serial.load(fp_) if name: return req[name] return req return {} def clear_request(name=None): ''' .. versionadded:: 2015.5.0 Clear out the state execution request without executing it CLI Example: .. code-block:: bash salt '*' state.clear_request ''' notify_path = os.path.join(__opts__['cachedir'], 'req_state.p') serial = salt.payload.Serial(__opts__) if not os.path.isfile(notify_path): return True if not name: try: os.remove(notify_path) except (IOError, OSError): pass else: req = check_request() if name in req: req.pop(name) else: return False with salt.utils.files.set_umask(0o077): try: if salt.utils.platform.is_windows(): # Make sure cache file isn't read-only __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path)) with salt.utils.files.fopen(notify_path, 'w+b') as fp_: serial.dump(req, fp_) except (IOError, OSError): log.error( 'Unable to write state request file %s. Check permission.', notify_path ) return True def run_request(name='default', **kwargs): ''' .. versionadded:: 2015.5.0 Execute the pending state request CLI Example: .. code-block:: bash salt '*' state.run_request ''' req = check_request() if name not in req: return {} n_req = req[name] if 'mods' not in n_req or 'kwargs' not in n_req: return {} req[name]['kwargs'].update(kwargs) if 'test' in n_req['kwargs']: n_req['kwargs'].pop('test') if req: ret = apply_(n_req['mods'], **n_req['kwargs']) try: os.remove(os.path.join(__opts__['cachedir'], 'req_state.p')) except (IOError, OSError): pass return ret return {} def highstate(test=None, queue=False, **kwargs): ''' Retrieve the state data from the salt master for this minion and execute it test Run states in test-only (dry-run) mode pillar Custom Pillar values, passed as a dictionary of key-value pairs .. code-block:: bash salt '*' state.highstate stuff pillar='{"foo": "bar"}' .. note:: Values passed this way will override Pillar values set via ``pillar_roots`` or an external Pillar source. .. versionchanged:: 2016.3.0 GPG-encrypted CLI Pillar data is now supported via the GPG renderer. See :ref:`here <encrypted-cli-pillar-data>` for details. 
pillar_enc Specify which renderer to use to decrypt encrypted data located within the ``pillar`` value. Currently, only ``gpg`` is supported. .. versionadded:: 2016.3.0 exclude Exclude specific states from execution. Accepts a list of sls names, a comma-separated string of sls names, or a list of dictionaries containing ``sls`` or ``id`` keys. Glob-patterns may be used to match multiple states. .. code-block:: bash salt '*' state.highstate exclude=bar,baz salt '*' state.highstate exclude=foo* salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]" saltenv Specify a salt fileserver environment to be used when applying states .. versionchanged:: 0.17.0 Argument name changed from ``env`` to ``saltenv``. .. versionchanged:: 2014.7.0 If no saltenv is specified, the minion config will be checked for a ``saltenv`` parameter and if found, it will be used. If none is found, ``base`` will be used. In prior releases, the minion config was not checked and ``base`` would always be assumed when the saltenv was not explicitly set. pillarenv Specify a Pillar environment to be used when applying states. This can also be set in the minion config file using the :conf_minion:`pillarenv` option. When neither the :conf_minion:`pillarenv` minion config option nor this CLI argument is used, all Pillar environments will be merged together. queue : False Instead of failing immediately when another state run is in progress, queue the new state run to begin running once the other has finished. This option starts a new thread for each queued state run, so use this option sparingly. localconfig Optionally, instead of using the minion config, load minion opts from the file specified by this argument, and then merge them with the options from the minion config. This functionality allows for specific states to be run with their own custom minion configuration, including different pillars, file_roots, etc. mock The mock option allows for the state run to execute without actually calling any states. This then returns a mocked return which will show the requisite ordering as well as fully validate the state run. .. versionadded:: 2015.8.4 CLI Examples: .. code-block:: bash salt '*' state.highstate salt '*' state.highstate whitelist=sls1_to_run,sls2_to_run salt '*' state.highstate exclude=sls_to_exclude salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]" salt '*' state.highstate pillar="{foo: 'Foo!', bar: 'Bar!'}" ''' if _disabled(['highstate']): log.debug('Salt highstate run is disabled. To re-enable, run state.enable highstate') ret = { 'name': 'Salt highstate run is disabled. To re-enable, run state.enable highstate', 'result': 'False', 'comment': 'Disabled' } return ret conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict orig_test = __opts__.get('test', None) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) if 'env' in kwargs: # "env" is not supported; Use "saltenv". kwargs.pop('env') if 'saltenv' in kwargs: opts['saltenv'] = kwargs['saltenv'] if 'pillarenv' in kwargs: opts['pillarenv'] = kwargs['pillarenv'] pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' 
) try: st_ = salt.state.HighState(opts, pillar_override, kwargs.get('__pub_jid'), pillar_enc=pillar_enc, proxy=__proxy__, context=__context__, mocked=kwargs.get('mock', False), initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, pillar_override, kwargs.get('__pub_jid'), pillar_enc=pillar_enc, mocked=kwargs.get('mock', False), initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE return ['Pillar failed to render with the following messages:'] + errors st_.push_active() orchestration_jid = kwargs.get('orchestration_jid') snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy')) try: ret = st_.call_highstate( exclude=kwargs.get('exclude', []), cache=kwargs.get('cache', None), cache_name=kwargs.get('cache_name', 'highstate'), force=kwargs.get('force', False), whitelist=kwargs.get('whitelist'), orchestration_jid=orchestration_jid) finally: st_.pop_active() if isinstance(ret, dict) and (__salt__['config.option']('state_data', '') == 'terse' or kwargs.get('terse')): ret = _filter_running(ret) _set_retcode(ret, highstate=st_.building_highstate) _snapper_post(opts, kwargs.get('__pub_jid', 'called localy'), snapper_pre) # Work around Windows multiprocessing bug, set __opts__['test'] back to # value from before this function was run. __opts__['test'] = orig_test return ret def sls(mods, test=None, exclude=None, queue=False, sync_mods=None, **kwargs): ''' Execute the states in one or more SLS files test Run states in test-only (dry-run) mode pillar Custom Pillar values, passed as a dictionary of key-value pairs .. code-block:: bash salt '*' state.sls stuff pillar='{"foo": "bar"}' .. note:: Values passed this way will override existing Pillar values set via ``pillar_roots`` or an external Pillar source. Pillar values that are not included in the kwarg will not be overwritten. .. versionchanged:: 2016.3.0 GPG-encrypted CLI Pillar data is now supported via the GPG renderer. See :ref:`here <encrypted-cli-pillar-data>` for details. pillar_enc Specify which renderer to use to decrypt encrypted data located within the ``pillar`` value. Currently, only ``gpg`` is supported. .. versionadded:: 2016.3.0 exclude Exclude specific states from execution. Accepts a list of sls names, a comma-separated string of sls names, or a list of dictionaries containing ``sls`` or ``id`` keys. Glob-patterns may be used to match multiple states. .. code-block:: bash salt '*' state.sls foo,bar,baz exclude=bar,baz salt '*' state.sls foo,bar,baz exclude=ba* salt '*' state.sls foo,bar,baz exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]" queue : False Instead of failing immediately when another state run is in progress, queue the new state run to begin running once the other has finished. This option starts a new thread for each queued state run, so use this option sparingly. concurrent : False Execute state runs concurrently instead of serially .. warning:: This flag is potentially dangerous. It is designed for use when multiple state runs can safely be run at the same time. Do *not* use this flag for performance optimization. saltenv Specify a salt fileserver environment to be used when applying states .. versionchanged:: 0.17.0 Argument name changed from ``env`` to ``saltenv``. .. versionchanged:: 2014.7.0 If no saltenv is specified, the minion config will be checked for an ``environment`` parameter and if found, it will be used. 
If none is found, ``base`` will be used. In prior releases, the minion config was not checked and ``base`` would always be assumed when the saltenv was not explicitly set. pillarenv Specify a Pillar environment to be used when applying states. This can also be set in the minion config file using the :conf_minion:`pillarenv` option. When neither the :conf_minion:`pillarenv` minion config option nor this CLI argument is used, all Pillar environments will be merged together. localconfig Optionally, instead of using the minion config, load minion opts from the file specified by this argument, and then merge them with the options from the minion config. This functionality allows for specific states to be run with their own custom minion configuration, including different pillars, file_roots, etc. mock The mock option allows for the state run to execute without actually calling any states. This then returns a mocked return which will show the requisite ordering as well as fully validate the state run. .. versionadded:: 2015.8.4 sync_mods If specified, the desired custom module types will be synced prior to running the SLS files: .. code-block:: bash salt '*' state.sls stuff sync_mods=states,modules salt '*' state.sls stuff sync_mods=all .. versionadded:: 2017.7.8,2018.3.3,2019.2.0 CLI Example: .. code-block:: bash # Run the states configured in salt://example.sls (or salt://example/init.sls) salt '*' state.apply example # Run the states configured in salt://core.sls (or salt://core/init.sls) # and salt://edit/vim.sls (or salt://edit/vim/init.sls) salt '*' state.sls core,edit.vim # Run the states configured in a more deeply nested directory such as salt://my/nested/state.sls (or salt://my/nested/state/init.sls) salt '*' state.sls my.nested.state salt '*' state.sls core exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]" salt '*' state.sls myslsfile pillar="{foo: 'Foo!', bar: 'Bar!'}" ''' concurrent = kwargs.get('concurrent', False) if 'env' in kwargs: # "env" is not supported; Use "saltenv". kwargs.pop('env') # Modification to __opts__ lost after this if-else if queue: _wait(kwargs.get('__pub_jid')) else: conflict = running(concurrent) if conflict: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return conflict if isinstance(mods, list): disabled = _disabled(mods) else: disabled = _disabled([mods]) if disabled: for state in disabled: log.debug( 'Salt state %s is disabled. To re-enable, run ' 'state.enable %s', state, state ) __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return disabled orig_test = __opts__.get('test', None) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) # Since this is running a specific SLS file (or files), fall back to the # 'base' saltenv if none is configured and none was passed. if opts['saltenv'] is None: opts['saltenv'] = 'base' pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' 
) serial = salt.payload.Serial(__opts__) cfn = os.path.join( __opts__['cachedir'], '{0}.cache.p'.format(kwargs.get('cache_name', 'highstate')) ) if sync_mods is True: sync_mods = ['all'] if sync_mods is not None: sync_mods = salt.utils.args.split_input(sync_mods) else: sync_mods = [] if 'all' in sync_mods and sync_mods != ['all']: # Prevent unnecessary extra syncing sync_mods = ['all'] for module_type in sync_mods: try: __salt__['saltutil.sync_{0}'.format(module_type)]( saltenv=opts['saltenv'] ) except KeyError: log.warning( 'Invalid custom module type \'%s\', ignoring', module_type ) try: st_ = salt.state.HighState(opts, pillar_override, kwargs.get('__pub_jid'), pillar_enc=pillar_enc, proxy=__proxy__, context=__context__, mocked=kwargs.get('mock', False), initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, pillar_override, kwargs.get('__pub_jid'), pillar_enc=pillar_enc, mocked=kwargs.get('mock', False), initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE return ['Pillar failed to render with the following messages:'] + errors orchestration_jid = kwargs.get('orchestration_jid') with salt.utils.files.set_umask(0o077): if kwargs.get('cache'): if os.path.isfile(cfn): with salt.utils.files.fopen(cfn, 'rb') as fp_: high_ = serial.load(fp_) return st_.state.call_high(high_, orchestration_jid) # If the state file is an integer, convert to a string then to unicode if isinstance(mods, six.integer_types): mods = salt.utils.stringutils.to_unicode(str(mods)) # future lint: disable=blacklisted-function mods = salt.utils.args.split_input(mods) st_.push_active() try: high_, errors = st_.render_highstate({opts['saltenv']: mods}) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return errors if exclude: exclude = salt.utils.args.split_input(exclude) if '__exclude__' in high_: high_['__exclude__'].extend(exclude) else: high_['__exclude__'] = exclude snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy')) ret = st_.state.call_high(high_, orchestration_jid) finally: st_.pop_active() if __salt__['config.option']('state_data', '') == 'terse' or kwargs.get('terse'): ret = _filter_running(ret) cache_file = os.path.join(__opts__['cachedir'], 'sls.p') with salt.utils.files.set_umask(0o077): try: if salt.utils.platform.is_windows(): # Make sure cache file isn't read-only __salt__['cmd.run'](['attrib', '-R', cache_file], python_shell=False) with salt.utils.files.fopen(cache_file, 'w+b') as fp_: serial.dump(ret, fp_) except (IOError, OSError): log.error( 'Unable to write to SLS cache file %s. Check permission.', cache_file ) _set_retcode(ret, high_) # Work around Windows multiprocessing bug, set __opts__['test'] back to # value from before this function was run. __opts__['test'] = orig_test try: with salt.utils.files.fopen(cfn, 'w+b') as fp_: try: serial.dump(high_, fp_) except TypeError: # Can't serialize pydsl pass except (IOError, OSError): log.error( 'Unable to write to highstate cache file %s. Do you have permissions?', cfn ) _snapper_post(opts, kwargs.get('__pub_jid', 'called localy'), snapper_pre) return ret def top(topfn, test=None, queue=False, **kwargs): ''' Execute a specific top file instead of the default. This is useful to apply configurations from a different environment (for example, dev or prod), without modifying the default top file. 
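# Illustrative sketch, not from the source: the sync_mods handling above
# accepts True, a comma-separated string, or a list, and collapses anything
# containing 'all' down to ['all'] so only one saltutil.sync_* call is made.
# A stand-alone approximation of that normalisation:
def _normalize_sync_mods_sketch(sync_mods):
    if sync_mods is True:
        return ['all']
    if sync_mods is None:
        return []
    if isinstance(sync_mods, str):
        sync_mods = [mod.strip() for mod in sync_mods.split(',') if mod.strip()]
    return ['all'] if 'all' in sync_mods else list(sync_mods)

# _normalize_sync_mods_sketch('states,modules') -> ['states', 'modules']
# _normalize_sync_mods_sketch('all,states')     -> ['all']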
queue : False Instead of failing immediately when another state run is in progress, queue the new state run to begin running once the other has finished. This option starts a new thread for each queued state run, so use this option sparingly. saltenv Specify a salt fileserver environment to be used when applying states pillarenv Specify a Pillar environment to be used when applying states. This can also be set in the minion config file using the :conf_minion:`pillarenv` option. When neither the :conf_minion:`pillarenv` minion config option nor this CLI argument is used, all Pillar environments will be merged together. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' state.top reverse_top.sls salt '*' state.top prod_top.sls exclude=sls_to_exclude salt '*' state.top dev_top.sls exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]" ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict orig_test = __opts__.get('test', None) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' ) try: st_ = salt.state.HighState(opts, pillar_override, pillar_enc=pillar_enc, context=__context__, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, pillar_override, pillar_enc=pillar_enc, context=__context__, initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE return ['Pillar failed to render with the following messages:'] + errors st_.push_active() st_.opts['state_top'] = salt.utils.url.create(topfn) ret = {} orchestration_jid = kwargs.get('orchestration_jid') if 'saltenv' in kwargs: st_.opts['state_top_saltenv'] = kwargs['saltenv'] try: snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy')) ret = st_.call_highstate( exclude=kwargs.get('exclude', []), cache=kwargs.get('cache', None), cache_name=kwargs.get('cache_name', 'highstate'), orchestration_jid=orchestration_jid) finally: st_.pop_active() _set_retcode(ret, highstate=st_.building_highstate) # Work around Windows multiprocessing bug, set __opts__['test'] back to # value from before this function was run. _snapper_post(opts, kwargs.get('__pub_jid', 'called localy'), snapper_pre) __opts__['test'] = orig_test return ret def show_highstate(queue=False, **kwargs): ''' Retrieve the highstate data from the salt master and display it Custom Pillar data can be passed with the ``pillar`` kwarg. CLI Example: .. code-block:: bash salt '*' state.show_highstate ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' 
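# Illustrative sketch only: _check_queue is defined earlier in this module and
# is not reproduced here. Based on how its return value is used above (an
# assumption, not the real implementation), it either waits for the running
# job when queue=True or hands back a conflict message when another state run
# is already in progress:
def _check_queue_sketch(queue, other_run_in_progress):
    if queue:
        return None  # caller waited for the other run; proceed
    if other_run_in_progress:
        return ['Another state run is already in progress']  # hypothetical text
    return None  # nothing running; proceed

# Callers treat any non-None return as an error and return it unchanged.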
) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) try: st_ = salt.state.HighState(opts, pillar_override, pillar_enc=pillar_enc, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, pillar_override, pillar_enc=pillar_enc, initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE raise CommandExecutionError('Pillar failed to render', info=errors) st_.push_active() try: ret = st_.compile_highstate() finally: st_.pop_active() _set_retcode(ret) return ret def show_lowstate(queue=False, **kwargs): ''' List out the low data that will be applied to this minion CLI Example: .. code-block:: bash salt '*' state.show_lowstate ''' conflict = _check_queue(queue, kwargs) if conflict is not None: assert False return conflict opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) try: st_ = salt.state.HighState(opts, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE raise CommandExecutionError('Pillar failed to render', info=errors) st_.push_active() try: ret = st_.compile_low_chunks() finally: st_.pop_active() return ret def show_state_usage(queue=False, **kwargs): ''' Retrieve the highstate data from the salt master to analyse used and unused states Custom Pillar data can be passed with the ``pillar`` kwarg. CLI Example: .. code-block:: bash salt '*' state.show_state_usage ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict pillar = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar is not None \ and not isinstance(pillar, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' ) st_ = salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc) st_.push_active() try: ret = st_.compile_state_usage() finally: st_.pop_active() _set_retcode(ret) return ret def show_states(queue=False, **kwargs): ''' Returns the list of states that will be applied on highstate. CLI Example: .. code-block:: bash salt '*' state.show_states .. versionadded:: 2019.2.0 ''' conflict = _check_queue(queue, kwargs) if conflict is not None: assert False return conflict opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) try: st_ = salt.state.HighState(opts, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE raise CommandExecutionError('Pillar failed to render', info=errors) st_.push_active() states = OrderedDict() try: result = st_.compile_low_chunks() if not isinstance(result, list): raise Exception(result) for s in result: if not isinstance(s, dict): _set_retcode(result) return result states[s['__sls__']] = True finally: st_.pop_active() return list(states.keys()) def sls_id(id_, mods, test=None, queue=False, **kwargs): ''' Call a single ID from the named module(s) and handle all requisites The state ID comes *before* the module ID(s) on the command line. 
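# Illustrative sketch, not part of the module: show_states above reduces the
# compiled low chunks to an ordered, de-duplicated list of SLS names by keying
# an OrderedDict on each chunk's '__sls__'. The same idea in isolation:
from collections import OrderedDict


def _sls_names_from_chunks_sketch(chunks):
    seen = OrderedDict()
    for chunk in chunks:
        seen[chunk['__sls__']] = True
    return list(seen)

# _sls_names_from_chunks_sketch(
#     [{'__sls__': 'core'}, {'__sls__': 'edit.vim'}, {'__sls__': 'core'}])
# -> ['core', 'edit.vim']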
id ID to call mods Comma-delimited list of modules to search for given id and its requisites .. versionadded:: 2014.7.0 saltenv : base Specify a salt fileserver environment to be used when applying states pillarenv Specify a Pillar environment to be used when applying states. This can also be set in the minion config file using the :conf_minion:`pillarenv` option. When neither the :conf_minion:`pillarenv` minion config option nor this CLI argument is used, all Pillar environments will be merged together. pillar Custom Pillar values, passed as a dictionary of key-value pairs .. code-block:: bash salt '*' state.sls_id my_state my_module pillar='{"foo": "bar"}' .. note:: Values passed this way will override existing Pillar values set via ``pillar_roots`` or an external Pillar source. Pillar values that are not included in the kwarg will not be overwritten. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' state.sls_id my_state my_module salt '*' state.sls_id my_state my_module,a_common_module ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict orig_test = __opts__.get('test', None) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) # Since this is running a specific ID within a specific SLS file, fall back # to the 'base' saltenv if none is configured and none was passed. if opts['saltenv'] is None: opts['saltenv'] = 'base' pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' ) try: st_ = salt.state.HighState(opts, pillar_override, pillar_enc=pillar_enc, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, pillar_override, pillar_enc=pillar_enc, initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE return ['Pillar failed to render with the following messages:'] + errors split_mods = salt.utils.args.split_input(mods) st_.push_active() try: high_, errors = st_.render_highstate({opts['saltenv']: split_mods}) finally: st_.pop_active() errors += st_.state.verify_high(high_) # Apply requisites to high data high_, req_in_errors = st_.state.requisite_in(high_) if req_in_errors: # This if statement should not be necessary if there were no errors, # but it is required to get the unit tests to pass. errors.extend(req_in_errors) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return errors chunks = st_.state.compile_high_data(high_) ret = {} for chunk in chunks: if chunk.get('__id__', '') == id_: ret.update(st_.state.call_chunk(chunk, {}, chunks)) _set_retcode(ret, highstate=highstate) # Work around Windows multiprocessing bug, set __opts__['test'] back to # value from before this function was run. __opts__['test'] = orig_test if not ret: raise SaltInvocationError( 'No matches for ID \'{0}\' found in SLS \'{1}\' within saltenv ' '\'{2}\''.format(id_, mods, opts['saltenv']) ) return ret def show_low_sls(mods, test=None, queue=False, **kwargs): ''' Display the low data from a specific sls. The default environment is ``base``, use ``saltenv`` to specify a different environment. 
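# Illustrative sketch, not from the source: sls_id above walks the compiled
# low chunks and only executes the ones whose '__id__' matches the requested
# ID; if nothing matches it raises SaltInvocationError. The selection step on
# its own:
def _chunks_for_id_sketch(chunks, id_):
    return [chunk for chunk in chunks if chunk.get('__id__', '') == id_]

# _chunks_for_id_sketch([{'__id__': 'vim'}, {'__id__': 'emacs'}], 'vim')
# -> [{'__id__': 'vim'}]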
saltenv Specify a salt fileserver environment to be used when applying states pillar Custom Pillar values, passed as a dictionary of key-value pairs .. code-block:: bash salt '*' state.show_low_sls stuff pillar='{"foo": "bar"}' .. note:: Values passed this way will override Pillar values set via ``pillar_roots`` or an external Pillar source. pillarenv Specify a Pillar environment to be used when applying states. This can also be set in the minion config file using the :conf_minion:`pillarenv` option. When neither the :conf_minion:`pillarenv` minion config option nor this CLI argument is used, all Pillar environments will be merged together. CLI Example: .. code-block:: bash salt '*' state.show_low_sls foo salt '*' state.show_low_sls foo saltenv=dev ''' if 'env' in kwargs: # "env" is not supported; Use "saltenv". kwargs.pop('env') conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict orig_test = __opts__.get('test', None) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) # Since this is dealing with a specific SLS file (or files), fall back to # the 'base' saltenv if none is configured and none was passed. if opts['saltenv'] is None: opts['saltenv'] = 'base' pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' ) try: st_ = salt.state.HighState(opts, pillar_override, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, pillar_override, initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE raise CommandExecutionError('Pillar failed to render', info=errors) mods = salt.utils.args.split_input(mods) st_.push_active() try: high_, errors = st_.render_highstate({opts['saltenv']: mods}) finally: st_.pop_active() errors += st_.state.verify_high(high_) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return errors ret = st_.state.compile_high_data(high_) # Work around Windows multiprocessing bug, set __opts__['test'] back to # value from before this function was run. __opts__['test'] = orig_test return ret def show_sls(mods, test=None, queue=False, **kwargs): ''' Display the state data from a specific sls or list of sls files on the master. The default environment is ``base``, use ``saltenv`` to specify a different environment. This function does not support topfiles. For ``top.sls`` please use ``show_top`` instead. Custom Pillar data can be passed with the ``pillar`` kwarg. saltenv Specify a salt fileserver environment to be used when applying states pillarenv Specify a Pillar environment to be used when applying states. This can also be set in the minion config file using the :conf_minion:`pillarenv` option. When neither the :conf_minion:`pillarenv` minion config option nor this CLI argument is used, all Pillar environments will be merged together. CLI Example: .. code-block:: bash salt '*' state.show_sls core,edit.vim saltenv=dev ''' if 'env' in kwargs: # "env" is not supported; Use "saltenv". 
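# Illustrative data shapes only (hypothetical values, simplified): show_low_sls
# above returns the *compiled low chunks*, while show_sls below returns the
# rendered *high data*. Roughly, the same single state in both forms:
EXAMPLE_HIGH_SKETCH = {
    'vim': {                              # state ID
        '__sls__': 'edit.vim',
        '__env__': 'base',
        'pkg': [{'name': 'vim'}, 'installed'],
    }
}
EXAMPLE_LOW_CHUNK_SKETCH = {
    '__id__': 'vim',
    '__sls__': 'edit.vim',
    '__env__': 'base',
    'state': 'pkg',
    'fun': 'installed',
    'name': 'vim',
    'order': 10000,                       # ordering added during compilation
}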
kwargs.pop('env') conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict orig_test = __opts__.get('test', None) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) # Since this is dealing with a specific SLS file (or files), fall back to # the 'base' saltenv if none is configured and none was passed. if opts['saltenv'] is None: opts['saltenv'] = 'base' pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' ) try: st_ = salt.state.HighState(opts, pillar_override, pillar_enc=pillar_enc, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, pillar_override, pillar_enc=pillar_enc, initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE raise CommandExecutionError('Pillar failed to render', info=errors) mods = salt.utils.args.split_input(mods) st_.push_active() try: high_, errors = st_.render_highstate({opts['saltenv']: mods}) finally: st_.pop_active() errors += st_.state.verify_high(high_) # Work around Windows multiprocessing bug, set __opts__['test'] back to # value from before this function was run. __opts__['test'] = orig_test if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return errors return high_ def sls_exists(mods, test=None, queue=False, **kwargs): ''' Tests for the existance the of a specific SLS or list of SLS files on the master. Similar to :py:func:`state.show_sls <salt.modules.state.show_sls>`, rather than returning state details, returns True or False. The default environment is ``base``, use ``saltenv`` to specify a different environment. .. versionadded:: 2019.2.0 saltenv Specify a salt fileserver environment from which to look for the SLS files specified in the ``mods`` argument CLI Example: .. code-block:: bash salt '*' state.sls_exists core,edit.vim saltenv=dev ''' return isinstance( show_sls(mods, test=test, queue=queue, **kwargs), dict ) def id_exists(ids, mods, test=None, queue=False, **kwargs): ''' Tests for the existence of a specific ID or list of IDs within the specified SLS file(s). Similar to :py:func:`state.sls_exists <salt.modules.state.sls_exists>`, returns True or False. The default environment is base``, use ``saltenv`` to specify a different environment. .. versionadded:: 2019.2.0 saltenv Specify a salt fileserver environment from which to look for the SLS files specified in the ``mods`` argument CLI Example: .. code-block:: bash salt '*' state.id_exists create_myfile,update_template filestate saltenv=dev ''' ids = salt.utils.args.split_input(ids) ids = set(ids) sls_ids = set(x['__id__'] for x in show_low_sls(mods, test=test, queue=queue, **kwargs)) return ids.issubset(sls_ids) def show_top(queue=False, **kwargs): ''' Return the top data that the minion will use for a highstate CLI Example: .. code-block:: bash salt '*' state.show_top ''' if 'env' in kwargs: # "env" is not supported; Use "saltenv". 
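# Illustrative sketch, not from the source: id_exists above reduces the low
# chunks of the given SLS files to the set of their '__id__' values and checks
# that every requested ID is present. In isolation:
def _ids_exist_sketch(requested_ids, chunks):
    available = set(chunk['__id__'] for chunk in chunks)
    return set(requested_ids).issubset(available)

# _ids_exist_sketch(['create_myfile'], [{'__id__': 'create_myfile'}]) -> True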
kwargs.pop('env') conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) try: st_ = salt.state.HighState(opts, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts)) errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar']) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE raise CommandExecutionError('Pillar failed to render', info=errors) errors = [] top_ = st_.get_top() errors += st_.verify_tops(top_) if errors: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return errors matches = st_.top_matches(top_) return matches def single(fun, name, test=None, queue=False, **kwargs): ''' Execute a single state function with the named kwargs, returns False if insufficient data is sent to the command By default, the values of the kwargs will be parsed as YAML. So, you can specify lists values, or lists of single entry key-value maps, as you would in a YAML salt file. Alternatively, JSON format of keyword values is also supported. CLI Example: .. code-block:: bash salt '*' state.single pkg.installed name=vim ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict comps = fun.split('.') if len(comps) < 2: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return 'Invalid function passed' kwargs.update({'state': comps[0], 'fun': comps[1], '__id__': name, 'name': name}) orig_test = __opts__.get('test', None) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' ) try: st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, initial_pillar=_get_initial_pillar(opts)) err = st_.verify_data(kwargs) if err: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return err st_._mod_init(kwargs) snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy')) ret = {'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs): st_.call(kwargs)} _set_retcode(ret) # Work around Windows multiprocessing bug, set __opts__['test'] back to # value from before this function was run. _snapper_post(opts, kwargs.get('__pub_jid', 'called localy'), snapper_pre) __opts__['test'] = orig_test return ret def clear_cache(): ''' Clear out cached state files, forcing even cache runs to refresh the cache on the next state execution. Remember that the state cache is completely disabled by default, this execution only applies if cache=True is used in states CLI Example: .. code-block:: bash salt '*' state.clear_cache ''' ret = [] for fn_ in os.listdir(__opts__['cachedir']): if fn_.endswith('.cache.p'): path = os.path.join(__opts__['cachedir'], fn_) if not os.path.isfile(path): continue os.remove(path) ret.append(fn_) return ret def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs): ''' Execute a packaged state run, the packaged state run will exist in a tarball available locally. 
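# Illustrative sketch, not from the source: single() above keys its return
# dict with the standard state tag '<state>_|-<__id__>_|-<name>_|-<fun>',
# using the same format string as the function body:
def _state_tag_sketch(kwargs):
    return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs)

# _state_tag_sketch({'state': 'pkg', '__id__': 'vim', 'name': 'vim',
#                    'fun': 'installed'}) -> 'pkg_|-vim_|-vim_|-installed'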
This packaged state can be generated using salt-ssh. CLI Example: .. code-block:: bash salt '*' state.pkg /tmp/salt_state.tgz 760a9353810e36f6d81416366fc426dc md5 ''' # TODO - Add ability to download from salt master or other source popts = salt.utils.state.get_sls_opts(__opts__, **kwargs) if not os.path.isfile(pkg_path): return {} if not salt.utils.hashutils.get_hash(pkg_path, hash_type) == pkg_sum: return {} root = tempfile.mkdtemp() s_pkg = tarfile.open(pkg_path, 'r:gz') # Verify that the tarball does not extract outside of the intended root members = s_pkg.getmembers() for member in members: if salt.utils.stringutils.to_unicode(member.path).startswith((os.sep, '..{0}'.format(os.sep))): return {} elif '..{0}'.format(os.sep) in salt.utils.stringutils.to_unicode(member.path): return {} s_pkg.extractall(root) s_pkg.close() lowstate_json = os.path.join(root, 'lowstate.json') with salt.utils.files.fopen(lowstate_json, 'r') as fp_: lowstate = salt.utils.json.load(fp_) # Check for errors in the lowstate for chunk in lowstate: if not isinstance(chunk, dict): return lowstate pillar_json = os.path.join(root, 'pillar.json') if os.path.isfile(pillar_json): with salt.utils.files.fopen(pillar_json, 'r') as fp_: pillar_override = salt.utils.json.load(fp_) else: pillar_override = None roster_grains_json = os.path.join(root, 'roster_grains.json') if os.path.isfile(roster_grains_json): with salt.utils.files.fopen(roster_grains_json, 'r') as fp_: roster_grains = salt.utils.json.load(fp_) if os.path.isfile(roster_grains_json): popts['grains'] = roster_grains popts['fileclient'] = 'local' popts['file_roots'] = {} popts['test'] = _get_test_value(test, **kwargs) envs = os.listdir(root) for fn_ in envs: full = os.path.join(root, fn_) if not os.path.isdir(full): continue popts['file_roots'][fn_] = [full] st_ = salt.state.State(popts, pillar_override=pillar_override) snapper_pre = _snapper_pre(popts, kwargs.get('__pub_jid', 'called localy')) ret = st_.call_chunks(lowstate) ret = st_.call_listen(lowstate, ret) try: shutil.rmtree(root) except (IOError, OSError): pass _set_retcode(ret) _snapper_post(popts, kwargs.get('__pub_jid', 'called localy'), snapper_pre) return ret def disable(states): ''' Disable state runs. CLI Example: .. code-block:: bash salt '*' state.disable highstate salt '*' state.disable highstate,test.succeed_without_changes .. note:: To disable a state file from running provide the same name that would be passed in a state.sls call. salt '*' state.disable bind.config ''' ret = { 'res': True, 'msg': '' } states = salt.utils.args.split_input(states) msg = [] _disabled = __salt__['grains.get']('state_runs_disabled') if not isinstance(_disabled, list): _disabled = [] _changed = False for _state in states: if _state in _disabled: msg.append('Info: {0} state already disabled.'.format(_state)) else: msg.append('Info: {0} state disabled.'.format(_state)) _disabled.append(_state) _changed = True if _changed: __salt__['grains.setval']('state_runs_disabled', _disabled) ret['msg'] = '\n'.join(msg) # refresh the grains __salt__['saltutil.refresh_modules']() return ret def enable(states): ''' Enable state function or sls run CLI Example: .. code-block:: bash salt '*' state.enable highstate salt '*' state.enable test.succeed_without_changes .. note:: To enable a state file from running provide the same name that would be passed in a state.sls call. 
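# Illustrative sketch, not from the source: pkg() above refuses to extract any
# tarball member whose path is absolute or contains '..', keeping the archive
# inside its temporary root. A stand-alone version of that guard using only
# the standard library:
import os
import tarfile


def _is_safe_member_sketch(member):
    path = member.path  # TarInfo.path is an alias of TarInfo.name
    return not (
        path.startswith((os.sep, '..' + os.sep))
        or ('..' + os.sep) in path
    )


def _safe_members_sketch(archive_path):
    with tarfile.open(archive_path, 'r:gz') as pkg_archive:
        return [m for m in pkg_archive.getmembers() if _is_safe_member_sketch(m)]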
salt '*' state.disable bind.config ''' ret = { 'res': True, 'msg': '' } states = salt.utils.args.split_input(states) log.debug('states %s', states) msg = [] _disabled = __salt__['grains.get']('state_runs_disabled') if not isinstance(_disabled, list): _disabled = [] _changed = False for _state in states: log.debug('_state %s', _state) if _state not in _disabled: msg.append('Info: {0} state already enabled.'.format(_state)) else: msg.append('Info: {0} state enabled.'.format(_state)) _disabled.remove(_state) _changed = True if _changed: __salt__['grains.setval']('state_runs_disabled', _disabled) ret['msg'] = '\n'.join(msg) # refresh the grains __salt__['saltutil.refresh_modules']() return ret def list_disabled(): ''' List the states which are currently disabled CLI Example: .. code-block:: bash salt '*' state.list_disabled ''' return __salt__['grains.get']('state_runs_disabled') def _disabled(funs): ''' Return messages for disabled states that match state functions in funs. ''' ret = [] _disabled = __salt__['grains.get']('state_runs_disabled') for state in funs: for _state in _disabled: if '.*' in _state: target_state = _state.split('.')[0] target_state = target_state + '.' if not target_state.endswith('.') else target_state if state.startswith(target_state): err = ( 'The state file "{0}" is currently disabled by "{1}", ' 'to re-enable, run state.enable {1}.' ).format( state, _state, ) ret.append(err) continue else: if _state == state: err = ( 'The state file "{0}" is currently disabled, ' 'to re-enable, run state.enable {0}.' ).format( _state, ) ret.append(err) continue return ret def event(tagmatch='*', count=-1, quiet=False, sock_dir=None, pretty=False, node='minion'): r''' Watch Salt's event bus and block until the given tag is matched .. versionadded:: 2016.3.0 .. versionchanged:: 2019.2.0 ``tagmatch`` can now be either a glob or regular expression. This is useful for utilizing Salt's event bus from shell scripts or for taking simple actions directly from the CLI. Enable debug logging to see ignored events. :param tagmatch: the event is written to stdout for each tag that matches this glob or regular expression. :param count: this number is decremented for each event that matches the ``tagmatch`` parameter; pass ``-1`` to listen forever. :param quiet: do not print to stdout; just block :param sock_dir: path to the Salt master's event socket file. :param pretty: Output the JSON all on a single line if ``False`` (useful for shell tools); pretty-print the JSON output if ``True``. :param node: Watch the minion-side or master-side event bus. CLI Example: .. code-block:: bash salt-call --local state.event pretty=True ''' with salt.utils.event.get_event( node, sock_dir or __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=True) as sevent: while True: ret = sevent.get_event(full=True, auto_reconnect=True) if ret is None: continue if salt.utils.stringutils.expr_match(ret['tag'], tagmatch): if not quiet: salt.utils.stringutils.print_cli( str('{0}\t{1}').format( # future lint: blacklisted-function salt.utils.stringutils.to_str(ret['tag']), salt.utils.json.dumps( ret['data'], sort_keys=pretty, indent=None if not pretty else 4) ) ) sys.stdout.flush() if count > 0: count -= 1 log.debug('Remaining event matches: %s', count) if count == 0: break else: log.debug('Skipping event tag: %s', ret['tag']) continue
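# Illustrative sketch, not part of the module above: _disabled() treats an
# entry such as 'highstate' as an exact match and an entry such as 'mymod.*'
# as a prefix match on everything under that module. The matching rule on its
# own:
def _is_disabled_sketch(state_fun, disabled_entries):
    for entry in disabled_entries:
        if '.*' in entry:
            prefix = entry.split('.')[0] + '.'
            if state_fun.startswith(prefix):
                return True
        elif entry == state_fun:
            return True
    return False

# _is_disabled_sketch('test.succeed_without_changes', ['test.*']) -> True
# _is_disabled_sketch('highstate', ['highstate'])                 -> True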
py
1a43a3cd0d6ea1556be2e5c4cba7aabcfde895bf
# -*- coding: utf8 -*- # Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException from tencentcloud.common.abstract_client import AbstractClient from tencentcloud.tdmq.v20200217 import models class TdmqClient(AbstractClient): _apiVersion = '2020-02-17' _endpoint = 'tdmq.tencentcloudapi.com' _service = 'tdmq' def AcknowledgeMessage(self, request): """根据提供的 MessageID 确认指定 topic 中的消息 :param request: Request instance for AcknowledgeMessage. :type request: :class:`tencentcloud.tdmq.v20200217.models.AcknowledgeMessageRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.AcknowledgeMessageResponse` """ try: params = request._serialize() body = self.call("AcknowledgeMessage", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.AcknowledgeMessageResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ClearCmqQueue(self, request): """清空cmq消息队列中的消息 :param request: Request instance for ClearCmqQueue. :type request: :class:`tencentcloud.tdmq.v20200217.models.ClearCmqQueueRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ClearCmqQueueResponse` """ try: params = request._serialize() body = self.call("ClearCmqQueue", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ClearCmqQueueResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ClearCmqSubscriptionFilterTags(self, request): """清空订阅者消息标签 :param request: Request instance for ClearCmqSubscriptionFilterTags. 
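# Illustrative usage sketch only, not part of the SDK: every method on this
# client follows the same pattern -- build a request model, call the matching
# method, and catch TencentCloudSDKException. The credentials and region below
# are placeholders, and ClusterName is an assumed parameter name rather than a
# verified schema:
from tencentcloud.common import credential


def _example_create_cluster():
    cred = credential.Credential("SECRET_ID", "SECRET_KEY")  # placeholders
    client = TdmqClient(cred, "ap-guangzhou")
    req = models.CreateClusterRequest()
    req.ClusterName = "my-cluster"  # assumed field name
    try:
        resp = client.CreateCluster(req)
        return resp.to_json_string()
    except TencentCloudSDKException as err:
        return str(err)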
:type request: :class:`tencentcloud.tdmq.v20200217.models.ClearCmqSubscriptionFilterTagsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ClearCmqSubscriptionFilterTagsResponse` """ try: params = request._serialize() body = self.call("ClearCmqSubscriptionFilterTags", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ClearCmqSubscriptionFilterTagsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateAMQPCluster(self, request): """创建AMQP集群 :param request: Request instance for CreateAMQPCluster. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateAMQPClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateAMQPClusterResponse` """ try: params = request._serialize() body = self.call("CreateAMQPCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateAMQPClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateAMQPExchange(self, request): """创建AMQP Exchange :param request: Request instance for CreateAMQPExchange. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateAMQPExchangeRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateAMQPExchangeResponse` """ try: params = request._serialize() body = self.call("CreateAMQPExchange", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateAMQPExchangeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateAMQPQueue(self, request): """创建AMQP队列 :param request: Request instance for CreateAMQPQueue. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateAMQPQueueRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateAMQPQueueResponse` """ try: params = request._serialize() body = self.call("CreateAMQPQueue", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateAMQPQueueResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateAMQPRouteRelation(self, request): """创建AMQP路由关系 :param request: Request instance for CreateAMQPRouteRelation. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.CreateAMQPRouteRelationRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateAMQPRouteRelationResponse` """ try: params = request._serialize() body = self.call("CreateAMQPRouteRelation", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateAMQPRouteRelationResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateAMQPVHost(self, request): """创建Amqp Vhost :param request: Request instance for CreateAMQPVHost. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateAMQPVHostRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateAMQPVHostResponse` """ try: params = request._serialize() body = self.call("CreateAMQPVHost", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateAMQPVHostResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateCluster(self, request): """创建用户的集群 :param request: Request instance for CreateCluster. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateClusterResponse` """ try: params = request._serialize() body = self.call("CreateCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateCmqQueue(self, request): """创建cmq队列接口 :param request: Request instance for CreateCmqQueue. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateCmqQueueRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateCmqQueueResponse` """ try: params = request._serialize() body = self.call("CreateCmqQueue", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateCmqQueueResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateCmqSubscribe(self, request): """创建cmq订阅接口 :param request: Request instance for CreateCmqSubscribe. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.CreateCmqSubscribeRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateCmqSubscribeResponse` """ try: params = request._serialize() body = self.call("CreateCmqSubscribe", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateCmqSubscribeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateCmqTopic(self, request): """创建cmq主题 :param request: Request instance for CreateCmqTopic. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateCmqTopicRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateCmqTopicResponse` """ try: params = request._serialize() body = self.call("CreateCmqTopic", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateCmqTopicResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateEnvironment(self, request): """用于在用户账户下创建消息队列 Tdmq 命名空间 :param request: Request instance for CreateEnvironment. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateEnvironmentRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateEnvironmentResponse` """ try: params = request._serialize() body = self.call("CreateEnvironment", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateEnvironmentResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateEnvironmentRole(self, request): """创建环境角色授权 :param request: Request instance for CreateEnvironmentRole. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateEnvironmentRoleRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateEnvironmentRoleResponse` """ try: params = request._serialize() body = self.call("CreateEnvironmentRole", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateEnvironmentRoleResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateRocketMQCluster(self, request): """此接口用于创建一个RocketMQ集群 :param request: Request instance for CreateRocketMQCluster. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.CreateRocketMQClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateRocketMQClusterResponse` """ try: params = request._serialize() body = self.call("CreateRocketMQCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateRocketMQClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateRocketMQGroup(self, request): """创建RocketMQ消费组 :param request: Request instance for CreateRocketMQGroup. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateRocketMQGroupRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateRocketMQGroupResponse` """ try: params = request._serialize() body = self.call("CreateRocketMQGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateRocketMQGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateRocketMQNamespace(self, request): """创建RocketMQ命名空间 :param request: Request instance for CreateRocketMQNamespace. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateRocketMQNamespaceRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateRocketMQNamespaceResponse` """ try: params = request._serialize() body = self.call("CreateRocketMQNamespace", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateRocketMQNamespaceResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateRocketMQTopic(self, request): """创建RocketMQ主题 :param request: Request instance for CreateRocketMQTopic. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateRocketMQTopicRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateRocketMQTopicResponse` """ try: params = request._serialize() body = self.call("CreateRocketMQTopic", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateRocketMQTopicResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateRole(self, request): """创建角色 :param request: Request instance for CreateRole. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.CreateRoleRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateRoleResponse` """ try: params = request._serialize() body = self.call("CreateRole", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateRoleResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateSubscription(self, request): """创建一个主题的订阅关系 :param request: Request instance for CreateSubscription. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateSubscriptionRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateSubscriptionResponse` """ try: params = request._serialize() body = self.call("CreateSubscription", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateSubscriptionResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def CreateTopic(self, request): """新增指定分区、类型的消息主题 :param request: Request instance for CreateTopic. :type request: :class:`tencentcloud.tdmq.v20200217.models.CreateTopicRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.CreateTopicResponse` """ try: params = request._serialize() body = self.call("CreateTopic", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateTopicResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteAMQPCluster(self, request): """删除AMQP集群 :param request: Request instance for DeleteAMQPCluster. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteAMQPClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteAMQPClusterResponse` """ try: params = request._serialize() body = self.call("DeleteAMQPCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteAMQPClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteAMQPExchange(self, request): """删除Amqp交换机 :param request: Request instance for DeleteAMQPExchange. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteAMQPExchangeRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteAMQPExchangeResponse` """ try: params = request._serialize() body = self.call("DeleteAMQPExchange", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteAMQPExchangeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteAMQPQueue(self, request): """删除Amqp队列 :param request: Request instance for DeleteAMQPQueue. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteAMQPQueueRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteAMQPQueueResponse` """ try: params = request._serialize() body = self.call("DeleteAMQPQueue", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteAMQPQueueResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteAMQPRouteRelation(self, request): """删除Amqp路由关系 :param request: Request instance for DeleteAMQPRouteRelation. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteAMQPRouteRelationRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteAMQPRouteRelationResponse` """ try: params = request._serialize() body = self.call("DeleteAMQPRouteRelation", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteAMQPRouteRelationResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteAMQPVHost(self, request): """删除Vhost :param request: Request instance for DeleteAMQPVHost. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteAMQPVHostRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteAMQPVHostResponse` """ try: params = request._serialize() body = self.call("DeleteAMQPVHost", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteAMQPVHostResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteCluster(self, request): """删除集群 :param request: Request instance for DeleteCluster. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteClusterResponse` """ try: params = request._serialize() body = self.call("DeleteCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteCmqQueue(self, request): """删除cmq队列 :param request: Request instance for DeleteCmqQueue. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteCmqQueueRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteCmqQueueResponse` """ try: params = request._serialize() body = self.call("DeleteCmqQueue", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteCmqQueueResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteCmqSubscribe(self, request): """删除cmq订阅 :param request: Request instance for DeleteCmqSubscribe. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteCmqSubscribeRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteCmqSubscribeResponse` """ try: params = request._serialize() body = self.call("DeleteCmqSubscribe", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteCmqSubscribeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteCmqTopic(self, request): """删除cmq主题 :param request: Request instance for DeleteCmqTopic. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteCmqTopicRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteCmqTopicResponse` """ try: params = request._serialize() body = self.call("DeleteCmqTopic", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteCmqTopicResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteEnvironmentRoles(self, request): """删除环境角色授权。 :param request: Request instance for DeleteEnvironmentRoles. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteEnvironmentRolesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteEnvironmentRolesResponse` """ try: params = request._serialize() body = self.call("DeleteEnvironmentRoles", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteEnvironmentRolesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteEnvironments(self, request): """批量删除租户下的命名空间 :param request: Request instance for DeleteEnvironments. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteEnvironmentsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteEnvironmentsResponse` """ try: params = request._serialize() body = self.call("DeleteEnvironments", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteEnvironmentsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteRocketMQCluster(self, request): """删除RocketMQ集群 :param request: Request instance for DeleteRocketMQCluster. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteRocketMQClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteRocketMQClusterResponse` """ try: params = request._serialize() body = self.call("DeleteRocketMQCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteRocketMQClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteRocketMQGroup(self, request): """删除RocketMQ消费组 :param request: Request instance for DeleteRocketMQGroup. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteRocketMQGroupRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteRocketMQGroupResponse` """ try: params = request._serialize() body = self.call("DeleteRocketMQGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteRocketMQGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteRocketMQNamespace(self, request): """删除RocketMQ命名空间 :param request: Request instance for DeleteRocketMQNamespace. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteRocketMQNamespaceRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteRocketMQNamespaceResponse` """ try: params = request._serialize() body = self.call("DeleteRocketMQNamespace", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteRocketMQNamespaceResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteRocketMQTopic(self, request): """删除RocketMQ主题 :param request: Request instance for DeleteRocketMQTopic. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteRocketMQTopicRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteRocketMQTopicResponse` """ try: params = request._serialize() body = self.call("DeleteRocketMQTopic", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteRocketMQTopicResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteRoles(self, request): """删除角色,支持批量。 :param request: Request instance for DeleteRoles. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteRolesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteRolesResponse` """ try: params = request._serialize() body = self.call("DeleteRoles", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteRolesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteSubscriptions(self, request): """删除订阅关系 :param request: Request instance for DeleteSubscriptions. :type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteSubscriptionsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteSubscriptionsResponse` """ try: params = request._serialize() body = self.call("DeleteSubscriptions", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteSubscriptionsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DeleteTopics(self, request): """批量删除topics :param request: Request instance for DeleteTopics. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DeleteTopicsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DeleteTopicsResponse` """ try: params = request._serialize() body = self.call("DeleteTopics", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteTopicsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeAMQPCluster(self, request): """获取单个Amqp集群信息 :param request: Request instance for DescribeAMQPCluster. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPClusterResponse` """ try: params = request._serialize() body = self.call("DescribeAMQPCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeAMQPClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeAMQPClusters(self, request): """获取amqp集群列表 :param request: Request instance for DescribeAMQPClusters. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPClustersRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPClustersResponse` """ try: params = request._serialize() body = self.call("DescribeAMQPClusters", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeAMQPClustersResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeAMQPCreateQuota(self, request): """获取用户的配额,如Queue容量,Exchange容量,Vhost容量,单Vhost Tps数,剩余可创建集群数 :param request: Request instance for DescribeAMQPCreateQuota. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPCreateQuotaRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPCreateQuotaResponse` """ try: params = request._serialize() body = self.call("DescribeAMQPCreateQuota", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeAMQPCreateQuotaResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeAMQPExchanges(self, request): """获取AMQP Exchange列表 :param request: Request instance for DescribeAMQPExchanges. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPExchangesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPExchangesResponse` """ try: params = request._serialize() body = self.call("DescribeAMQPExchanges", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeAMQPExchangesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeAMQPQueues(self, request): """获取Amqp队列列表 :param request: Request instance for DescribeAMQPQueues. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPQueuesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPQueuesResponse` """ try: params = request._serialize() body = self.call("DescribeAMQPQueues", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeAMQPQueuesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeAMQPRouteRelations(self, request): """获取Amqp路由关系列表 :param request: Request instance for DescribeAMQPRouteRelations. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPRouteRelationsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPRouteRelationsResponse` """ try: params = request._serialize() body = self.call("DescribeAMQPRouteRelations", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeAMQPRouteRelationsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeAMQPVHosts(self, request): """获取Amqp Vhost 列表 :param request: Request instance for DescribeAMQPVHosts. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPVHostsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeAMQPVHostsResponse` """ try: params = request._serialize() body = self.call("DescribeAMQPVHosts", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeAMQPVHostsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeBindClusters(self, request): """获取用户绑定的专享集群列表 :param request: Request instance for DescribeBindClusters. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeBindClustersRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeBindClustersResponse` """ try: params = request._serialize() body = self.call("DescribeBindClusters", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeBindClustersResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeBindVpcs(self, request): """获取租户VPC绑定关系 :param request: Request instance for DescribeBindVpcs. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeBindVpcsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeBindVpcsResponse` """ try: params = request._serialize() body = self.call("DescribeBindVpcs", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeBindVpcsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeClusterDetail(self, request): """获取集群的详细信息 :param request: Request instance for DescribeClusterDetail. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeClusterDetailRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeClusterDetailResponse` """ try: params = request._serialize() body = self.call("DescribeClusterDetail", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeClusterDetailResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeClusters(self, request): """获取集群列表 :param request: Request instance for DescribeClusters. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeClustersRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeClustersResponse` """ try: params = request._serialize() body = self.call("DescribeClusters", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeClustersResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeCmqDeadLetterSourceQueues(self, request): """枚举cmq死信队列源队列 :param request: Request instance for DescribeCmqDeadLetterSourceQueues. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqDeadLetterSourceQueuesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqDeadLetterSourceQueuesResponse` """ try: params = request._serialize() body = self.call("DescribeCmqDeadLetterSourceQueues", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeCmqDeadLetterSourceQueuesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeCmqQueueDetail(self, request): """查询cmq队列详情 :param request: Request instance for DescribeCmqQueueDetail. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqQueueDetailRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqQueueDetailResponse` """ try: params = request._serialize() body = self.call("DescribeCmqQueueDetail", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeCmqQueueDetailResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeCmqQueues(self, request): """查询cmq全量队列 :param request: Request instance for DescribeCmqQueues. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqQueuesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqQueuesResponse` """ try: params = request._serialize() body = self.call("DescribeCmqQueues", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeCmqQueuesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeCmqSubscriptionDetail(self, request): """查询cmq订阅详情 :param request: Request instance for DescribeCmqSubscriptionDetail. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqSubscriptionDetailRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqSubscriptionDetailResponse` """ try: params = request._serialize() body = self.call("DescribeCmqSubscriptionDetail", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeCmqSubscriptionDetailResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeCmqTopicDetail(self, request): """查询cmq主题详情 :param request: Request instance for DescribeCmqTopicDetail. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqTopicDetailRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqTopicDetailResponse` """ try: params = request._serialize() body = self.call("DescribeCmqTopicDetail", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeCmqTopicDetailResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeCmqTopics(self, request): """枚举cmq全量主题 :param request: Request instance for DescribeCmqTopics. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqTopicsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeCmqTopicsResponse` """ try: params = request._serialize() body = self.call("DescribeCmqTopics", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeCmqTopicsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeEnvironmentAttributes(self, request): """获取指定命名空间的属性 :param request: Request instance for DescribeEnvironmentAttributes. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeEnvironmentAttributesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeEnvironmentAttributesResponse` """ try: params = request._serialize() body = self.call("DescribeEnvironmentAttributes", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeEnvironmentAttributesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeEnvironmentRoles(self, request): """获取命名空间角色列表 :param request: Request instance for DescribeEnvironmentRoles. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeEnvironmentRolesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeEnvironmentRolesResponse` """ try: params = request._serialize() body = self.call("DescribeEnvironmentRoles", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeEnvironmentRolesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeEnvironments(self, request): """获取租户下命名空间列表 :param request: Request instance for DescribeEnvironments. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeEnvironmentsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeEnvironmentsResponse` """ try: params = request._serialize() body = self.call("DescribeEnvironments", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeEnvironmentsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeNamespaceBundlesOpt(self, request): """运营端获取命名空间bundle列表 :param request: Request instance for DescribeNamespaceBundlesOpt. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeNamespaceBundlesOptRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeNamespaceBundlesOptResponse` """ try: params = request._serialize() body = self.call("DescribeNamespaceBundlesOpt", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeNamespaceBundlesOptResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeNodeHealthOpt(self, request): """运营端获节点健康状态 :param request: Request instance for DescribeNodeHealthOpt. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeNodeHealthOptRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeNodeHealthOptResponse` """ try: params = request._serialize() body = self.call("DescribeNodeHealthOpt", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeNodeHealthOptResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeProducers(self, request): """获取生产者列表,仅显示在线的生产者 :param request: Request instance for DescribeProducers. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeProducersRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeProducersResponse` """ try: params = request._serialize() body = self.call("DescribeProducers", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeProducersResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeRocketMQCluster(self, request): """获取单个RocketMQ集群信息 :param request: Request instance for DescribeRocketMQCluster. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeRocketMQClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeRocketMQClusterResponse` """ try: params = request._serialize() body = self.call("DescribeRocketMQCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeRocketMQClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeRocketMQClusters(self, request): """获取RocketMQ集群列表 :param request: Request instance for DescribeRocketMQClusters. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeRocketMQClustersRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeRocketMQClustersResponse` """ try: params = request._serialize() body = self.call("DescribeRocketMQClusters", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeRocketMQClustersResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeRocketMQGroups(self, request): """获取RocketMQ消费组列表 :param request: Request instance for DescribeRocketMQGroups. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeRocketMQGroupsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeRocketMQGroupsResponse` """ try: params = request._serialize() body = self.call("DescribeRocketMQGroups", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeRocketMQGroupsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeRocketMQNamespaces(self, request): """获取RocketMQ命名空间列表 :param request: Request instance for DescribeRocketMQNamespaces. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeRocketMQNamespacesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeRocketMQNamespacesResponse` """ try: params = request._serialize() body = self.call("DescribeRocketMQNamespaces", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeRocketMQNamespacesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeRocketMQTopics(self, request): """获取RocketMQ主题列表 :param request: Request instance for DescribeRocketMQTopics. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeRocketMQTopicsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeRocketMQTopicsResponse` """ try: params = request._serialize() body = self.call("DescribeRocketMQTopics", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeRocketMQTopicsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeRoles(self, request): """获取角色列表 :param request: Request instance for DescribeRoles. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeRolesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeRolesResponse` """ try: params = request._serialize() body = self.call("DescribeRoles", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeRolesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeSubscriptions(self, request): """查询指定环境和主题下的订阅者列表 :param request: Request instance for DescribeSubscriptions. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeSubscriptionsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeSubscriptionsResponse` """ try: params = request._serialize() body = self.call("DescribeSubscriptions", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeSubscriptionsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def DescribeTopics(self, request): """获取环境下主题列表 :param request: Request instance for DescribeTopics. :type request: :class:`tencentcloud.tdmq.v20200217.models.DescribeTopicsRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.DescribeTopicsResponse` """ try: params = request._serialize() body = self.call("DescribeTopics", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeTopicsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyAMQPCluster(self, request): """更新Amqp集群信息 :param request: Request instance for ModifyAMQPCluster. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyAMQPClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyAMQPClusterResponse` """ try: params = request._serialize() body = self.call("ModifyAMQPCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyAMQPClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyAMQPExchange(self, request): """更新Amqp交换机 :param request: Request instance for ModifyAMQPExchange. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyAMQPExchangeRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyAMQPExchangeResponse` """ try: params = request._serialize() body = self.call("ModifyAMQPExchange", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyAMQPExchangeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyAMQPQueue(self, request): """更新Amqp队列 :param request: Request instance for ModifyAMQPQueue. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyAMQPQueueRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyAMQPQueueResponse` """ try: params = request._serialize() body = self.call("ModifyAMQPQueue", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyAMQPQueueResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyAMQPVHost(self, request): """更新Vhost :param request: Request instance for ModifyAMQPVHost. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyAMQPVHostRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyAMQPVHostResponse` """ try: params = request._serialize() body = self.call("ModifyAMQPVHost", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyAMQPVHostResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyCluster(self, request): """更新集群信息 :param request: Request instance for ModifyCluster. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyClusterResponse` """ try: params = request._serialize() body = self.call("ModifyCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyCmqQueueAttribute(self, request): """修改cmq队列属性 :param request: Request instance for ModifyCmqQueueAttribute. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyCmqQueueAttributeRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyCmqQueueAttributeResponse` """ try: params = request._serialize() body = self.call("ModifyCmqQueueAttribute", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyCmqQueueAttributeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyCmqSubscriptionAttribute(self, request): """修改cmq订阅属性 :param request: Request instance for ModifyCmqSubscriptionAttribute. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyCmqSubscriptionAttributeRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyCmqSubscriptionAttributeResponse` """ try: params = request._serialize() body = self.call("ModifyCmqSubscriptionAttribute", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyCmqSubscriptionAttributeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyCmqTopicAttribute(self, request): """修改cmq主题属性 :param request: Request instance for ModifyCmqTopicAttribute. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyCmqTopicAttributeRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyCmqTopicAttributeResponse` """ try: params = request._serialize() body = self.call("ModifyCmqTopicAttribute", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyCmqTopicAttributeResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyEnvironmentAttributes(self, request): """修改指定命名空间的属性值 :param request: Request instance for ModifyEnvironmentAttributes. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyEnvironmentAttributesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyEnvironmentAttributesResponse` """ try: params = request._serialize() body = self.call("ModifyEnvironmentAttributes", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyEnvironmentAttributesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyEnvironmentRole(self, request): """修改环境角色授权。 :param request: Request instance for ModifyEnvironmentRole. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyEnvironmentRoleRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyEnvironmentRoleResponse` """ try: params = request._serialize() body = self.call("ModifyEnvironmentRole", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyEnvironmentRoleResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyRocketMQCluster(self, request): """更新RocketMQ集群信息 :param request: Request instance for ModifyRocketMQCluster. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyRocketMQClusterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyRocketMQClusterResponse` """ try: params = request._serialize() body = self.call("ModifyRocketMQCluster", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyRocketMQClusterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyRocketMQGroup(self, request): """更新RocketMQ消费组信息 :param request: Request instance for ModifyRocketMQGroup. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyRocketMQGroupRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyRocketMQGroupResponse` """ try: params = request._serialize() body = self.call("ModifyRocketMQGroup", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyRocketMQGroupResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyRocketMQNamespace(self, request): """更新RocketMQ命名空间 :param request: Request instance for ModifyRocketMQNamespace. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyRocketMQNamespaceRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyRocketMQNamespaceResponse` """ try: params = request._serialize() body = self.call("ModifyRocketMQNamespace", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyRocketMQNamespaceResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyRocketMQTopic(self, request): """更新RocketMQ主题信息 :param request: Request instance for ModifyRocketMQTopic. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyRocketMQTopicRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyRocketMQTopicResponse` """ try: params = request._serialize() body = self.call("ModifyRocketMQTopic", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyRocketMQTopicResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyRole(self, request): """角色修改 :param request: Request instance for ModifyRole. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyRoleRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyRoleResponse` """ try: params = request._serialize() body = self.call("ModifyRole", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyRoleResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ModifyTopic(self, request): """修改主题备注和分区数 :param request: Request instance for ModifyTopic. :type request: :class:`tencentcloud.tdmq.v20200217.models.ModifyTopicRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ModifyTopicResponse` """ try: params = request._serialize() body = self.call("ModifyTopic", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ModifyTopicResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def PublishCmqMsg(self, request): """发送cmq主题消息 :param request: Request instance for PublishCmqMsg. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.PublishCmqMsgRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.PublishCmqMsgResponse` """ try: params = request._serialize() body = self.call("PublishCmqMsg", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.PublishCmqMsgResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ReceiveMessage(self, request): """接收发送到指定 topic 中的消息 :param request: Request instance for ReceiveMessage. :type request: :class:`tencentcloud.tdmq.v20200217.models.ReceiveMessageRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ReceiveMessageResponse` """ try: params = request._serialize() body = self.call("ReceiveMessage", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ReceiveMessageResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def ResetMsgSubOffsetByTimestamp(self, request): """根据时间戳进行消息回溯,精确到毫秒 :param request: Request instance for ResetMsgSubOffsetByTimestamp. :type request: :class:`tencentcloud.tdmq.v20200217.models.ResetMsgSubOffsetByTimestampRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.ResetMsgSubOffsetByTimestampResponse` """ try: params = request._serialize() body = self.call("ResetMsgSubOffsetByTimestamp", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ResetMsgSubOffsetByTimestampResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def RewindCmqQueue(self, request): """回溯cmq队列 :param request: Request instance for RewindCmqQueue. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.RewindCmqQueueRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.RewindCmqQueueResponse` """ try: params = request._serialize() body = self.call("RewindCmqQueue", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.RewindCmqQueueResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def SendBatchMessages(self, request): """批量发送消息 注意:TDMQ 批量发送消息的接口是在 TDMQ-HTTP 的服务侧将消息打包为一个 Batch,然后将该 Batch 在服务内部当作一次 TCP 请求发送出去。所以在使用过程中,用户还是按照单条消息发送的逻辑,每一条消息是一个独立的 HTTP 的请求,在 TDMQ-HTTP 的服务内部,会将多个 HTTP 的请求聚合为一个 Batch 发送到服务端。即,批量发送消息在使用上与发送单条消息是一致的,batch 的聚合是在 TDMQ-HTTP 的服务内部完成的。 :param request: Request instance for SendBatchMessages. :type request: :class:`tencentcloud.tdmq.v20200217.models.SendBatchMessagesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.SendBatchMessagesResponse` """ try: params = request._serialize() body = self.call("SendBatchMessages", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.SendBatchMessagesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def SendCmqMsg(self, request): """发送cmq消息 :param request: Request instance for SendCmqMsg. :type request: :class:`tencentcloud.tdmq.v20200217.models.SendCmqMsgRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.SendCmqMsgResponse` """ try: params = request._serialize() body = self.call("SendCmqMsg", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.SendCmqMsgResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def SendMessages(self, request): """发送单条消息 :param request: Request instance for SendMessages. :type request: :class:`tencentcloud.tdmq.v20200217.models.SendMessagesRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.SendMessagesResponse` """ try: params = request._serialize() body = self.call("SendMessages", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.SendMessagesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def SendMsg(self, request): """此接口仅用于测试发生消息,不能作为现网正式生产使用 :param request: Request instance for SendMsg. 
:type request: :class:`tencentcloud.tdmq.v20200217.models.SendMsgRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.SendMsgResponse` """ try: params = request._serialize() body = self.call("SendMsg", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.SendMsgResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message) def UnbindCmqDeadLetter(self, request): """解绑cmq死信队列 :param request: Request instance for UnbindCmqDeadLetter. :type request: :class:`tencentcloud.tdmq.v20200217.models.UnbindCmqDeadLetterRequest` :rtype: :class:`tencentcloud.tdmq.v20200217.models.UnbindCmqDeadLetterResponse` """ try: params = request._serialize() body = self.call("UnbindCmqDeadLetter", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.UnbindCmqDeadLetterResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
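# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated client
# above). It shows how a caller typically drives one of the Describe* actions
# documented in the docstrings, assuming the usual tencentcloud-sdk-python
# layout: `tencentcloud.common.credential.Credential`, a `TdmqClient` class in
# this module, and `to_json_string()` on response models. The SecretId/SecretKey
# strings and the "ap-guangzhou" region are placeholders, not values from this file.
if __name__ == "__main__":
    from tencentcloud.common import credential
    from tencentcloud.tdmq.v20200217 import tdmq_client, models as tdmq_models

    cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")  # placeholder credentials
    client = tdmq_client.TdmqClient(cred, "ap-guangzhou")  # example region; client class name assumed
    req = tdmq_models.DescribeClustersRequest()  # request model named in the docstrings above
    resp = client.DescribeClusters(req)  # raises TencentCloudSDKException on API-level errors
    print(resp.to_json_string())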
py
1a43a3e3d4288acc161d006d31418fe6c60106fe
#!/usr/bin/env python3
# Author: C.K
# Email: [email protected]
# DateTime:2021-08-10 21:45:28
# Description: Letter Combinations of a Phone Number (recursive solution)
from typing import List


class Solution:
    def letterCombinations(self, digits: str) -> List[str]:
        # Classic phone-keypad mapping from digit to candidate letters.
        mapping = {
            '2': 'abc',
            '3': 'def',
            '4': 'ghi',
            '5': 'jkl',
            '6': 'mno',
            '7': 'pqrs',
            '8': 'tuv',
            '9': 'wxyz'
        }
        if len(digits) == 0:
            return []
        if len(digits) == 1:
            return list(mapping[digits[0]])
        # Recurse on all but the last digit, then append each letter of the last digit.
        prev = self.letterCombinations(digits[:-1])
        additional = mapping[digits[-1]]
        return [s + c for s in prev for c in additional]


if __name__ == "__main__":
    pass
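# Hedged usage sketch (added for illustration, not part of the original solution
# above): a quick self-check of the recursion. The expected list follows directly
# from the keypad mapping: "2" -> "abc" crossed with "3" -> "def".
def _demo():  # hypothetical helper name, introduced only for this sketch
    assert Solution().letterCombinations("23") == [
        'ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf'
    ]
    print("letterCombinations('23') ->", Solution().letterCombinations("23"))


if __name__ == "__main__":
    _demo()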
py
1a43a3ff259548a159cd447dfa68b023887e1c87
# integrase.py - integrase example
# RMM, 20 Aug 2018 (based on pseudo-code from W. Poole)
#
# This example shows how to use the txtl library to create a model for
# an integrase that flips a promoter between two different
# orientations, one of which expresses GFP and the other that
# expresses RFP.

import txtl

# Import mechanism libraries
import txtl.mechanisms as ML1
import examples.mechanisms as ML2

# Extract and Energy Parameters are stored in a user curated CSV
#! TODO: mixture + mixture -> mixture
myRXN = txtl.extract('BL21_DE3') + txtl.buffer('stdbuffer')

# Define the DNA assembly that the integrase acts on
flip_gene = txtl.FlippableAssembly(promoter='ptet', utr5=['BCD2', 'BCD2'],
                                   cds=['GFP', 'RFP'], integrase="Bxb1",
                                   translation=ML2.translation)
integrase_gene = txtl.DNAassembly(promoter='pMedium', utr5='bcd8', cds='Bxb1',
                                  translation=ML2.translation)

# Put in 10 nM of the flippable construct, 2 nM of the integrase DNA
#! TODO: scalar * construct -> mixture
myRXN += 10*flip_gene + 2*integrase_gene

# Define the mechanisms/variants to be used in instantiating the model
#! TODO: not sure if this can/should be implemented in this way.
#! Might need to use a dictionary instead?
myRXN.mechanisms += [ML1.integrases, ML1.sigma70_transcription, ML2.translation,
                     ML1.first_order_mRNA_degredation,
                     ML1.enzymatic_energy_consumption]

# Should return some kind of easily readable/analyzable data structure
# CRN = myRXN.compile_crn()

# Returns SBML text and possibly also saves an SBML file
SBML = myRXN.write_sbml('integrase.sbml')

# Not necessary, but might be nice to have automatic integration with BioSCRAPE
# ResultsODE = myRXN.simulate_with_bioscrape_ode(time = 10)
# ResultsSSA = myRXN.simulate_with_bioscrape_SSA(volume = 5, time = 10)
py
1a43a46c11649f880078bee3e46cde206645701e
import unittest import atheris import atheris_libprotobuf_mutator from atheris import fuzz_test_lib from google.protobuf import wrappers_pb2 @atheris.instrument_func def simple_proto_comparison(msg): if msg.value == "abc": raise RuntimeError("Solved") class AtherisLibprotobufMutatorTests(unittest.TestCase): def testSimpleProtoComparison(self): fuzz_test_lib.run_fuzztest( simple_proto_comparison, custom_setup=atheris_libprotobuf_mutator.Setup, setup_kwargs={"proto": wrappers_pb2.StringValue}, expected_output=b"Solved", timeout=60) if __name__ == "__main__": unittest.main()
py
1a43a556afbce2bbae23c087cd700f8cefe758ba
import logging.handlers LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'verbose': { 'format': '[%(asctime)s][%(process)d][%(thread)d][%(levelname)-5s][%(filename)s:%(lineno)d][%(funcName)s]: %(message)s', 'datefmt': '%Y/%m/%d %H:%M:%S', }, 'simple': { 'format': '[%(asctime)s][%(levelname)s] %(message)s', 'datefmt': '%Y/%m/%d %H:%M:%S', }, }, 'handlers': { 'console': { 'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'verbose', }, 'socket': { 'level': 'INFO', 'class': 'logging.handlers.SocketHandler', 'formatter': 'verbose', 'host': 'localhost', 'port': logging.handlers.DEFAULT_TCP_LOGGING_PORT, }, 'file': { 'level': 'DEBUG', 'class': 'logging.FileHandler', 'filename': 'debug.log', 'formatter': 'verbose', }, }, 'loggers': { 'jsea_blog': { 'handlers': ['console', 'socket'], 'level': 'DEBUG', 'propagate': True, }, 'django.request': { 'handlers': ['console', 'socket'], 'level': 'DEBUG', }, }, }
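
# --- Hedged usage sketch (added example; not part of the original settings) ---
# In a Django project this LOGGING dict is applied automatically at startup; the
# guarded block below only shows, under that assumption, how application code
# obtains the configured 'jsea_blog' logger. Calling dictConfig here is purely
# for standalone illustration (it creates debug.log and a console handler).
if __name__ == "__main__":
    import logging
    import logging.config

    logging.config.dictConfig(LOGGING)
    logger = logging.getLogger("jsea_blog")
    logger.info("logging configuration loaded")
    # Child loggers such as 'jsea_blog.views' propagate to the handlers above.
    logging.getLogger("jsea_blog.views").info("propagated to 'jsea_blog' handlers")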
py
1a43a64d418b7ead5b94da553f78c174a61078f9
from django.urls import path from . import views app_name = 'events' urlpatterns = [ path('', views.EventListView.as_view(), name='all'), path('<int:pk>/', views.EventView.as_view(), name='details'), path('<int:pk>/edit/', views.EventUpdateView.as_view(), name='edit'), path('<int:pk>/attended/', views.EventAttendeeEditView.as_view(), name='attended'), path('new', views.EventCreateView.as_view(), name='new'), path('<int:pk>/delete/', views.EventDeleteView.as_view(), name='delete'), path('<int:event_id>/register/', views.register_on_event, name="register") ]
py
1a43a65663f3c88bdc4098b2184430e0193e50b7
import argparse from email.mime import image import os from tqdm import tqdm import pandas as pd import logging from src.utils.common import read_yaml, create_directories from src.stage_01_get_data import main as loader_main from sklearn.metrics import confusion_matrix, f1_score import numpy as np import warnings import torch STAGE = "STAGE_NAME" ## <<< change stage name logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) formatter = logging.Formatter("[%(asctime)s: %(levelname)s: %(module)s]: %(message)s") file_handler = logging.FileHandler(os.path.join("logs", "running_logs.log")) file_handler.setFormatter(formatter) logger.addHandler(file_handler) warnings.filterwarnings('ignore') def main(config_path): ## read config files config = read_yaml(config_path) train_data_loader, test_data_loader, labels_dict = loader_main(config_path) pred = np.array([]) target = np.array([]) prediction_data_dir = config['data']['PRED_DATA_DIR'] create_directories([prediction_data_dir]) prediction_data_file_name = config['data']['PRED_DATA_FILE_NAME'] prediction_data_file_path = os.path.join(prediction_data_dir, prediction_data_file_name) model_dir = config['artifacts']['model_dir'] trained_model_name = config['artifacts']['trained_model_name'] trained_model_path = os.path.join(model_dir, trained_model_name) model = torch.load(trained_model_path) logger.info(f"trained model loaded") DEVICE = "cuda" if torch.cuda.is_available() else "cpu" model.to(DEVICE) logger.info(f"trained model loaded into {DEVICE}") with torch.no_grad(): for batch, data in enumerate(test_data_loader): images = data[0].to(DEVICE) labels = data[1].to(DEVICE) y_pred = model(images) pred = np.concatenate((pred, torch.argmax(y_pred, 1).cpu().numpy())) target = np.concatenate((target, labels.cpu().numpy())) logger.info("prediction for test data finished") df = pd.DataFrame({"Actual":target, "Prediction":pred}) df.to_csv(prediction_data_file_path) logger.info(f"saved prediction results into {prediction_data_file_path}") cm = confusion_matrix(target, pred) print(cm) fs = f1_score(target, pred, average=None) print(fs) logger.info(fs) if __name__ == '__main__': args = argparse.ArgumentParser() args.add_argument("--config", "-c", default="configs/config.yaml") parsed_args = args.parse_args() try: logger.info("\n********************") logger.info(f">>>>> stage {STAGE} started <<<<<") main(config_path=parsed_args.config) logger.info(f">>>>> stage {STAGE} completed!<<<<<\n") except Exception as e: logger.exception(e) raise e
py
1a43a771f27e7e258952cf42b867cc0d5febac4f
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .resource import Resource class Replication(Resource): """An object that represents a replication for a container registry. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: The resource ID. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. :vartype type: str :param location: Required. The location of the resource. This cannot be changed after the resource is created. :type location: str :param tags: The tags of the resource. :type tags: dict[str, str] :ivar provisioning_state: The provisioning state of the replication at the time the operation was called. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled' :vartype provisioning_state: str or ~azure.mgmt.containerregistry.v2019_04_01.models.ProvisioningState :ivar status: The status of the replication at the time the operation was called. :vartype status: ~azure.mgmt.containerregistry.v2019_04_01.models.Status """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, 'provisioning_state': {'readonly': True}, 'status': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'Status'}, } def __init__(self, **kwargs): super(Replication, self).__init__(**kwargs) self.provisioning_state = None self.status = None
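

# --- Hedged usage sketch (added example; not part of the generated model) ---
# Shows, as an assumption about typical SDK usage, how a caller builds a
# Replication payload: only the writable fields (location, tags) are supplied,
# while the read-only fields (id, name, type, provisioning_state, status) are
# populated by the service. The helper name below is purely illustrative.
def _example_replication():
    """Return a minimal Replication object suitable for a create call."""
    return Replication(location="westus", tags={"environment": "test"})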
py
1a43a7df3bab6d5564602b105eb20c03bda37f5e
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys import time import threading import os import traceback import json import shutil import weakref import csv from decimal import Decimal import base64 from functools import partial import queue import asyncio from typing import Optional, TYPE_CHECKING, Sequence, List, Union from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget, QMenuBar, QFileDialog, QCheckBox, QLabel, QVBoxLayout, QGridLayout, QLineEdit, QHBoxLayout, QPushButton, QScrollArea, QTextEdit, QShortcut, QMainWindow, QCompleter, QInputDialog, QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog) import electrum from electrum import (keystore, ecc, constants, util, bitcoin, commands, paymentrequest) from electrum.bitcoin import COIN, is_address from electrum.plugin import run_hook from electrum.i18n import _ from electrum.util import (format_time, format_satoshis, format_fee_satoshis, format_satoshis_plain, UserCancelled, profiler, export_meta, import_meta, bh2u, bfh, InvalidPassword, decimal_point_to_base_unit_name, UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException, get_new_wallet_name, send_exception_to_crash_reporter, InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds, NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs) from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING from electrum.transaction import (Transaction, PartialTxInput, PartialTransaction, PartialTxOutput) from electrum.address_synchronizer import AddTransactionException from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet, sweep_preparations, InternalAddressCorruption) from electrum.version import ELECTRUM_VERSION from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError from electrum.exchange_rate import FxThread from electrum.simple_config import SimpleConfig from electrum.logging import Logger from electrum.util import PR_PAID, PR_FAILED from electrum.util import pr_expiration_values from electrum.lnutil import ln_dummy_address from .exception_window import Exception_Hook from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit from .qrcodewidget import QRCodeWidget, QRDialog from 
.qrtextedit import ShowQRTextEdit, ScanQRTextEdit from .transaction_dialog import show_transaction from .fee_slider import FeeSlider from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog, WindowModalDialog, ChoicesLayout, HelpLabel, Buttons, OkButton, InfoButton, WWLabel, TaskThread, CancelButton, CloseButton, HelpButton, MessageBoxMixin, EnterButton, import_meta_gui, export_meta_gui, filename_field, address_field, char_width_in_lineedit, webopen, TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT) from .util import ButtonsTextEdit from .installwizard import WIF_HELP_TEXT from .history_list import HistoryList, HistoryModel from .update_checker import UpdateCheck, UpdateCheckThread from .channels_list import ChannelsList from .confirm_tx_dialog import ConfirmTxDialog from .transaction_dialog import PreviewTxDialog if TYPE_CHECKING: from . import ElectrumGui LN_NUM_PAYMENT_ATTEMPTS = 10 class StatusBarButton(QPushButton): def __init__(self, icon, tooltip, func): QPushButton.__init__(self, icon, '') self.setToolTip(tooltip) self.setFlat(True) self.setMaximumWidth(25) self.clicked.connect(self.onPress) self.func = func self.setIconSize(QSize(25,25)) self.setCursor(QCursor(Qt.PointingHandCursor)) def onPress(self, checked=False): '''Drops the unwanted PyQt5 "checked" argument''' self.func() def keyPressEvent(self, e): if e.key() == Qt.Key_Return: self.func() def protected(func): '''Password request wrapper. The password is passed to the function as the 'password' named argument. "None" indicates either an unencrypted wallet, or the user cancelled the password request. An empty input is passed as the empty string.''' def request_password(self, *args, **kwargs): parent = self.top_level_window() password = None while self.wallet.has_keystore_encryption(): password = self.password_dialog(parent=parent) if password is None: # User cancelled password input return try: self.wallet.check_password(password) break except Exception as e: self.show_error(str(e), parent=parent) continue kwargs['password'] = password return func(self, *args, **kwargs) return request_password class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): payment_request_ok_signal = pyqtSignal() payment_request_error_signal = pyqtSignal() network_signal = pyqtSignal(str, object) #ln_payment_attempt_signal = pyqtSignal(str) alias_received_signal = pyqtSignal() computing_privkeys_signal = pyqtSignal() show_privkeys_signal = pyqtSignal() def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet): QMainWindow.__init__(self) self.gui_object = gui_object self.config = config = gui_object.config # type: SimpleConfig self.gui_thread = gui_object.gui_thread self.setup_exception_hook() self.network = gui_object.daemon.network # type: Network assert wallet, "no wallet" self.wallet = wallet self.fx = gui_object.daemon.fx # type: FxThread self.contacts = wallet.contacts self.tray = gui_object.tray self.app = gui_object.app self.cleaned_up = False self.payment_request = None # type: Optional[paymentrequest.PaymentRequest] self.payto_URI = None self.checking_accounts = False self.qr_window = None self.pluginsdialog = None self.tl_windows = [] Logger.__init__(self) self.tx_notification_queue = queue.Queue() self.tx_notification_last_time = 0 self.create_status_bar() self.need_update = threading.Event() self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT) try: decimal_point_to_base_unit_name(self.decimal_point) except UnknownBaseUnit: self.decimal_point = DECIMAL_POINT_DEFAULT 
self.num_zeros = int(config.get('num_zeros', 0)) self.completions = QStringListModel() coincontrol_sb = self.create_coincontrol_statusbar() self.tabs = tabs = QTabWidget(self) self.send_tab = self.create_send_tab() self.receive_tab = self.create_receive_tab() self.addresses_tab = self.create_addresses_tab() self.utxo_tab = self.create_utxo_tab() self.console_tab = self.create_console_tab() self.contacts_tab = self.create_contacts_tab() self.channels_tab = self.create_channels_tab() tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History')) tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send')) tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive')) def add_optional_tab(tabs, tab, icon, description, name): tab.tab_icon = icon tab.tab_description = description tab.tab_pos = len(tabs) tab.tab_name = name if self.config.get('show_{}_tab'.format(name), False): tabs.addTab(tab, icon, description.replace("&", "")) add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses") if self.wallet.has_lightning(): add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels") add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo") add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts") add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console") tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) central_widget = QWidget() vbox = QVBoxLayout(central_widget) vbox.setContentsMargins(0, 0, 0, 0) vbox.addWidget(tabs) vbox.addWidget(coincontrol_sb) self.setCentralWidget(central_widget) if self.config.get("is_maximized"): self.showMaximized() self.setWindowIcon(read_QIcon("electrum.png")) self.init_menubar() wrtabs = weakref.proxy(tabs) QShortcut(QKeySequence("Ctrl+W"), self, self.close) QShortcut(QKeySequence("Ctrl+Q"), self, self.close) QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet) QShortcut(QKeySequence("F5"), self, self.update_wallet) QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count())) QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count())) for i in range(wrtabs.count()): QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i)) self.payment_request_ok_signal.connect(self.payment_request_ok) self.payment_request_error_signal.connect(self.payment_request_error) self.history_list.setFocus(True) # network callbacks if self.network: self.network_signal.connect(self.on_network_qt) interests = ['wallet_updated', 'network_updated', 'blockchain_updated', 'new_transaction', 'status', 'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes', 'on_history', 'channel', 'channels_updated', 'payment_failed', 'payment_succeeded', 'invoice_status', 'request_status', 'ln_gossip_sync_progress'] # To avoid leaking references to "self" that prevent the # window from being GC-ed when closed, callbacks should be # methods of this class only, and specifically not be # partials, lambdas or methods of subobjects. Hence... 
self.network.register_callback(self.on_network, interests) # set initial message self.console.showMessage(self.network.banner) # update fee slider in case we missed the callback #self.fee_slider.update() self.load_wallet(wallet) gui_object.timer.timeout.connect(self.timer_actions) self.fetch_alias() # If the option hasn't been set yet if config.get('check_updates') is None: choice = self.question(title="Electrum - " + _("Enable update check"), msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " + _("Would you like to be notified when there is a newer version of Electrum available?")) config.set_key('check_updates', bool(choice), save=True) if config.get('check_updates', False): # The references to both the thread and the window need to be stored somewhere # to prevent GC from getting in our way. def on_version_received(v): if UpdateCheck.is_newer(v): self.update_check_button.setText(_("Update to Electrum {} is available").format(v)) self.update_check_button.clicked.connect(lambda: self.show_update_check(v)) self.update_check_button.show() self._update_check_thread = UpdateCheckThread(self) self._update_check_thread.checked.connect(on_version_received) self._update_check_thread.start() def setup_exception_hook(self): Exception_Hook(self) def on_fx_history(self): self.history_model.refresh('fx_history') self.address_list.update() def on_fx_quotes(self): self.update_status() # Refresh edits with the new rate edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e edit.textEdited.emit(edit.text()) edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e edit.textEdited.emit(edit.text()) # History tab needs updating if it used spot if self.fx.history_used_spot: self.history_model.refresh('fx_quotes') self.address_list.update() def toggle_tab(self, tab): show = not self.config.get('show_{}_tab'.format(tab.tab_name), False) self.config.set_key('show_{}_tab'.format(tab.tab_name), show) item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description) tab.menu_action.setText(item_text) if show: # Find out where to place the tab index = len(self.tabs) for i in range(len(self.tabs)): try: if tab.tab_pos < self.tabs.widget(i).tab_pos: index = i break except AttributeError: pass self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", "")) else: i = self.tabs.indexOf(tab) self.tabs.removeTab(i) def push_top_level_window(self, window): '''Used for e.g. tx dialog box to ensure new dialogs are appropriately parented. 
This used to be done by explicitly providing the parent window, but that isn't something hardware wallet prompts know.''' self.tl_windows.append(window) def pop_top_level_window(self, window): self.tl_windows.remove(window) def top_level_window(self, test_func=None): '''Do the right thing in the presence of tx dialog windows''' override = self.tl_windows[-1] if self.tl_windows else None if override and test_func and not test_func(override): override = None # only override if ok for test_func return self.top_level_window_recurse(override, test_func) def diagnostic_name(self): #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name()) return self.wallet.diagnostic_name() def is_hidden(self): return self.isMinimized() or self.isHidden() def show_or_hide(self): if self.is_hidden(): self.bring_to_top() else: self.hide() def bring_to_top(self): self.show() self.raise_() def on_error(self, exc_info): e = exc_info[1] if isinstance(e, UserCancelled): pass elif isinstance(e, UserFacingException): self.show_error(str(e)) else: try: self.logger.error("on_error", exc_info=exc_info) except OSError: pass # see #4418 self.show_error(repr(e)) def on_network(self, event, *args): # Handle in GUI thread self.network_signal.emit(event, args) def on_network_qt(self, event, args=None): # Handle a network message in the GUI thread # note: all windows get events from all wallets! if event == 'wallet_updated': wallet = args[0] if wallet == self.wallet: self.need_update.set() elif event == 'network_updated': self.gui_object.network_updated_signal_obj.network_updated_signal \ .emit(event, args) self.network_signal.emit('status', None) elif event == 'blockchain_updated': # to update number of confirmations in history self.need_update.set() elif event == 'new_transaction': wallet, tx = args if wallet == self.wallet: self.tx_notification_queue.put(tx) elif event == 'on_quotes': self.on_fx_quotes() elif event == 'on_history': self.on_fx_history() elif event == 'channels_updated': self.channels_list.update_rows.emit(*args) elif event == 'channel': self.channels_list.update_single_row.emit(*args) self.update_status() elif event == 'request_status': self.on_request_status(*args) elif event == 'invoice_status': self.on_invoice_status(*args) elif event == 'payment_succeeded': self.on_payment_succeeded(*args) elif event == 'payment_failed': self.on_payment_failed(*args) elif event == 'status': self.update_status() elif event == 'banner': self.console.showMessage(args[0]) elif event == 'verified': wallet, tx_hash, tx_mined_status = args if wallet == self.wallet: self.history_model.update_tx_mined_status(tx_hash, tx_mined_status) elif event == 'fee': pass elif event == 'fee_histogram': self.history_model.on_fee_histogram() elif event == 'ln_gossip_sync_progress': self.update_lightning_icon() else: self.logger.info(f"unexpected network event: {event} {args}") def fetch_alias(self): self.alias_info = None alias = self.config.get('alias') if alias: alias = str(alias) def f(): self.alias_info = self.contacts.resolve_openalias(alias) self.alias_received_signal.emit() t = threading.Thread(target=f) t.setDaemon(True) t.start() def close_wallet(self): if self.wallet: self.logger.info(f'close_wallet {self.wallet.storage.path}') run_hook('close_wallet', self.wallet) @profiler def load_wallet(self, wallet): wallet.thread = TaskThread(self, self.on_error) self.update_recently_visited(wallet.storage.path) if wallet.lnworker and wallet.network: wallet.network.trigger_callback('channels_updated', wallet) 
self.need_update.set() # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized # update menus self.seed_menu.setEnabled(self.wallet.has_seed()) self.update_lock_icon() self.update_buttons_on_seed() self.update_console() self.clear_receive_tab() self.request_list.update() self.channels_list.update() self.tabs.show() self.init_geometry() if self.config.get('hide_gui') and self.gui_object.tray.isVisible(): self.hide() else: self.show() self.watching_only_changed() run_hook('load_wallet', wallet, self) try: wallet.try_detecting_internal_addresses_corruption() except InternalAddressCorruption as e: self.show_error(str(e)) send_exception_to_crash_reporter(e) def init_geometry(self): winpos = self.wallet.db.get("winpos-qt") try: screen = self.app.desktop().screenGeometry() assert screen.contains(QRect(*winpos)) self.setGeometry(*winpos) except: self.logger.info("using default geometry") self.setGeometry(100, 100, 840, 400) def watching_only_changed(self): name = "Electrum Testnet" if constants.net.TESTNET else "Electrum" title = '%s %s - %s' % (name, ELECTRUM_VERSION, self.wallet.basename()) extra = [self.wallet.db.get('wallet_type', '?')] if self.wallet.is_watching_only(): extra.append(_('watching only')) title += ' [%s]'% ', '.join(extra) self.setWindowTitle(title) self.password_menu.setEnabled(self.wallet.may_have_password()) self.import_privkey_menu.setVisible(self.wallet.can_import_privkey()) self.import_address_menu.setVisible(self.wallet.can_import_address()) self.export_menu.setEnabled(self.wallet.can_export()) def warn_if_watching_only(self): if self.wallet.is_watching_only(): msg = ' '.join([ _("This wallet is watching-only."), _("This means you will not be able to spend Bitcoins with it."), _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.") ]) self.show_warning(msg, title=_('Watch-only wallet')) def warn_if_lightning_backup(self): if self.wallet.is_lightning_backup(): msg = '\n\n'.join([ _("This file is a backup of a lightning wallet."), _("You will not be able to perform lightning payments using this file, and the lightning balance displayed in this wallet might be outdated.") + ' ' + \ _("If you have lost the original wallet file, you can use this file to trigger a forced closure of your channels."), _("Do you want to have your channels force-closed?") ]) if self.question(msg, title=_('Lightning Backup')): self.network.maybe_init_lightning() self.wallet.lnworker.start_network(self.network) def warn_if_testnet(self): if not constants.net.TESTNET: return # user might have opted out already if self.config.get('dont_show_testnet_warning', False): return # only show once per process lifecycle if getattr(self.gui_object, '_warned_testnet', False): return self.gui_object._warned_testnet = True msg = ''.join([ _("You are in testnet mode."), ' ', _("Testnet coins are worthless."), '\n', _("Testnet is separate from the main Bitcoin network. 
It is used for testing.") ]) cb = QCheckBox(_("Don't show this again.")) cb_checked = False def on_cb(x): nonlocal cb_checked cb_checked = x == Qt.Checked cb.stateChanged.connect(on_cb) self.show_warning(msg, title=_('Testnet'), checkbox=cb) if cb_checked: self.config.set_key('dont_show_testnet_warning', True) def open_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) if not filename: return self.gui_object.new_window(filename) def backup_wallet(self): try: new_path = self.wallet.save_backup() except BaseException as reason: self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup")) return if new_path: self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created")) else: self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created")) def update_recently_visited(self, filename): recent = self.config.get('recently_open', []) try: sorted(recent) except: recent = [] if filename in recent: recent.remove(filename) recent.insert(0, filename) recent = [path for path in recent if os.path.exists(path)] recent = recent[:5] self.config.set_key('recently_open', recent) self.recently_visited_menu.clear() for i, k in enumerate(sorted(recent)): b = os.path.basename(k) def loader(k): return lambda: self.gui_object.new_window(k) self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1))) self.recently_visited_menu.setEnabled(len(recent)) def get_wallet_folder(self): return os.path.dirname(os.path.abspath(self.wallet.storage.path)) def new_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename = get_new_wallet_name(wallet_folder) full_path = os.path.join(wallet_folder, filename) self.gui_object.start_new_window(full_path, None) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) self.recently_visited_menu = file_menu.addMenu(_("&Recently open")) file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("Delete"), self.remove_wallet) file_menu.addSeparator() file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&Information"), self.show_wallet_info) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.private_keys_menu = wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses) wallet_menu.addSeparator() addresses_menu = wallet_menu.addMenu(_("&Addresses")) addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config)) 
labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) history_menu = wallet_menu.addMenu(_("&History")) history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config)) history_menu.addAction(_("&Summary"), self.history_list.show_summary) history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog) history_menu.addAction(_("&Export"), self.history_list.export_history_dialog) contacts_menu = wallet_menu.addMenu(_("Contacts")) contacts_menu.addAction(_("&New"), self.new_contact_dialog) contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts()) contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts()) invoices_menu = wallet_menu.addMenu(_("Invoices")) invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices()) invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices()) wallet_menu.addSeparator() wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F")) def add_toggle_action(view_menu, tab): is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False) item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab)) view_menu = menubar.addMenu(_("&View")) add_toggle_action(view_menu, self.addresses_tab) add_toggle_action(view_menu, self.utxo_tab) if self.wallet.has_lightning(): add_toggle_action(view_menu, self.channels_tab) add_toggle_action(view_menu, self.contacts_tab) add_toggle_action(view_menu, self.console_tab) tools_menu = menubar.addMenu(_("&Tools")) # Settings / Preferences are all reserved keywords in macOS using this as work around tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network)) tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network)) tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower)) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message) tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message) tools_menu.addSeparator() paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany) raw_transaction_menu = tools_menu.addMenu(_("&Load transaction")) raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file) raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text) raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid) raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode) self.raw_transaction_menu = raw_transaction_menu run_hook('init_menubar_tools', self, tools_menu) help_menu = menubar.addMenu(_("&Help")) help_menu.addAction(_("&About"), self.show_about) help_menu.addAction(_("&Check for updates"), self.show_update_check) help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org")) help_menu.addSeparator() help_menu.addAction(_("&Documentation"), lambda: 
webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents) help_menu.addAction(_("&Report Bug"), self.show_report_bug) help_menu.addSeparator() help_menu.addAction(_("&Donate to server"), self.donate_to_server) self.setMenuBar(menubar) def donate_to_server(self): d = self.network.get_donation_address() if d: host = self.network.get_parameters().host self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host)) else: self.show_error(_('No donation address for this server')) def show_about(self): QMessageBox.about(self, "Electrum", (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " + _("You do not need to perform regular backups, because your wallet can be " "recovered from a secret phrase that you can memorize or write on paper.") + " " + _("Startup times are instant because it operates in conjunction with high-performance " "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" + _("Uses icons from the Icons8 icon pack (icons8.com)."))) def show_update_check(self, version=None): self.gui_object._update_check = UpdateCheck(self, version) def show_report_bug(self): msg = ' '.join([ _("Please report any bugs as issues on github:<br/>"), f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''', _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."), _("Try to explain not only what the bug is, but how it occurs.") ]) self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True) def notify_transactions(self): if self.tx_notification_queue.qsize() == 0: return if not self.wallet.up_to_date: return # no notifications while syncing now = time.time() rate_limit = 20 # seconds if self.tx_notification_last_time + rate_limit > now: return self.tx_notification_last_time = now self.logger.info("Notifying GUI about new transactions") txns = [] while True: try: txns.append(self.tx_notification_queue.get_nowait()) except queue.Empty: break # Combine the transactions if there are at least three if len(txns) >= 3: total_amount = 0 for tx in txns: is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if not is_relevant: continue total_amount += v self.notify(_("{} new transactions: Total amount received in the new transactions {}") .format(len(txns), self.format_amount_and_units(total_amount))) else: for tx in txns: is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if not is_relevant: continue self.notify(_("New transaction: {}").format(self.format_amount_and_units(v))) def notify(self, message): if self.tray: try: # this requires Qt 5.9 self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000) except TypeError: self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000) # custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user def getOpenFileName(self, title, filter = ""): directory = self.config.get('io_dir', os.path.expanduser('~')) fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def getSaveFileName(self, title, filename, filter="", *, default_extension: str = None, default_filter: str = None) -> Optional[str]: directory = self.config.get('io_dir', 
os.path.expanduser('~')) path = os.path.join(directory, filename) file_dialog = QFileDialog(self, title, path, filter) file_dialog.setAcceptMode(QFileDialog.AcceptSave) if default_extension: # note: on MacOS, the selected filter's first extension seems to have priority over this... file_dialog.setDefaultSuffix(default_extension) if default_filter: assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}" file_dialog.selectNameFilter(default_filter) if file_dialog.exec() != QDialog.Accepted: return None selected_path = file_dialog.selectedFiles()[0] if selected_path and directory != os.path.dirname(selected_path): self.config.set_key('io_dir', os.path.dirname(selected_path), True) return selected_path def timer_actions(self): self.request_list.refresh_status() # Note this runs in the GUI thread if self.need_update.is_set(): self.need_update.clear() self.update_wallet() elif not self.wallet.up_to_date: # this updates "synchronizing" progress self.update_status() # resolve aliases # FIXME this is a blocking network call that has a timeout of 5 sec self.payto_e.resolve() self.notify_transactions() def format_amount(self, x, is_diff=False, whitespaces=False): return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces) def format_amount_and_units(self, amount): text = self.format_amount(amount) + ' '+ self.base_unit() x = self.fx.format_amount_and_units(amount) if self.fx else None if text and x: text += ' (%s)'%x return text def format_fee_rate(self, fee_rate): # fee_rate is in sat/kB return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte' def get_decimal_point(self): return self.decimal_point def base_unit(self): return decimal_point_to_base_unit_name(self.decimal_point) def connect_fields(self, window, btc_e, fiat_e, fee_e): def edit_changed(edit): if edit.follows: return edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) fiat_e.is_last_edited = (edit == fiat_e) amount = edit.get_amount() rate = self.fx.exchange_rate() if self.fx else Decimal('NaN') if rate.is_nan() or amount is None: if edit is fiat_e: btc_e.setText("") if fee_e: fee_e.setText("") else: fiat_e.setText("") else: if edit is fiat_e: btc_e.follows = True btc_e.setAmount(int(amount / Decimal(rate) * COIN)) btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) btc_e.follows = False if fee_e: window.update_fee() else: fiat_e.follows = True fiat_e.setText(self.fx.ccy_amount_str( amount * Decimal(rate) / COIN, False)) fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) fiat_e.follows = False btc_e.follows = False fiat_e.follows = False fiat_e.textChanged.connect(partial(edit_changed, fiat_e)) btc_e.textChanged.connect(partial(edit_changed, btc_e)) fiat_e.is_last_edited = False def update_status(self): if not self.wallet: return if self.network is None: text = _("Offline") icon = read_QIcon("status_disconnected.png") elif self.network.is_connected(): server_height = self.network.get_server_height() server_lag = self.network.get_local_height() - server_height fork_str = "_fork" if len(self.network.get_blockchains())>1 else "" # Server height can be 0 after switching to a new server # until we get a headers subscription request response. # Display the synchronizing message in that case. 
if not self.wallet.up_to_date or server_height == 0: num_sent, num_answered = self.wallet.get_history_sync_state_details() text = ("{} ({}/{})" .format(_("Synchronizing..."), num_answered, num_sent)) icon = read_QIcon("status_waiting.png") elif server_lag > 1: text = _("Server is lagging ({} blocks)").format(server_lag) icon = read_QIcon("status_lagging%s.png"%fork_str) else: c, u, x = self.wallet.get_balance() text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c)) if u: text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip()) if x: text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip()) if self.wallet.lnworker: l = self.wallet.lnworker.get_balance() text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip()) # append fiat balance and price if self.fx.is_enabled(): text += self.fx.get_fiat_status_text(c + u + x, self.base_unit(), self.get_decimal_point()) or '' if not self.network.proxy: icon = read_QIcon("status_connected%s.png"%fork_str) else: icon = read_QIcon("status_connected_proxy%s.png"%fork_str) else: if self.network.proxy: text = "{} ({})".format(_("Not connected"), _("proxy enabled")) else: text = _("Not connected") icon = read_QIcon("status_disconnected.png") self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename())) self.balance_label.setText(text) if self.status_button: self.status_button.setIcon( icon ) def update_wallet(self): self.update_status() if self.wallet.up_to_date or not self.network or not self.network.is_connected(): self.update_tabs() def update_tabs(self, wallet=None): if wallet is None: wallet = self.wallet if wallet != self.wallet: return self.history_model.refresh('update_tabs') self.request_list.update() self.address_list.update() self.utxo_list.update() self.contact_list.update() self.invoice_list.update() self.channels_list.update_rows.emit(wallet) self.update_completions() def create_channels_tab(self): self.channels_list = ChannelsList(self) t = self.channels_list.get_toolbar() return self.create_list_tab(self.channels_list, t) def create_history_tab(self): self.history_model = HistoryModel(self) self.history_list = l = HistoryList(self, self.history_model) self.history_model.set_view(self.history_list) l.searchable_list = l toolbar = l.create_toolbar(self.config) toolbar_shown = bool(self.config.get('show_toolbar_history', False)) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def show_address(self, addr): from . import address_dialog d = address_dialog.AddressDialog(self, addr) d.exec_() def show_channel(self, channel_id): from . import channel_details channel_details.ChannelDetailsDialog(self, channel_id).show() def show_transaction(self, tx, *, tx_desc=None): '''tx_desc is set only for txs created in the Send tab''' show_transaction(tx, parent=self, desc=tx_desc) def show_lightning_transaction(self, tx_item): from .lightning_tx_dialog import LightningTxDialog d = LightningTxDialog(self, tx_item) d.show() def create_receive_tab(self): # A 4-column grid layout. All the stretch is in the last column. 
# The exchange rate plugin adds a fiat widget in column 2 self.receive_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) self.receive_message_e = QLineEdit() grid.addWidget(QLabel(_('Description')), 0, 0) grid.addWidget(self.receive_message_e, 0, 1, 1, 4) self.receive_message_e.textChanged.connect(self.update_receive_qr) self.receive_amount_e = BTCAmountEdit(self.get_decimal_point) grid.addWidget(QLabel(_('Requested amount')), 1, 0) grid.addWidget(self.receive_amount_e, 1, 1) self.receive_amount_e.textChanged.connect(self.update_receive_qr) self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_receive_e.setVisible(False) grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft) self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None) self.connect_fields(self, self.amount_e, self.fiat_send_e, None) self.expires_combo = QComboBox() evl = sorted(pr_expiration_values.items()) evl_keys = [i[0] for i in evl] evl_values = [i[1] for i in evl] default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING) try: i = evl_keys.index(default_expiry) except ValueError: i = 0 self.expires_combo.addItems(evl_values) self.expires_combo.setCurrentIndex(i) self.expires_combo.setFixedWidth(self.receive_amount_e.width()) def on_expiry(i): self.config.set_key('request_expiry', evl_keys[i]) self.expires_combo.currentIndexChanged.connect(on_expiry) msg = ' '.join([ _('Expiration date of your request.'), _('This information is seen by the recipient if you send them a signed payment request.'), _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'), _('The bitcoin address never expires and will always be part of this electrum wallet.'), ]) grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0) grid.addWidget(self.expires_combo, 2, 1) self.expires_label = QLineEdit('') self.expires_label.setReadOnly(1) self.expires_label.setFocusPolicy(Qt.NoFocus) self.expires_label.hide() grid.addWidget(self.expires_label, 2, 1) self.clear_invoice_button = QPushButton(_('Clear')) self.clear_invoice_button.clicked.connect(self.clear_receive_tab) self.create_invoice_button = QPushButton(_('On-chain')) self.create_invoice_button.setIcon(read_QIcon("bitcoin.png")) self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False)) self.receive_buttons = buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.clear_invoice_button) buttons.addWidget(self.create_invoice_button) if self.wallet.has_lightning(): self.create_lightning_invoice_button = QPushButton(_('Lightning')) self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png")) self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True)) buttons.addWidget(self.create_lightning_invoice_button) grid.addLayout(buttons, 4, 3, 1, 2) self.receive_payreq_e = ButtonsTextEdit() self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT)) self.receive_payreq_e.addCopyButton(self.app) self.receive_payreq_e.setReadOnly(True) self.receive_payreq_e.textChanged.connect(self.update_receive_qr) self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus) self.receive_qr = QRCodeWidget(fixedSize=220) self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window() self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor)) self.receive_qr.leaveEvent = lambda x: 
self.app.setOverrideCursor(QCursor(Qt.ArrowCursor)) self.receive_address_e = ButtonsTextEdit() self.receive_address_e.setFont(QFont(MONOSPACE_FONT)) self.receive_address_e.addCopyButton(self.app) self.receive_address_e.setReadOnly(True) self.receive_address_e.textChanged.connect(self.update_receive_address_styling) qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self) qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png" self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code")) self.receive_requests_label = QLabel(_('Incoming payments')) from .request_list import RequestList self.request_list = RequestList(self) receive_tabs = QTabWidget() receive_tabs.addTab(self.receive_address_e, _('Address')) receive_tabs.addTab(self.receive_payreq_e, _('Request')) receive_tabs.addTab(self.receive_qr, _('QR Code')) receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0)) receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i)) # layout vbox_g = QVBoxLayout() vbox_g.addLayout(grid) vbox_g.addStretch() hbox = QHBoxLayout() hbox.addLayout(vbox_g) hbox.addStretch() hbox.addWidget(receive_tabs) w = QWidget() w.searchable_list = self.request_list vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.receive_requests_label) vbox.addWidget(self.request_list) vbox.setStretchFactor(self.request_list, 1000) return w def delete_requests(self, keys): for key in keys: self.wallet.delete_request(key) self.request_list.update() self.clear_receive_tab() def delete_lightning_payreq(self, payreq_key): self.wallet.lnworker.delete_invoice(payreq_key) self.request_list.update() self.invoice_list.update() self.clear_receive_tab() def sign_payment_request(self, addr): alias = self.config.get('alias') alias_privkey = None if alias and self.alias_info: alias_addr, alias_name, validated = self.alias_info if alias_addr: if self.wallet.is_mine(alias_addr): msg = _('This payment request will be signed.') + '\n' + _('Please enter your password') password = None if self.wallet.has_keystore_encryption(): password = self.password_dialog(msg) if not password: return try: self.wallet.sign_payment_request(addr, alias, alias_addr, password) except Exception as e: self.show_error(repr(e)) return else: return def create_invoice(self, is_lightning): amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING) if is_lightning: key = self.wallet.lnworker.add_request(amount, message, expiry) else: key = self.create_bitcoin_request(amount, message, expiry) self.address_list.update() self.request_list.update() self.request_list.select_key(key) # clear request fields self.receive_amount_e.setText('') self.receive_message_e.setText('') def create_bitcoin_request(self, amount, message, expiration): addr = self.wallet.get_unused_address() if addr is None: if not self.wallet.is_deterministic(): msg = [ _('No more addresses in your wallet.'), _('You are using a non-deterministic wallet, which cannot create new addresses.'), _('If you want to create new addresses, use a deterministic wallet instead.') ] self.show_message(' '.join(msg)) return if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. 
To avoid this situation, use the existing addresses first.\n\nCreate anyway?")): return addr = self.wallet.create_new_address(False) req = self.wallet.make_payment_request(addr, amount, message, expiration) try: self.wallet.add_payment_request(req) except Exception as e: self.logger.exception('Error adding payment request') self.show_error(_('Error adding payment request') + ':\n' + repr(e)) else: self.sign_payment_request(addr) return addr def do_copy(self, content: str, *, title: str = None) -> None: self.app.clipboard().setText(content) if title is None: tooltip_text = _("Text copied to clipboard").format(title) else: tooltip_text = _("{} copied to clipboard").format(title) QToolTip.showText(QCursor.pos(), tooltip_text, self) def export_payment_request(self, addr): r = self.wallet.receive_requests.get(addr) pr = paymentrequest.serialize_request(r).SerializeToString() name = r['id'] + '.bip70' fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70") if fileName: with open(fileName, "wb+") as f: f.write(util.to_bytes(pr)) self.show_message(_("Request saved successfully")) self.saved = True def clear_receive_tab(self): self.receive_payreq_e.setText('') self.receive_address_e.setText('') self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) self.expires_label.hide() self.expires_combo.show() self.request_list.clearSelection() def toggle_qr_window(self): from . import qrwindow if not self.qr_window: self.qr_window = qrwindow.QR_Window(self) self.qr_window.setVisible(True) self.qr_window_geometry = self.qr_window.geometry() else: if not self.qr_window.isVisible(): self.qr_window.setVisible(True) self.qr_window.setGeometry(self.qr_window_geometry) else: self.qr_window_geometry = self.qr_window.geometry() self.qr_window.setVisible(False) self.update_receive_qr() def show_send_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab)) def show_receive_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab)) def update_receive_qr(self): uri = str(self.receive_payreq_e.text()) if maybe_extract_bolt11_invoice(uri): # encode lightning invoices as uppercase so QR encoding can use # alphanumeric mode; resulting in smaller QR codes uri = uri.upper() self.receive_qr.setData(uri) if self.qr_window and self.qr_window.isVisible(): self.qr_window.qrw.setData(uri) def update_receive_address_styling(self): addr = str(self.receive_address_e.text()) if is_address(addr) and self.wallet.is_used(addr): self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) self.receive_address_e.setToolTip(_("This address has already been used. " "For better privacy, do not reuse it for new payments.")) else: self.receive_address_e.setStyleSheet("") self.receive_address_e.setToolTip("") def create_send_tab(self): # A 4-column grid layout. All the stretch is in the last column. 
# The exchange rate plugin adds a fiat widget in column 2 self.send_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) from .paytoedit import PayToEdit self.amount_e = BTCAmountEdit(self.get_decimal_point) self.payto_e = PayToEdit(self) msg = _('Recipient of the funds.') + '\n\n'\ + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)') payto_label = HelpLabel(_('Pay to'), msg) grid.addWidget(payto_label, 1, 0) grid.addWidget(self.payto_e, 1, 1, 1, -1) completer = QCompleter() completer.setCaseSensitivity(False) self.payto_e.set_completer(completer) completer.setModel(self.completions) msg = _('Description of the transaction (not mandatory).') + '\n\n'\ + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.') description_label = HelpLabel(_('Description'), msg) grid.addWidget(description_label, 2, 0) self.message_e = FreezableLineEdit() self.message_e.setMinimumWidth(700) grid.addWidget(self.message_e, 2, 1, 1, -1) msg = _('Amount to be sent.') + '\n\n' \ + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \ + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \ + _('Keyboard shortcut: type "!" to send all your coins.') amount_label = HelpLabel(_('Amount'), msg) grid.addWidget(amount_label, 3, 0) grid.addWidget(self.amount_e, 3, 1) self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_send_e.setVisible(False) grid.addWidget(self.fiat_send_e, 3, 2) self.amount_e.frozen.connect( lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly())) self.max_button = EnterButton(_("Max"), self.spend_max) self.max_button.setFixedWidth(100) self.max_button.setCheckable(True) grid.addWidget(self.max_button, 3, 3) self.save_button = EnterButton(_("Save"), self.do_save_invoice) self.send_button = EnterButton(_("Pay"), self.do_pay) self.clear_button = EnterButton(_("Clear"), self.do_clear) buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.clear_button) buttons.addWidget(self.save_button) buttons.addWidget(self.send_button) grid.addLayout(buttons, 6, 1, 1, 4) self.amount_e.shortcut.connect(self.spend_max) def reset_max(text): self.max_button.setChecked(False) enable = not bool(text) and not self.amount_e.isReadOnly() #self.max_button.setEnabled(enable) self.amount_e.textEdited.connect(reset_max) self.fiat_send_e.textEdited.connect(reset_max) self.set_onchain(False) self.invoices_label = QLabel(_('Outgoing payments')) from .invoice_list import InvoiceList self.invoice_list = InvoiceList(self) vbox0 = QVBoxLayout() vbox0.addLayout(grid) hbox = QHBoxLayout() hbox.addLayout(vbox0) hbox.addStretch(1) w = QWidget() vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.invoices_label) vbox.addWidget(self.invoice_list) vbox.setStretchFactor(self.invoice_list, 1000) w.searchable_list = self.invoice_list run_hook('create_send_tab', grid) return w def spend_max(self): if run_hook('abort_send', self): return outputs = self.payto_e.get_outputs(True) if not outputs: return make_tx = lambda fee_est: self.wallet.make_unsigned_transaction( coins=self.get_coins(), outputs=outputs, fee=fee_est, is_sweep=False) try: tx = make_tx(None) except (NotEnoughFunds, 
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs) as e: self.max_button.setChecked(False) self.show_error(str(e)) return self.max_button.setChecked(True) amount = tx.output_value() __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0) amount_after_all_fees = amount - x_fee_amount self.amount_e.setAmount(amount_after_all_fees) def get_contact_payto(self, key): _type, label = self.contacts.get(key) return label + ' <' + key + '>' if _type == 'address' else key def update_completions(self): l = [self.get_contact_payto(key) for key in self.contacts.keys()] self.completions.setStringList(l) @protected def protect(self, func, args, password): return func(*args, password) def read_outputs(self) -> List[PartialTxOutput]: if self.payment_request: outputs = self.payment_request.get_outputs() else: outputs = self.payto_e.get_outputs(self.max_button.isChecked()) return outputs def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool: """Returns whether there are errors with outputs. Also shows error dialog to user if so. """ if not outputs: self.show_error(_('No outputs')) return True for o in outputs: if o.scriptpubkey is None: self.show_error(_('Bitcoin Address is None')) return True if o.value is None: self.show_error(_('Invalid Amount')) return True return False # no errors def check_send_tab_payto_line_and_show_errors(self) -> bool: """Returns whether there are errors. Also shows error dialog to user if so. """ pr = self.payment_request if pr: if pr.has_expired(): self.show_error(_('Payment request has expired')) return True if not pr: errors = self.payto_e.get_errors() if errors: self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... 
({repr(err.exc)})" for err in errors])) return True if self.payto_e.is_alias and self.payto_e.validated is False: alias = self.payto_e.toPlainText() msg = _('WARNING: the alias "{}" could not be validated via an additional ' 'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n' msg += _('Do you wish to continue?') if not self.question(msg): return True return False # no errors def pay_lightning_invoice(self, invoice, amount_sat=None): attempts = LN_NUM_PAYMENT_ATTEMPTS def task(): self.wallet.lnworker.pay(invoice, amount_sat, attempts) self.do_clear() self.wallet.thread.add(task) self.invoice_list.update() def on_request_status(self, key, status): if key not in self.wallet.receive_requests: return if status == PR_PAID: self.notify(_('Payment received') + '\n' + key) self.need_update.set() def on_invoice_status(self, key): req = self.wallet.get_invoice(key) if req is None: return self.invoice_list.update_item(key, req) def on_payment_succeeded(self, key, description=None): self.show_message(_('Payment succeeded')) self.need_update.set() def on_payment_failed(self, key, reason): self.show_error(_('Payment failed') + '\n\n' + reason) def read_invoice(self): if self.check_send_tab_payto_line_and_show_errors(): return if not self._is_onchain: invoice = self.payto_e.lightning_invoice if not invoice: return if not self.wallet.lnworker: self.show_error(_('Lightning is disabled')) return invoice_dict = self.wallet.lnworker.parse_bech32_invoice(invoice) if invoice_dict.get('amount') is None: amount = self.amount_e.get_amount() if amount: invoice_dict['amount'] = amount else: self.show_error(_('No amount')) return return invoice_dict else: outputs = self.read_outputs() if self.check_send_tab_onchain_outputs_and_show_errors(outputs): return message = self.message_e.text() return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI) def do_save_invoice(self): invoice = self.read_invoice() if not invoice: return self.wallet.save_invoice(invoice) self.do_clear() self.invoice_list.update() def do_pay(self): invoice = self.read_invoice() if not invoice: return self.wallet.save_invoice(invoice) self.invoice_list.update() self.do_clear() self.do_pay_invoice(invoice) def pay_multiple_invoices(self, invoices): outputs = [] for invoice in invoices: outputs += invoice['outputs'] self.pay_onchain_dialog(self.get_coins(), outputs) def do_pay_invoice(self, invoice): if invoice['type'] == PR_TYPE_LN: self.pay_lightning_invoice(invoice['invoice'], amount_sat=invoice['amount']) elif invoice['type'] == PR_TYPE_ONCHAIN: outputs = invoice['outputs'] self.pay_onchain_dialog(self.get_coins(), outputs) else: raise Exception('unknown invoice type') def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]: coins = self.get_manually_selected_coins() if coins is not None: return coins else: return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only) def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]: """Return a list of selected coins or None. Note: None means selection is not being used, while an empty sequence means the user specifically selected that. 
""" return self.utxo_list.get_spend_list() def pay_onchain_dialog(self, inputs: Sequence[PartialTxInput], outputs: List[PartialTxOutput], *, external_keypairs=None) -> None: # trustedcoin requires this if run_hook('abort_send', self): return is_sweep = bool(external_keypairs) make_tx = lambda fee_est: self.wallet.make_unsigned_transaction( coins=inputs, outputs=outputs, fee=fee_est, is_sweep=is_sweep) output_values = [x.value for x in outputs] if output_values.count('!') > 1: self.show_error(_("More than one output set to spend max")) return if self.config.get('advanced_preview'): self.preview_tx_dialog(make_tx=make_tx, external_keypairs=external_keypairs) return output_value = '!' if '!' in output_values else sum(output_values) d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep) if d.not_enough_funds: self.show_message(_('Not Enough Funds')) return cancelled, is_send, password, tx = d.run() if cancelled: return if is_send: def sign_done(success): if success: self.broadcast_or_show(tx) self.sign_tx_with_password(tx, callback=sign_done, password=password, external_keypairs=external_keypairs) else: self.preview_tx_dialog(make_tx=make_tx, external_keypairs=external_keypairs) def preview_tx_dialog(self, *, make_tx, external_keypairs=None): d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs, window=self) d.show() def broadcast_or_show(self, tx: Transaction): if not tx.is_complete(): self.show_transaction(tx) return if not self.network: self.show_error(_("You can't broadcast a transaction without a live network connection.")) self.show_transaction(tx) return self.broadcast_transaction(tx) @protected def sign_tx(self, tx, *, callback, external_keypairs, password): self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs) def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None): '''Sign the transaction in a separate thread. When done, calls the callback with a success code of True or False. ''' def on_success(result): callback(True) def on_failure(exc_info): self.on_error(exc_info) callback(False) on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success if external_keypairs: # can sign directly task = partial(tx.sign, external_keypairs) else: task = partial(self.wallet.sign_transaction, tx, password) msg = _('Signing transaction...') WaitingDialog(self, msg, task, on_success, on_failure) def broadcast_transaction(self, tx: Transaction): def broadcast_thread(): # non-GUI thread pr = self.payment_request if pr and pr.has_expired(): self.payment_request = None return False, _("Invoice has expired") try: self.network.run_from_another_thread(self.network.broadcast_transaction(tx)) except TxBroadcastError as e: return False, e.get_message_for_gui() except BestEffortRequestFailed as e: return False, repr(e) # success txid = tx.txid() if pr: self.payment_request = None refund_address = self.wallet.get_receiving_address() coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address) fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) ack_status, ack_msg = fut.result(timeout=20) self.logger.info(f"Payment ACK: {ack_status}. 
Ack message: {ack_msg}") return True, txid # Capture current TL window; override might be removed on return parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin)) def broadcast_done(result): # GUI thread if result: success, msg = result if success: parent.show_message(_('Payment sent.') + '\n' + msg) self.invoice_list.update() else: msg = msg or '' parent.show_error(msg) WaitingDialog(self, _('Broadcasting transaction...'), broadcast_thread, broadcast_done, self.on_error) def mktx_for_open_channel(self, funding_sat): coins = self.get_coins(nonlocal_only=True) make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins, funding_sat=funding_sat, fee_est=fee_est) return make_tx def open_channel(self, connect_str, funding_sat, push_amt): # use ConfirmTxDialog # we need to know the fee before we broadcast, because the txid is required make_tx = self.mktx_for_open_channel(funding_sat) d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False) # disable preview button because the user must not broadcast tx before establishment_flow d.preview_button.setEnabled(False) cancelled, is_send, password, funding_tx = d.run() if not is_send: return if cancelled: return # read funding_sat from tx; converts '!' to int value funding_sat = funding_tx.output_value_for_address(ln_dummy_address()) def task(): return self.wallet.lnworker.open_channel(connect_str=connect_str, funding_tx=funding_tx, funding_sat=funding_sat, push_amt_sat=push_amt, password=password) def on_success(args): chan, funding_tx = args n = chan.constraints.funding_txn_minimum_depth message = '\n'.join([ _('Channel established.'), _('Remote peer ID') + ':' + chan.node_id.hex(), _('This channel will be usable after {} confirmations').format(n) ]) if not funding_tx.is_complete(): message += '\n\n' + _('Please sign and broadcast the funding transaction') self.show_message(message) if not funding_tx.is_complete(): self.show_transaction(funding_tx) def on_failure(exc_info): type_, e, traceback = exc_info self.show_error(_('Could not open channel: {}').format(repr(e))) WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure) def query_choice(self, msg, choices): # Needed by QtHandler for hardware wallets dialog = WindowModalDialog(self.top_level_window()) clayout = ChoicesLayout(msg, choices) vbox = QVBoxLayout(dialog) vbox.addLayout(clayout.layout()) vbox.addLayout(Buttons(OkButton(dialog))) if not dialog.exec_(): return None return clayout.selected_index() def lock_amount(self, b: bool) -> None: self.amount_e.setFrozen(b) self.max_button.setEnabled(not b) def prepare_for_payment_request(self): self.show_send_tab() self.payto_e.is_pr = True for e in [self.payto_e, self.message_e]: e.setFrozen(True) self.lock_amount(True) self.payto_e.setText(_("please wait...")) return True def delete_invoices(self, keys): for key in keys: self.wallet.delete_invoice(key) self.invoice_list.update() def payment_request_ok(self): pr = self.payment_request if not pr: return key = pr.get_id() invoice = self.wallet.get_invoice(key) if invoice and invoice['status'] == PR_PAID: self.show_message("invoice already paid") self.do_clear() self.payment_request = None return self.payto_e.is_pr = True if not pr.has_expired(): self.payto_e.setGreen() else: self.payto_e.setExpired() self.payto_e.setText(pr.get_requestor()) self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point)) self.message_e.setText(pr.get_memo()) # signal to set fee 
self.amount_e.textEdited.emit("") def payment_request_error(self): pr = self.payment_request if not pr: return self.show_message(pr.error) self.payment_request = None self.do_clear() def on_pr(self, request: 'paymentrequest.PaymentRequest'): self.set_onchain(True) self.payment_request = request if self.payment_request.verify(self.contacts): self.payment_request_ok_signal.emit() else: self.payment_request_error_signal.emit() def parse_lightning_invoice(self, invoice): """Parse ln invoice, and prepare the send tab for it.""" from electrum.lnaddr import lndecode, LnDecodeException try: lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP) except Exception as e: raise LnDecodeException(e) from e pubkey = bh2u(lnaddr.pubkey.serialize()) for k,v in lnaddr.tags: if k == 'd': description = v break else: description = '' self.payto_e.setFrozen(True) self.payto_e.setText(pubkey) self.message_e.setText(description) if lnaddr.amount is not None: self.amount_e.setAmount(lnaddr.amount * COIN) #self.amount_e.textEdited.emit("") self.set_onchain(False) def set_onchain(self, b): self._is_onchain = b self.max_button.setEnabled(b) def pay_to_URI(self, URI): if not URI: return try: out = util.parse_URI(URI, self.on_pr) except InvalidBitcoinURI as e: self.show_error(_("Error parsing URI") + f":\n{e}") return self.show_send_tab() self.payto_URI = out r = out.get('r') sig = out.get('sig') name = out.get('name') if r or (name and sig): self.prepare_for_payment_request() return address = out.get('address') amount = out.get('amount') label = out.get('label') message = out.get('message') # use label as description (not BIP21 compliant) if label and not message: message = label if address: self.payto_e.setText(address) if message: self.message_e.setText(message) if amount: self.amount_e.setAmount(amount) self.amount_e.textEdited.emit("") def do_clear(self): self.max_button.setChecked(False) self.payment_request = None self.payto_URI = None self.payto_e.is_pr = False self.set_onchain(False) for e in [self.payto_e, self.message_e, self.amount_e]: e.setText('') e.setFrozen(False) self.update_status() run_hook('do_clear', self) def set_frozen_state_of_addresses(self, addrs, freeze: bool): self.wallet.set_frozen_state_of_addresses(addrs, freeze) self.address_list.update() self.utxo_list.update() def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool): self.wallet.set_frozen_state_of_coins(utxos, freeze) self.utxo_list.update() def create_list_tab(self, l, toolbar=None): w = QWidget() w.searchable_list = l vbox = QVBoxLayout() w.setLayout(vbox) #vbox.setContentsMargins(0, 0, 0, 0) #vbox.setSpacing(0) if toolbar: vbox.addLayout(toolbar) vbox.addWidget(l) return w def create_addresses_tab(self): from .address_list import AddressList self.address_list = l = AddressList(self) toolbar = l.create_toolbar(self.config) toolbar_shown = bool(self.config.get('show_toolbar_addresses', False)) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def create_utxo_tab(self): from .utxo_list import UTXOList self.utxo_list = UTXOList(self) return self.create_list_tab(self.utxo_list) def create_contacts_tab(self): from .contact_list import ContactList self.contact_list = l = ContactList(self) return self.create_list_tab(l) def remove_address(self, addr): if self.question(_("Do you want to remove {} from your wallet?").format(addr)): self.wallet.delete_address(addr) self.need_update.set() # history, addresses, coins self.clear_receive_tab() def paytomany(self): self.show_send_tab() 
self.payto_e.paytomany() msg = '\n'.join([ _('Enter a list of outputs in the \'Pay to\' field.'), _('One output per line.'), _('Format: address, amount'), _('You may load a CSV file using the file icon.') ]) self.show_message(msg, title=_('Pay to many')) def payto_contacts(self, labels): paytos = [self.get_contact_payto(label) for label in labels] self.show_send_tab() if len(paytos) == 1: self.payto_e.setText(paytos[0]) self.amount_e.setFocus() else: text = "\n".join([payto + ", 0" for payto in paytos]) self.payto_e.setText(text) self.payto_e.setFocus() def set_contact(self, label, address): if not is_address(address): self.show_error(_('Invalid Address')) self.contact_list.update() # Displays original unchanged value return False self.contacts[address] = ('address', label) self.contact_list.update() self.history_list.update() self.update_completions() return True def delete_contacts(self, labels): if not self.question(_("Remove {} from your list of contacts?") .format(" + ".join(labels))): return for label in labels: self.contacts.pop(label) self.history_list.update() self.contact_list.update() self.update_completions() def show_invoice(self, key): invoice = self.wallet.get_invoice(key) if invoice is None: self.show_error('Cannot find payment request in wallet.') return bip70 = invoice.get('bip70') if bip70: pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70)) pr.verify(self.contacts) self.show_bip70_details(pr) def show_bip70_details(self, pr: 'paymentrequest.PaymentRequest'): key = pr.get_id() d = WindowModalDialog(self, _("BIP70 Invoice")) vbox = QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0) grid.addWidget(QLabel(pr.get_requestor()), 0, 1) grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) outputs_str = '\n'.join(map(lambda x: self.format_amount(x.value)+ self.base_unit() + ' @ ' + x.address, pr.get_outputs())) grid.addWidget(QLabel(outputs_str), 1, 1) expires = pr.get_expiration_date() grid.addWidget(QLabel(_("Memo") + ':'), 2, 0) grid.addWidget(QLabel(pr.get_memo()), 2, 1) grid.addWidget(QLabel(_("Signature") + ':'), 3, 0) grid.addWidget(QLabel(pr.get_verify_status()), 3, 1) if expires: grid.addWidget(QLabel(_("Expires") + ':'), 4, 0) grid.addWidget(QLabel(format_time(expires)), 4, 1) vbox.addLayout(grid) def do_export(): name = str(key) + '.bip70' fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70") if not fn: return with open(fn, 'wb') as f: data = f.write(pr.raw) self.show_message(_('Invoice saved as' + ' ' + fn)) exportButton = EnterButton(_('Save'), do_export) # note: "delete" disabled as invoice is saved with a different key in wallet.invoices that we do not have here # def do_delete(): # if self.question(_('Delete invoice?')): # self.wallet.delete_invoice(key) # self.history_list.update() # self.invoice_list.update() # d.close() # deleteButton = EnterButton(_('Delete'), do_delete) vbox.addLayout(Buttons(exportButton, CloseButton(d))) d.exec_() def create_console_tab(self): from .console import Console self.console = console = Console() return console def update_console(self): console = self.console console.history = self.wallet.db.get("qt-console-history", []) console.history_index = len(console.history) console.updateNamespace({ 'wallet': self.wallet, 'network': self.network, 'plugins': self.gui_object.plugins, 'window': self, 'config': self.config, 'electrum': electrum, 'daemon': self.gui_object.daemon, 'util': util, 'bitcoin': bitcoin, }) c = commands.Commands(config=self.config, network=self.network, 
callback=lambda: self.console.set_json(True)) methods = {} def mkfunc(f, method): return lambda *args, **kwargs: f(method, args, self.password_dialog, **{**kwargs, 'wallet': self.wallet}) for m in dir(c): if m[0]=='_' or m in ['network','wallet','config']: continue methods[m] = mkfunc(c._run, m) console.updateNamespace(methods) def create_status_bar(self): sb = QStatusBar() sb.setFixedHeight(35) self.balance_label = QLabel("Loading wallet...") self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.balance_label.setStyleSheet("""QLabel { padding: 0 }""") sb.addWidget(self.balance_label) self.search_box = QLineEdit() self.search_box.textChanged.connect(self.do_search) self.search_box.hide() sb.addPermanentWidget(self.search_box) self.update_check_button = QPushButton("") self.update_check_button.setFlat(True) self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor)) self.update_check_button.setIcon(read_QIcon("update.png")) self.update_check_button.hide() sb.addPermanentWidget(self.update_check_button) self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog ) sb.addPermanentWidget(self.password_button) sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) ) self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog ) sb.addPermanentWidget(self.seed_button) self.lightning_button = None if self.wallet.has_lightning() and self.network: self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog) self.update_lightning_icon() sb.addPermanentWidget(self.lightning_button) self.status_button = None if self.network: self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog) sb.addPermanentWidget(self.status_button) run_hook('create_status_bar', sb) self.setStatusBar(sb) def create_coincontrol_statusbar(self): self.coincontrol_sb = sb = QStatusBar() sb.setSizeGripEnabled(False) #sb.setFixedHeight(3 * char_width_in_lineedit()) sb.setStyleSheet('QStatusBar::item {border: None;} ' + ColorScheme.GREEN.as_stylesheet(True)) self.coincontrol_label = QLabel() self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred) self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse) sb.addWidget(self.coincontrol_label) clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None)) clear_cc_button.setStyleSheet("margin-right: 5px;") sb.addPermanentWidget(clear_cc_button) sb.setVisible(False) return sb def set_coincontrol_msg(self, msg: Optional[str]) -> None: if not msg: self.coincontrol_label.setText("") self.coincontrol_sb.setVisible(False) return self.coincontrol_label.setText(msg) self.coincontrol_sb.setVisible(True) def update_lightning_icon(self): if self.lightning_button is None: return if not self.network.is_lightning_running(): return cur, total = self.network.lngossip.get_sync_progress_estimate() # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}") progress_percent = 0 progress_str = "??%" if cur is not None and total is not None and total > 0: # note: Progress is rescaled such that 95% is considered "done". # "Real" progress can stay around 98-99% for a long time, which # might needlessly worry users. 
progress_percent = (1.0 / 0.95 * cur / total) * 100 progress_percent = min(progress_percent, 100) progress_percent = round(progress_percent) progress_str = f"{progress_percent}%" if progress_percent >= 100: self.lightning_button.setMaximumWidth(25) self.lightning_button.setText('') self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced.")) else: self.lightning_button.setMaximumWidth(25 + 4 * char_width_in_lineedit()) self.lightning_button.setText(progress_str) self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n" "Payments are more likely to succeed with a more complete graph.")) def update_lock_icon(self): icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png") self.password_button.setIcon(icon) def update_buttons_on_seed(self): self.seed_button.setVisible(self.wallet.has_seed()) self.password_button.setVisible(self.wallet.may_have_password()) def change_password_dialog(self): from electrum.storage import StorageEncryptionVersion if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD: from .password_dialog import ChangePasswordDialogForHW d = ChangePasswordDialogForHW(self, self.wallet) ok, encrypt_file = d.run() if not ok: return try: hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption() except UserCancelled: return except BaseException as e: self.logger.exception('') self.show_error(repr(e)) return old_password = hw_dev_pw if self.wallet.has_password() else None new_password = hw_dev_pw if encrypt_file else None else: from .password_dialog import ChangePasswordDialogForSW d = ChangePasswordDialogForSW(self, self.wallet) ok, old_password, new_password, encrypt_file = d.run() if not ok: return try: self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file) except InvalidPassword as e: self.show_error(str(e)) return except BaseException: self.logger.exception('Failed to update password') self.show_error(_('Failed to update password')) return msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected') self.show_message(msg, title=_("Success")) self.update_lock_icon() def toggle_search(self): self.search_box.setHidden(not self.search_box.isHidden()) if not self.search_box.isHidden(): self.search_box.setFocus(1) else: self.do_search('') def do_search(self, t): tab = self.tabs.currentWidget() if hasattr(tab, 'searchable_list'): tab.searchable_list.filter(t) def new_contact_dialog(self): d = WindowModalDialog(self, _("New Contact")) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_('New Contact') + ':')) grid = QGridLayout() line1 = QLineEdit() line1.setFixedWidth(32 * char_width_in_lineedit()) line2 = QLineEdit() line2.setFixedWidth(32 * char_width_in_lineedit()) grid.addWidget(QLabel(_("Address")), 1, 0) grid.addWidget(line1, 1, 1) grid.addWidget(QLabel(_("Name")), 2, 0) grid.addWidget(line2, 2, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if d.exec_(): self.set_contact(line2.text(), line1.text()) def disable_lightning(self): warning = _('This will delete your lightning private keys') r = self.question(_('Disable Lightning payments?') + '\n\n' + warning) if not r: return self.wallet.remove_lightning() self.show_warning(_('Lightning keys have been removed. This wallet will be closed')) self.close() def enable_lightning(self): warning1 = _("Lightning support in Electrum is experimental. 
Do not put large amounts in lightning channels.") warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.") r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2) if not r: return self.wallet.init_lightning() self.show_warning(_('Lightning keys have been initialized. This wallet will be closed')) self.close() def show_wallet_info(self): dialog = WindowModalDialog(self, _("Wallet Information")) dialog.setMinimumSize(500, 100) mpk_list = self.wallet.get_master_public_keys() vbox = QVBoxLayout() wallet_type = self.wallet.db.get('wallet_type', '') if self.wallet.is_watching_only(): wallet_type += ' [{}]'.format(_('watching-only')) seed_available = _('True') if self.wallet.has_seed() else _('False') keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()] grid = QGridLayout() basename = os.path.basename(self.wallet.storage.path) grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0) grid.addWidget(QLabel(basename), 0, 1) grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0) grid.addWidget(QLabel(wallet_type), 1, 1) grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0) grid.addWidget(QLabel(self.wallet.txin_type), 2, 1) grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0) grid.addWidget(QLabel(str(seed_available)), 3, 1) if len(keystore_types) <= 1: grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0) ks_type = str(keystore_types[0]) if keystore_types else _('No keystore') grid.addWidget(QLabel(ks_type), 4, 1) # lightning if self.wallet.can_have_lightning(): if self.wallet.has_lightning(): lightning_b = QPushButton(_('Disable')) lightning_b.clicked.connect(dialog.close) lightning_b.clicked.connect(self.disable_lightning) lightning_label = QLabel(_('Enabled')) lightning_b.setDisabled(bool(self.wallet.lnworker.channels)) else: lightning_b = QPushButton(_('Enable')) lightning_b.clicked.connect(dialog.close) lightning_b.clicked.connect(self.enable_lightning) lightning_label = QLabel(_('Disabled')) grid.addWidget(QLabel(_('Lightning')), 5, 0) grid.addWidget(lightning_label, 5, 1) grid.addWidget(lightning_b, 5, 2) vbox.addLayout(grid) if self.wallet.is_deterministic(): mpk_text = ShowQRTextEdit() mpk_text.setMaximumHeight(150) mpk_text.addCopyButton(self.app) def show_mpk(index): mpk_text.setText(mpk_list[index]) mpk_text.repaint() # macOS hack for #4777 # only show the combobox in case multiple accounts are available if len(mpk_list) > 1: # only show the combobox if multiple master keys are defined def label(idx, ks): if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'): return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}' else: return _("keystore") + f' {idx+1}' labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())] on_click = lambda clayout: show_mpk(clayout.selected_index()) labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click) vbox.addLayout(labels_clayout.layout()) else: vbox.addWidget(QLabel(_("Master Public Key"))) show_mpk(0) vbox.addWidget(mpk_text) vbox.addStretch(1) btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog)) vbox.addLayout(btns) dialog.setLayout(vbox) dialog.exec_() def remove_wallet(self): if self.question('\n'.join([ _('Delete wallet file?'), "%s"%self.wallet.storage.path, _('If your wallet contains funds, make sure you have saved its seed.')])): self._delete_wallet() @protected def 
_delete_wallet(self, password): wallet_path = self.wallet.storage.path basename = os.path.basename(wallet_path) r = self.gui_object.daemon.delete_wallet(wallet_path) self.close() if r: self.show_error(_("Wallet removed: {}").format(basename)) else: self.show_error(_("Wallet file not found: {}").format(basename)) @protected def show_seed_dialog(self, password): if not self.wallet.has_seed(): self.show_message(_('This wallet has no seed')) return keystore = self.wallet.get_keystore() try: seed = keystore.get_seed(password) passphrase = keystore.get_passphrase(password) except BaseException as e: self.show_error(repr(e)) return from .seed_dialog import SeedDialog d = SeedDialog(self, seed, passphrase) d.exec_() def show_qrcode(self, data, title = _("QR code"), parent=None): if not data: return d = QRDialog(data, parent or self, title) d.exec_() @protected def show_private_key(self, address, password): if not address: return try: pk = self.wallet.export_private_key(address, password) except Exception as e: self.logger.exception('') self.show_message(repr(e)) return xtype = bitcoin.deserialize_privkey(pk)[0] d = WindowModalDialog(self, _("Private key")) d.setMinimumSize(600, 150) vbox = QVBoxLayout() vbox.addWidget(QLabel(_("Address") + ': ' + address)) vbox.addWidget(QLabel(_("Script type") + ': ' + xtype)) vbox.addWidget(QLabel(_("Private key") + ':')) keys_e = ShowQRTextEdit(text=pk) keys_e.addCopyButton(self.app) vbox.addWidget(keys_e) # if redeem_script: # vbox.addWidget(QLabel(_("Redeem Script") + ':')) # rds_e = ShowQRTextEdit(text=redeem_script) # rds_e.addCopyButton(self.app) # vbox.addWidget(rds_e) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) d.exec_() msg_sign = _("Signing with an address actually means signing with the corresponding " "private key, and verifying with the corresponding public key. The " "address you have entered does not have a unique public key, so these " "operations cannot be performed.") + '\n\n' + \ _('The operation is undefined. 
Not just in Electrum, but in general.') @protected def do_sign(self, address, message, signature, password): address = address.text().strip() message = message.toPlainText().strip() if not bitcoin.is_address(address): self.show_message(_('Invalid Bitcoin address.')) return if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return if not self.wallet.is_mine(address): self.show_message(_('Address not in wallet.')) return txin_type = self.wallet.get_txin_type(address) if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']: self.show_message(_('Cannot sign messages with this type of address:') + \ ' ' + txin_type + '\n\n' + self.msg_sign) return task = partial(self.wallet.sign_message, address, message, password) def show_signed_message(sig): try: signature.setText(base64.b64encode(sig).decode('ascii')) except RuntimeError: # (signature) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=show_signed_message) def do_verify(self, address, message, signature): address = address.text().strip() message = message.toPlainText().strip().encode('utf-8') if not bitcoin.is_address(address): self.show_message(_('Invalid Bitcoin address.')) return try: # This can throw on invalid base64 sig = base64.b64decode(str(signature.toPlainText())) verified = ecc.verify_message_with_address(address, sig, message) except Exception as e: verified = False if verified: self.show_message(_("Signature verified")) else: self.show_error(_("Wrong signature")) def sign_verify_message(self, address=''): d = WindowModalDialog(self, _('Sign/verify Message')) d.setMinimumSize(610, 290) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) address_e = QLineEdit() address_e.setText(address) layout.addWidget(QLabel(_('Address')), 2, 0) layout.addWidget(address_e, 2, 1) signature_e = QTextEdit() signature_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Signature')), 3, 0) layout.addWidget(signature_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Verify")) b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() @protected def do_decrypt(self, message_e, pubkey_e, encrypted_e, password): if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return cyphertext = encrypted_e.toPlainText() task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password) def setText(text): try: message_e.setText(text.decode('utf-8')) except RuntimeError: # (message_e) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=setText) def do_encrypt(self, message_e, pubkey_e, encrypted_e): message = message_e.toPlainText() message = message.encode('utf-8') try: public_key = ecc.ECPubkey(bfh(pubkey_e.text())) except BaseException as e: self.logger.exception('Invalid Public key') self.show_warning(_('Invalid Public key')) return encrypted = public_key.encrypt_message(message) encrypted_e.setText(encrypted.decode('ascii')) def encrypt_message(self, address=''): d = WindowModalDialog(self, _('Encrypt/decrypt Message')) d.setMinimumSize(610, 490) layout = QGridLayout(d) message_e = 
QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) pubkey_e = QLineEdit() if address: pubkey = self.wallet.get_public_key(address) pubkey_e.setText(pubkey) layout.addWidget(QLabel(_('Public key')), 2, 0) layout.addWidget(pubkey_e, 2, 1) encrypted_e = QTextEdit() encrypted_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Encrypted')), 3, 0) layout.addWidget(encrypted_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Encrypt")) b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Decrypt")) b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def password_dialog(self, msg=None, parent=None): from .password_dialog import PasswordDialog parent = parent or self d = PasswordDialog(parent, msg) return d.run() def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']: from electrum.transaction import tx_from_any try: return tx_from_any(data) except BaseException as e: self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e)) return def read_tx_from_qrcode(self): from electrum import qrscanner try: data = qrscanner.scan_barcode(self.config.get_video_device()) except BaseException as e: self.show_error(repr(e)) return if not data: return # if the user scanned a bitcoin URI if str(data).startswith("bitcoin:"): self.pay_to_URI(data) return # else if the user scanned an offline signed tx tx = self.tx_from_text(data) if not tx: return self.show_transaction(tx) def read_tx_from_file(self) -> Optional[Transaction]: fileName = self.getOpenFileName(_("Select your transaction file"), TRANSACTION_FILE_EXTENSION_FILTER_ANY) if not fileName: return try: with open(fileName, "rb") as f: file_content = f.read() # type: Union[str, bytes] except (ValueError, IOError, os.error) as reason: self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found")) return return self.tx_from_text(file_content) def do_process_from_text(self): text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction")) if not text: return tx = self.tx_from_text(text) if tx: self.show_transaction(tx) def do_process_from_file(self): tx = self.read_tx_from_file() if tx: self.show_transaction(tx) def do_process_from_txid(self): from electrum import transaction txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':') if ok and txid: txid = str(txid).strip() try: raw_tx = self.network.run_from_another_thread( self.network.get_transaction(txid, timeout=10)) except UntrustedServerReturnedError as e: self.logger.info(f"Error getting transaction from network: {repr(e)}") self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui()) return except Exception as e: self.show_message(_("Error getting transaction from network") + ":\n" + repr(e)) return else: tx = transaction.Transaction(raw_tx) self.show_transaction(tx) @protected def export_privkeys_dialog(self, password): if self.wallet.is_watching_only(): self.show_message(_("This is a watching-only wallet")) return if isinstance(self.wallet, Multisig_Wallet): self.show_message(_('WARNING: This is a 
multi-signature wallet.') + '\n' + _('It cannot be "backed up" by simply exporting these private keys.')) d = WindowModalDialog(self, _('Private keys')) d.setMinimumSize(980, 300) vbox = QVBoxLayout(d) msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."), _("Exposing a single private key can compromise your entire wallet!"), _("In particular, DO NOT use 'redeem private key' services proposed by third parties.")) vbox.addWidget(QLabel(msg)) e = QTextEdit() e.setReadOnly(True) vbox.addWidget(e) defaultname = 'electrum-private-keys.csv' select_msg = _('Select file to export your private keys to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) b = OkButton(d, _('Export')) b.setEnabled(False) vbox.addLayout(Buttons(CancelButton(d), b)) private_keys = {} addresses = self.wallet.get_addresses() done = False cancelled = False def privkeys_thread(): for addr in addresses: time.sleep(0.1) if done or cancelled: break privkey = self.wallet.export_private_key(addr, password) private_keys[addr] = privkey self.computing_privkeys_signal.emit() if not cancelled: self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.emit() def show_privkeys(): s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items())) e.setText(s) b.setEnabled(True) self.show_privkeys_signal.disconnect() nonlocal done done = True def on_dialog_closed(*args): nonlocal done nonlocal cancelled if not done: cancelled = True self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.disconnect() self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses)))) self.show_privkeys_signal.connect(show_privkeys) d.finished.connect(on_dialog_closed) threading.Thread(target=privkeys_thread).start() if not d.exec_(): done = True return filename = filename_e.text() if not filename: return try: self.do_export_privkeys(filename, private_keys, csv_button.isChecked()) except (IOError, os.error) as reason: txt = "\n".join([ _("Electrum was unable to produce a private key-export."), str(reason) ]) self.show_critical(txt, title=_("Unable to create csv")) except Exception as e: self.show_message(repr(e)) return self.show_message(_("Private keys exported.")) def do_export_privkeys(self, fileName, pklist, is_csv): with open(fileName, "w+") as f: if is_csv: transaction = csv.writer(f) transaction.writerow(["address", "private_key"]) for addr, pk in pklist.items(): transaction.writerow(["%34s"%addr,pk]) else: f.write(json.dumps(pklist, indent = 4)) def do_import_labels(self): def import_labels(path): def _validate(data): return data # TODO def import_labels_assign(data): for key, value in data.items(): self.wallet.set_label(key, value) import_meta(path, _validate, import_labels_assign) def on_import(): self.need_update.set() import_meta_gui(self, _('labels'), import_labels, on_import) def do_export_labels(self): def export_labels(filename): export_meta(self.wallet.labels, filename) export_meta_gui(self, _('labels'), export_labels) def sweep_key_dialog(self): d = WindowModalDialog(self, title=_('Sweep private keys')) d.setMinimumSize(600, 300) vbox = QVBoxLayout(d) hbox_top = QHBoxLayout() hbox_top.addWidget(QLabel(_("Enter private keys:"))) hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) vbox.addLayout(hbox_top) keys_e = ScanQRTextEdit(allow_multi=True) keys_e.setTabChangesFocus(True) vbox.addWidget(keys_e) addresses = self.wallet.get_unused_addresses() if not addresses: 
            try:
                addresses = self.wallet.get_receiving_addresses()
            except AttributeError:
                addresses = self.wallet.get_addresses()
        h, address_e = address_field(addresses)
        vbox.addLayout(h)
        vbox.addStretch(1)
        button = OkButton(d, _('Sweep'))
        vbox.addLayout(Buttons(CancelButton(d), button))
        button.setEnabled(False)

        def get_address():
            addr = str(address_e.text()).strip()
            if bitcoin.is_address(addr):
                return addr

        def get_pk(*, raise_on_error=False):
            text = str(keys_e.toPlainText())
            return keystore.get_private_keys(text, raise_on_error=raise_on_error)

        def on_edit():
            valid_privkeys = False
            try:
                valid_privkeys = get_pk(raise_on_error=True) is not None
            except Exception as e:
                button.setToolTip(f'{_("Error")}: {repr(e)}')
            else:
                button.setToolTip('')
            button.setEnabled(get_address() is not None and valid_privkeys)

        on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
        keys_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_address)
        on_address(str(address_e.text()))
        if not d.exec_():
            return
        # user pressed "sweep"
        addr = get_address()
        try:
            self.wallet.check_address(addr)
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            raise
        try:
            coins, keypairs = sweep_preparations(get_pk(), self.network)
        except Exception as e:  # FIXME too broad...
            self.show_message(repr(e))
            return
        scriptpubkey = bfh(bitcoin.address_to_script(addr))
        outputs = [PartialTxOutput(scriptpubkey=scriptpubkey, value='!')]
        self.warn_if_watching_only()
        self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)

    def _do_import(self, title, header_layout, func):
        text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
        if not text:
            return
        keys = str(text).split()
        good_inputs, bad_inputs = func(keys)
        if good_inputs:
            msg = '\n'.join(good_inputs[:10])
            if len(good_inputs) > 10:
                msg += '\n...'
            self.show_message(_("The following addresses were added") + f' ({len(good_inputs)}):\n' + msg)
        if bad_inputs:
            msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
            if len(bad_inputs) > 10:
                msg += '\n...'
            self.show_error(_("The following inputs could not be imported") + f' ({len(bad_inputs)}):\n' + msg)
        self.address_list.update()
        self.history_list.update()

    def import_addresses(self):
        if not self.wallet.can_import_address():
            return
        title, msg = _('Import addresses'), _("Enter addresses")+':'
        self._do_import(title, msg, self.wallet.import_addresses)

    @protected
    def do_import_privkey(self, password):
        if not self.wallet.can_import_privkey():
            return
        title = _('Import private keys')
        header_layout = QHBoxLayout()
        header_layout.addWidget(QLabel(_("Enter private keys")+':'))
        header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))

    def update_fiat(self):
        b = self.fx and self.fx.is_enabled()
        self.fiat_send_e.setVisible(b)
        self.fiat_receive_e.setVisible(b)
        self.history_list.update()
        self.address_list.refresh_headers()
        self.address_list.update()
        self.update_status()

    def settings_dialog(self):
        from .settings_dialog import SettingsDialog
        d = SettingsDialog(self, self.config)
        self.alias_received_signal.connect(d.set_alias_color)
        d.exec_()
        self.alias_received_signal.disconnect(d.set_alias_color)
        if self.fx:
            self.fx.trigger_update()
        run_hook('close_settings_dialog')
        if d.need_restart:
            self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))

    def closeEvent(self, event):
        # It seems in some rare cases this closeEvent() is called twice
        if not self.cleaned_up:
            self.cleaned_up = True
            self.clean_up()
        event.accept()

    def clean_up(self):
        self.wallet.thread.stop()
        if self.network:
            self.network.unregister_callback(self.on_network)
        self.config.set_key("is_maximized", self.isMaximized())
        if not self.isMaximized():
            g = self.geometry()
            self.wallet.db.put("winpos-qt", [g.left(), g.top(), g.width(), g.height()])
        self.wallet.db.put("qt-console-history", self.console.history[-50:])
        if self.qr_window:
            self.qr_window.close()
        self.close_wallet()
        self.gui_object.timer.timeout.disconnect(self.timer_actions)
        self.gui_object.close_window(self)

    def plugins_dialog(self):
        self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
        plugins = self.gui_object.plugins
        vbox = QVBoxLayout(d)
        # plugins
        scroll = QScrollArea()
        scroll.setEnabled(True)
        scroll.setWidgetResizable(True)
        scroll.setMinimumSize(400, 250)
        vbox.addWidget(scroll)
        w = QWidget()
        scroll.setWidget(w)
        w.setMinimumHeight(plugins.count() * 35)
        grid = QGridLayout()
        grid.setColumnStretch(0, 1)
        w.setLayout(grid)
        settings_widgets = {}

        def enable_settings_widget(p, name, i):
            widget = settings_widgets.get(name)
            if not widget and p and p.requires_settings():
                widget = settings_widgets[name] = p.settings_widget(d)
                grid.addWidget(widget, i, 1)
            if widget:
                widget.setEnabled(bool(p and p.is_enabled()))

        def do_toggle(cb, name, i):
            p = plugins.toggle(name)
            cb.setChecked(bool(p))
            enable_settings_widget(p, name, i)
            run_hook('init_qt', self.gui_object)

        for i, descr in enumerate(plugins.descriptions.values()):
            full_name = descr['__name__']
            prefix, _separator, name = full_name.rpartition('.')
            p = plugins.get(name)
            if descr.get('registers_keystore'):
                continue
            try:
                cb = QCheckBox(descr['fullname'])
                plugin_is_loaded = p is not None
                cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                              or plugin_is_loaded and p.can_user_disable())
                cb.setEnabled(cb_enabled)
                cb.setChecked(plugin_is_loaded and p.is_enabled())
                grid.addWidget(cb, i, 0)
                enable_settings_widget(p, name, i)
                cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description'] if descr.get('requires'): msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires'))) grid.addWidget(HelpButton(msg), i, 2) except Exception: self.logger.exception(f"cannot display plugin {name}") grid.setRowStretch(len(plugins.descriptions.values()), 1) vbox.addLayout(Buttons(CloseButton(d))) d.exec_() def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None: total_size = parent_tx.estimated_size() + new_tx.estimated_size() parent_txid = parent_tx.txid() assert parent_txid parent_fee = self.wallet.get_tx_fee(parent_txid) if parent_fee is None: self.show_error(_("Can't CPFP: unknown fee for parent transaction.")) return d = WindowModalDialog(self, _('Child Pays for Parent')) vbox = QVBoxLayout(d) msg = ( "A CPFP is a transaction that sends an unconfirmed output back to " "yourself, with a high fee. The goal is to have miners confirm " "the parent transaction in order to get the fee attached to the " "child transaction.") vbox.addWidget(WWLabel(_(msg))) msg2 = ("The proposed fee is computed using your " "fee/kB settings, applied to the total size of both child and " "parent transactions. After you broadcast a CPFP transaction, " "it is normal to see a new unconfirmed transaction in your history.") vbox.addWidget(WWLabel(_(msg2))) grid = QGridLayout() grid.addWidget(QLabel(_('Total size') + ':'), 0, 0) grid.addWidget(QLabel('%d bytes'% total_size), 0, 1) max_fee = new_tx.output_value() grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0) grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1) output_amount = QLabel('') grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0) grid.addWidget(output_amount, 2, 1) fee_e = BTCAmountEdit(self.get_decimal_point) # FIXME with dyn fees, without estimates, there are all kinds of crashes here combined_fee = QLabel('') combined_feerate = QLabel('') def on_fee_edit(x): fee_for_child = fee_e.get_amount() if fee_for_child is None: return out_amt = max_fee - fee_for_child out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else '' output_amount.setText(out_amt_str) comb_fee = parent_fee + fee_for_child comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else '' combined_fee.setText(comb_fee_str) comb_feerate = comb_fee / total_size * 1000 comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else '' combined_feerate.setText(comb_feerate_str) fee_e.textChanged.connect(on_fee_edit) def get_child_fee_from_total_feerate(fee_per_kb): fee = fee_per_kb * total_size / 1000 - parent_fee fee = min(max_fee, fee) fee = max(total_size, fee) # pay at least 1 sat/byte for combined size return fee suggested_feerate = self.config.fee_per_kb() if suggested_feerate is None: self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''') return fee = get_child_fee_from_total_feerate(suggested_feerate) fee_e.setAmount(fee) grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0) grid.addWidget(fee_e, 3, 1) def on_rate(dyn, pos, fee_rate): fee = get_child_fee_from_total_feerate(fee_rate) fee_e.setAmount(fee) fee_slider = FeeSlider(self, self.config, on_rate) fee_slider.update() grid.addWidget(fee_slider, 4, 1) grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0) grid.addWidget(combined_fee, 5, 1) grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0) grid.addWidget(combined_feerate, 6, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): 
            return
        fee = fee_e.get_amount()
        if fee is None:
            return  # fee left empty, treat it as "cancel"
        if fee > max_fee:
            self.show_error(_('Max fee exceeded'))
            return
        new_tx = self.wallet.cpfp(parent_tx, fee)
        new_tx.set_rbf(True)
        self.show_transaction(new_tx)

    def bump_fee_dialog(self, tx: Transaction):
        txid = tx.txid()
        assert txid
        fee = self.wallet.get_tx_fee(txid)
        if fee is None:
            self.show_error(_("Can't bump fee: unknown fee for original transaction."))
            return
        tx_label = self.wallet.get_label(txid)
        tx_size = tx.estimated_size()
        old_fee_rate = fee / tx_size  # sat/vbyte
        d = WindowModalDialog(self, _('Bump Fee'))
        vbox = QVBoxLayout(d)
        vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
        vbox.addWidget(QLabel(_('Current Fee') + ': %s' % self.format_amount(fee) + ' ' + self.base_unit()))
        vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
        vbox.addWidget(QLabel(_('New Fee rate') + ':'))

        def on_textedit_rate():
            fee_slider.deactivate()
        feerate_e = FeerateEdit(lambda: 0)
        feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
        feerate_e.textEdited.connect(on_textedit_rate)
        vbox.addWidget(feerate_e)

        def on_slider_rate(dyn, pos, fee_rate):
            fee_slider.activate()
            if fee_rate is not None:
                feerate_e.setAmount(fee_rate / 1000)
        fee_slider = FeeSlider(self, self.config, on_slider_rate)
        fee_slider.deactivate()
        vbox.addWidget(fee_slider)

        cb = QCheckBox(_('Final'))
        vbox.addWidget(cb)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        is_final = cb.isChecked()
        new_fee_rate = feerate_e.get_amount()
        try:
            new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
        except CannotBumpFee as e:
            self.show_error(str(e))
            return
        if is_final:
            new_tx.set_rbf(False)
        self.show_transaction(new_tx, tx_desc=tx_label)

    def save_transaction_into_wallet(self, tx: Transaction):
        win = self.top_level_window()
        try:
            if not self.wallet.add_transaction(tx):
                win.show_error(_("Transaction could not be saved.") + "\n" +
                               _("It conflicts with current history."))
                return False
        except AddTransactionException as e:
            win.show_error(e)
            return False
        else:
            self.wallet.save_db()
            # need to update at least: history_list, utxo_list, address_list
            self.need_update.set()
            msg = (_("Transaction added to wallet history.") + '\n\n' +
                   _("Note: this is an offline transaction, if you want the network "
                     "to see it, you need to broadcast it."))
            win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
            return True
py
1a43a86b66ecb9bb461797e602a5d5158846dc6d
# -*- coding: utf-8 -*- # Generated with bb_schemamigration import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models from bluebottle.utils.model_dispatcher import get_model_mapping MODEL_MAP = get_model_mapping() class Migration(SchemaMigration): depends_on = ( ('orders', '0001_initial'), ) def forwards(self, orm): # Adding model 'Donation' db.create_table(MODEL_MAP['donation']['table'], ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=16, decimal_places=2)), ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[MODEL_MAP['project']['model']])), ('fundraiser', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[MODEL_MAP['fundraiser']['model']], null=True, blank=True)), ('order', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='donations', null=True, to=orm[MODEL_MAP['order']['model']])), ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)), ('updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)), ('completed', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('anonymous', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal(MODEL_MAP['donation']['app'], ['Donation']) def backwards(self, orm): # Deleting model 'Donation' db.delete_table(MODEL_MAP['donation']['table']) models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'bb_accounts.timeavailable': { 'Meta': {'ordering': "['type']", 'object_name': 'TimeAvailable'}, 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}) }, u'bb_projects.projectphase': { 'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}), 'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 
'max_length': '200'}), 'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, u'bb_projects.projecttheme': { 'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, MODEL_MAP['donation']['model_lower']: { 'Meta': {'object_name': MODEL_MAP['donation']['class']}, 'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '16', 'decimal_places': '2'}), 'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'fundraiser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['fundraiser']['model']), 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donations'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['order']['model'])}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['project']['model'])}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}) }, MODEL_MAP['fundraiser']['model_lower']: { 'Meta': {'object_name': MODEL_MAP['fundraiser']['class']}, 'amount': ('django.db.models.fields.PositiveIntegerField', [], {}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': "'10'"}), 'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['project']['model'])}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'video_url': ('django.db.models.fields.URLField', 
[], {'default': "''", 'max_length': '100', 'blank': 'True'}) }, u'geo.country': { 'Meta': {'ordering': "['name']", 'object_name': 'Country'}, 'alpha2_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}), 'alpha3_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'oda_recipient': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.SubRegion']"}) }, u'geo.region': { 'Meta': {'ordering': "['name']", 'object_name': 'Region'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}) }, u'geo.subregion': { 'Meta': {'ordering': "['name']", 'object_name': 'SubRegion'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Region']"}) }, MODEL_MAP['order']['model_lower']: { 'Meta': {'object_name': MODEL_MAP['order']['class']}, 'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'status': ('django_fsm.db.fields.fsmfield.FSMField', [], {'default': "'created'", 'max_length': '50'}), 'total': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '16', 'decimal_places': '2'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'null': 'True', 'blank': 'True'}) }, u'taggit.tag': { 'Meta': {'object_name': 'Tag'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}) }, u'taggit.taggeditem': { 'Meta': {'object_name': 'TaggedItem'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"}) }, MODEL_MAP['project']['model_lower']: { 'Meta': {'ordering': "['title']", 'object_name': MODEL_MAP['project']['class']}, 'amount_asked': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'null': 'True', 'max_digits': '12', 
'decimal_places': '2', 'blank': 'True'}), 'amount_donated': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}), 'amount_needed': ('bluebottle.bb_projects.fields.MoneyField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}), 'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Country']", 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'blank': 'True'}), 'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['utils.Language']", 'null': 'True', 'blank': 'True'}), 'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'organization'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['organization']['model'])}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}), 'pitch': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}), 'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bb_projects.ProjectPhase']"}), 'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bb_projects.ProjectTheme']", 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}) }, MODEL_MAP['user']['model_lower']: { 'Meta': {'object_name': MODEL_MAP['user']['class']}, 'about': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}), 'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'disable_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}), 'facebook': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': 
'30', 'blank': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'picture': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'share_money': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'share_time_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'skypename': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'time_available': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bb_accounts.TimeAvailable']", 'null': 'True', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'user_type': ('django.db.models.fields.CharField', [], {'default': "'person'", 'max_length': '25'}), 'username': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'why': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}) }, MODEL_MAP['organization']['model_lower']: { 'Meta': {'ordering': "['name']", 'object_name': MODEL_MAP['organization']['class']}, 'account_bank_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'account_bank_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'account_bank_country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'account_bank_country'", 'null': 'True', 'to': u"orm['geo.Country']"}), 'account_bank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'account_bank_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}), 'account_bic': ('django_iban.fields.SWIFTBICField', [], {'max_length': '11', 'blank': 'True'}), 'account_holder_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'account_holder_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'account_holder_country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'account_holder_country'", 'null': 'True', 'to': u"orm['geo.Country']"}), 'account_holder_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'account_holder_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}), 'account_iban': ('django_iban.fields.IBANField', [], {'max_length': '34', 'blank': 'True'}), 'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'account_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'address_line1': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 
'True'}), 'address_line2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'country'", 'null': 'True', 'to': u"orm['geo.Country']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'partner_organizations': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), 'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}), 'skype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, u'utils.language': { 'Meta': {'ordering': "['language_name']", 'object_name': 'Language'}, 'code': ('django.db.models.fields.CharField', [], {'max_length': '2'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'native_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = [MODEL_MAP['donation']['app']]
py
1a43a8cdcbbfc1ecd17eefe158bf686392183be3
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.DeliveryConfig import DeliveryConfig


class SuccessDeliveryConfig(object):

    def __init__(self):
        self._delivery_config = None

    @property
    def delivery_config(self):
        return self._delivery_config

    @delivery_config.setter
    def delivery_config(self, value):
        if isinstance(value, DeliveryConfig):
            self._delivery_config = value
        else:
            self._delivery_config = DeliveryConfig.from_alipay_dict(value)

    def to_alipay_dict(self):
        params = dict()
        if self.delivery_config:
            if hasattr(self.delivery_config, 'to_alipay_dict'):
                params['delivery_config'] = self.delivery_config.to_alipay_dict()
            else:
                params['delivery_config'] = self.delivery_config
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = SuccessDeliveryConfig()
        if 'delivery_config' in d:
            o.delivery_config = d['delivery_config']
        return o
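
# Hedged usage sketch (not part of the generated SDK module): shows the dict
# round trip these wrappers are meant for; from_alipay_dict is the inverse of
# to_alipay_dict. Real DeliveryConfig fields come from the Alipay OpenAPI spec.
if __name__ == '__main__':
    cfg = SuccessDeliveryConfig()
    print(cfg.to_alipay_dict())   # -> {} until a delivery_config is assigned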
py
1a43a8ce7d5c8d5d2a366adb5c895d94aa649cbc
""" Contains the crucial to the simulation classes like SimEnvironment and SimObject """ from __future__ import annotations import pygame from .component import Component from .component_container import ComponentContainer from .event_system import EventSystem, Event from abc import ABC from typing import Iterable, Set, Optional, Sequence class SimEnvironment(ComponentContainer): """ A container for SimObjects and EnvironmentComponents. """ def __init__(self, sim_objects: Iterable[SimObject] = (), components: Iterable[EnvironmentComponent] = ()): # When subclassing, make sure that the super().__init__() is at the end, # so that _setup() is being called at the correct time # A flag that tells whether the setup() method has been called self._is_set_up = False self.sim_objects: Set[SimObject] = set() # a set of sim_objects # that have to be destroyed at the end of the time step self._to_be_destroyed: Set[SimObject] = set() self.__event_system: EventSystem = EventSystem() for o in sim_objects: self.attach_sim_object(o) super().__init__(components) self._setup() @property def event_system(self) -> EventSystem: """ The environment's event system """ return self.__event_system @property def is_set_up(self) -> bool: """ A flag that says whether the environment has been set up or not """ return self._is_set_up @property def to_be_destroyed_sim_objects(self) -> Set[SimObject]: return self._to_be_destroyed def attach_sim_object(self, sim_object: SimObject): """ Attaches a sim object to the environment. """ sim_object.attach_environment(self) self.sim_objects.add(sim_object) if self._is_set_up: for component in sim_object.components: component.setup() def remove_sim_object(self, sim_object: SimObject): """ Removes the sim_object from the environment """ sim_object.remove_environment() self.sim_objects.remove(sim_object) # overriding a method to connect the component to self def attach_component(self, component: EnvironmentComponent): """ Attaches a component to the environment. """ component.attach_environment(self) super().attach_component(component) if self._is_set_up: component.setup() def remove_component(self, component: EnvironmentComponent): component.remove_environment() super().remove_component(component) def _setup(self): """ Calls setup() on all environment components and sim object components. """ for env_component in self.components: env_component.setup() for sim_object in self.sim_objects: for component in sim_object.components: component.setup() self._is_set_up = True def destroy_after_step(self, sim_object: SimObject): """ Schedule the sim_object to be destroyed at the end of the current step """ self._to_be_destroyed.add(sim_object) def _destroy_marked_sim_objects(self): for o in self._to_be_destroyed: o.destroy() self._to_be_destroyed.clear() def advance(self): """ Advance 1 step forward. """ self.event_system.raise_event(AdvanceTimeStepEvent()) self._destroy_marked_sim_objects() def update(self): """ Called after the physics update and before the environment is rendered """ self.event_system.raise_event(EnvironmentUpdateEvent()) def render(self): """ Renders the current state of the simulation """ self.event_system.raise_event(RenderEvent()) def destroy(self): """ Destroys the environment and all its components and sim_objects """ for sim_object in self.sim_objects.copy(): sim_object.destroy() for component in self.components.copy(): component.destroy() self.event_system.clear_listeners() class SimObject(ComponentContainer): """ A container for SimObject Components. 
Must have a Transform """ def __init__(self, tag: str = "", components: Iterable[SimObjectComponent] = ()): self._environment: Optional[SimEnvironment] = None self.__tag: str = tag super().__init__(components) self._transform: Optional[Transform] = self.try_get_component(Transform) if (self._transform is None): self._transform = Transform() self.attach_component(self.transform) @property def environment(self) -> SimEnvironment: """ A reference to the environment this sim object is attached to """ return self._environment def attach_environment(self, environment: SimEnvironment): """ Attaches an environment reference to the sim object """ if not isinstance(environment, SimEnvironment): raise TypeError("'environment' must be an instance of 'SimEnvironment'") self._environment = environment def remove_environment(self): """ removes the environment reference from the sim object """ self._environment = None @property def tag(self) -> str: return self.__tag @tag.setter def tag(self, value: str): self.__tag = value @property def transform(self) -> Transform: return self._transform # overriding a method to connect the component to self def attach_component(self, component: SimObjectComponent): """ Attaches component to the object """ component.attach_sim_object(self) super().attach_component(component) if self.environment is not None and self.environment.is_set_up: component.setup() def remove_component(self, component: SimObjectComponent): component.remove_sim_object() super().remove_component(component) def destroy(self): """ used to destroy the simobject Calling this yourself method is not recommended, instead use environment.destroy_after_step() to ensure no errors with referencing destroyed objects """ for c in self.components.copy(): c.destroy() self.components.clear() self.environment.remove_sim_object(self) class EnvironmentComponent(Component, ABC): """ The base class for environment components. 
""" def __init__(self): # a reference to the environment self._environment: Optional[SimEnvironment] = None super().__init__() @property def environment(self): """ The environment this component is attached to """ return self._environment def attach_environment(self, environment: SimEnvironment): """ Attaches an environment reference to the component """ if not isinstance(environment, SimEnvironment): raise TypeError("'environment' must be an instance of 'SimEnvironment'") self._environment = environment def remove_environment(self): """ removes the environment reference from the component """ self._environment = None def _on_destroy(self): self.environment.remove_component(self) def _after_destroy(self): self.remove_environment() class SimObjectComponent(Component, ABC): """ The base class for SimObject components """ def __init__(self): self._sim_object: Optional[SimObject] = None super().__init__() @property def sim_object(self) -> SimObject: """ A reference to the sim_object that the component is attached to """ return self._sim_object def attach_sim_object(self, sim_object: SimObject): """ Attach a sim_object reference to the component """ if not isinstance(sim_object, SimObject): raise TypeError("'sim_object' must be an instance of 'SimObject'") self._sim_object = sim_object def remove_sim_object(self): """ removes the sim_object reference from the component """ self._sim_object = None def _on_destroy(self): pass def _after_destroy(self): self.sim_object.remove_component(self) self.remove_sim_object() class Transform(SimObjectComponent): """ Holds information about position and rotation of the object. """ # I know this isn't actually a transformation matrix, I'm too dumb for that stuff # It also doesn't have scale lol def __init__(self, position: pygame.Vector2 = None, rotation: float = 0): if(position is None): position = pygame.Vector2() self._position: pygame.Vector2 = position self.rotation: float = rotation super().__init__() @property def position(self) -> pygame.Vector2: return self._position @position.setter def position(self, value: Sequence[float]): self._position.x = value[0] self._position.y = value[1] class RenderEvent(Event): """ An event that's raised by the environment when it renders the current state of the simulation on the screen """ class AdvanceTimeStepEvent(Event): """ An event that's raised when the environment advances 1 step forward. """ class EnvironmentUpdateEvent(Event): """ Raised when the environment updates """
py
1a43a8f9a5db289c43a750d0cecd774e9c234284
# cython: auto_cpdef=True """Python code for writing AVRO files""" # This code is a modified version of the code at # http://svn.apache.org/viewvc/avro/trunk/lang/py/src/avro/ which is under # Apache 2.0 license (http://www.apache.org/licenses/LICENSE-2.0) import json from io import BytesIO from os import urandom, SEEK_SET import bz2 import lzma import zlib from .io.binary_encoder import BinaryEncoder from .io.json_encoder import AvroJSONEncoder from .validation import _validate from .read import HEADER_SCHEMA, SYNC_SIZE, MAGIC, reader from .logical_writers import LOGICAL_WRITERS from .schema import extract_record_type, extract_logical_type, parse_schema from ._write_common import _is_appendable def write_null(encoder, datum, schema, named_schemas, fname): """null is written as zero bytes""" encoder.write_null() def write_boolean(encoder, datum, schema, named_schemas, fname): """A boolean is written as a single byte whose value is either 0 (false) or 1 (true).""" encoder.write_boolean(datum) def write_int(encoder, datum, schema, named_schemas, fname): """int and long values are written using variable-length, zig-zag coding.""" encoder.write_int(datum) def write_long(encoder, datum, schema, named_schemas, fname): """int and long values are written using variable-length, zig-zag coding.""" encoder.write_long(datum) def write_float(encoder, datum, schema, named_schemas, fname): """A float is written as 4 bytes. The float is converted into a 32-bit integer using a method equivalent to Java's floatToIntBits and then encoded in little-endian format.""" encoder.write_float(datum) def write_double(encoder, datum, schema, named_schemas, fname): """A double is written as 8 bytes. The double is converted into a 64-bit integer using a method equivalent to Java's doubleToLongBits and then encoded in little-endian format.""" encoder.write_double(datum) def write_bytes(encoder, datum, schema, named_schemas, fname): """Bytes are encoded as a long followed by that many bytes of data.""" encoder.write_bytes(datum) def write_utf8(encoder, datum, schema, named_schemas, fname): """A string is encoded as a long followed by that many bytes of UTF-8 encoded character data.""" encoder.write_utf8(datum) def write_crc32(encoder, datum): """A 4-byte, big-endian CRC32 checksum""" encoder.write_crc32(datum) def write_fixed(encoder, datum, schema, named_schemas, fname): """Fixed instances are encoded using the number of bytes declared in the schema.""" if len(datum) != schema["size"]: raise ValueError( f"data of length {len(datum)} does not match schema size: {schema}" ) encoder.write_fixed(datum) def write_enum(encoder, datum, schema, named_schemas, fname): """An enum is encoded by a int, representing the zero-based position of the symbol in the schema.""" index = schema["symbols"].index(datum) encoder.write_enum(index) def write_array(encoder, datum, schema, named_schemas, fname): """Arrays are encoded as a series of blocks. Each block consists of a long count value, followed by that many array items. A block with count zero indicates the end of the array. Each item is encoded per the array's item schema. If a block's count is negative, then the count is followed immediately by a long block size, indicating the number of bytes in the block. 
The actual count in this case is the absolute value of the count written.""" encoder.write_array_start() if len(datum) > 0: encoder.write_item_count(len(datum)) dtype = schema["items"] for item in datum: write_data(encoder, item, dtype, named_schemas, fname) encoder.end_item() encoder.write_array_end() def write_map(encoder, datum, schema, named_schemas, fname): """Maps are encoded as a series of blocks. Each block consists of a long count value, followed by that many key/value pairs. A block with count zero indicates the end of the map. Each item is encoded per the map's value schema. If a block's count is negative, then the count is followed immediately by a long block size, indicating the number of bytes in the block. The actual count in this case is the absolute value of the count written.""" encoder.write_map_start() if len(datum) > 0: encoder.write_item_count(len(datum)) vtype = schema["values"] for key, val in datum.items(): encoder.write_utf8(key) write_data(encoder, val, vtype, named_schemas, fname) encoder.write_map_end() def write_union(encoder, datum, schema, named_schemas, fname): """A union is encoded by first writing a long value indicating the zero-based position within the union of the schema of its value. The value is then encoded per the indicated schema within the union.""" best_match_index = -1 if isinstance(datum, tuple): (name, datum) = datum for index, candidate in enumerate(schema): extracted_type = extract_record_type(candidate) if extracted_type == "record": schema_name = candidate["name"] else: schema_name = extracted_type if name == schema_name: best_match_index = index break if best_match_index == -1: field = f"on field {fname}" if fname else "" msg = ( f"provided union type name {name} not found in schema " + f"{schema} {field}" ) raise ValueError(msg) index = best_match_index else: pytype = type(datum) most_fields = -1 # All of Python's floating point values are doubles, so to # avoid loss of precision, we should always prefer 'double' # if we are forced to choose between float and double. # # If 'double' comes before 'float' in the union, then we'll immediately # choose it, and don't need to worry. But if 'float' comes before # 'double', we don't want to pick it. # # So, if we ever see 'float', we skim through the rest of the options, # just to see if 'double' is a possibility, because we'd prefer it. could_be_float = False for index, candidate in enumerate(schema): if could_be_float: if extract_record_type(candidate) == "double": best_match_index = index break else: # Nothing except "double" is even worth considering. 
continue if _validate(datum, candidate, named_schemas, raise_errors=False): record_type = extract_record_type(candidate) if record_type == "record": logical_type = extract_logical_type(candidate) if logical_type: prepare = LOGICAL_WRITERS.get(logical_type) if prepare: datum = prepare(datum, candidate) candidate_fields = set(f["name"] for f in candidate["fields"]) datum_fields = set(datum) fields = len(candidate_fields.intersection(datum_fields)) if fields > most_fields: best_match_index = index most_fields = fields elif record_type == "float": best_match_index = index # Continue in the loop, because it's possible that there's # another candidate which has record type 'double' could_be_float = True else: best_match_index = index break if best_match_index == -1: field = f"on field {fname}" if fname else "" raise ValueError( f"{repr(datum)} (type {pytype}) do not match {schema} {field}" ) index = best_match_index # write data # TODO: There should be a way to give just the index encoder.write_index(index, schema[index]) write_data(encoder, datum, schema[index], named_schemas, fname) def write_record(encoder, datum, schema, named_schemas, fname): """A record is encoded by encoding the values of its fields in the order that they are declared. In other words, a record is encoded as just the concatenation of the encodings of its fields. Field values are encoded per their schema.""" for field in schema["fields"]: name = field["name"] if name not in datum and "default" not in field and "null" not in field["type"]: raise ValueError(f"no value and no default for {name}") write_data( encoder, datum.get(name, field.get("default")), field["type"], named_schemas, name, ) WRITERS = { "null": write_null, "boolean": write_boolean, "string": write_utf8, "int": write_int, "long": write_long, "float": write_float, "double": write_double, "bytes": write_bytes, "fixed": write_fixed, "enum": write_enum, "array": write_array, "map": write_map, "union": write_union, "error_union": write_union, "record": write_record, "error": write_record, } def write_data(encoder, datum, schema, named_schemas, fname): """Write a datum of data to output stream. Paramaters ---------- encoder: encoder Type of encoder (e.g. binary or json) datum: object Data to write schema: dict Schemda to use named_schemas: dict Mapping of fullname to schema definition """ record_type = extract_record_type(schema) logical_type = extract_logical_type(schema) fn = WRITERS.get(record_type) if fn: if logical_type: prepare = LOGICAL_WRITERS.get(logical_type) if prepare: datum = prepare(datum, schema) try: return fn(encoder, datum, schema, named_schemas, fname) except TypeError as ex: if fname: raise TypeError(f"{ex} on field {fname}") raise else: return write_data(encoder, datum, named_schemas[record_type], named_schemas, "") def write_header(encoder, metadata, sync_marker): header = { "magic": MAGIC, "meta": {key: value.encode() for key, value in metadata.items()}, "sync": sync_marker, } write_data(encoder, header, HEADER_SCHEMA, {}, "") def null_write_block(encoder, block_bytes, compression_level): """Write block in "null" codec.""" encoder.write_long(len(block_bytes)) encoder._fo.write(block_bytes) def deflate_write_block(encoder, block_bytes, compression_level): """Write block in "deflate" codec.""" # The first two characters and last character are zlib # wrappers around deflate data. 
if compression_level is not None: data = zlib.compress(block_bytes, compression_level)[2:-1] else: data = zlib.compress(block_bytes)[2:-1] encoder.write_long(len(data)) encoder._fo.write(data) def bzip2_write_block(encoder, block_bytes, compression_level): """Write block in "bzip2" codec.""" data = bz2.compress(block_bytes) encoder.write_long(len(data)) encoder._fo.write(data) def xz_write_block(encoder, block_bytes, compression_level): """Write block in "xz" codec.""" data = lzma.compress(block_bytes) encoder.write_long(len(data)) encoder._fo.write(data) BLOCK_WRITERS = { "null": null_write_block, "deflate": deflate_write_block, "bzip2": bzip2_write_block, "xz": xz_write_block, } def _missing_codec_lib(codec, library): def missing(encoder, block_bytes, compression_level): raise ValueError( f"{codec} codec is supported but you need to install {library}" ) return missing def snappy_write_block(encoder, block_bytes, compression_level): """Write block in "snappy" codec.""" data = snappy.compress(block_bytes) encoder.write_long(len(data) + 4) # for CRC encoder._fo.write(data) encoder.write_crc32(block_bytes) try: import snappy except ImportError: BLOCK_WRITERS["snappy"] = _missing_codec_lib("snappy", "python-snappy") else: BLOCK_WRITERS["snappy"] = snappy_write_block def zstandard_write_block(encoder, block_bytes, compression_level): """Write block in "zstandard" codec.""" data = zstd.ZstdCompressor().compress(block_bytes) encoder.write_long(len(data)) encoder._fo.write(data) try: import zstandard as zstd except ImportError: BLOCK_WRITERS["zstandard"] = _missing_codec_lib("zstandard", "zstandard") else: BLOCK_WRITERS["zstandard"] = zstandard_write_block def lz4_write_block(encoder, block_bytes, compression_level): """Write block in "lz4" codec.""" data = lz4.block.compress(block_bytes) encoder.write_long(len(data)) encoder._fo.write(data) try: import lz4.block except ImportError: BLOCK_WRITERS["lz4"] = _missing_codec_lib("lz4", "lz4") else: BLOCK_WRITERS["lz4"] = lz4_write_block class GenericWriter: def __init__(self, schema, metadata=None, validator=None): self._named_schemas = {} self.schema = parse_schema(schema, self._named_schemas) self.validate_fn = _validate if validator is True else validator self.metadata = metadata or {} if isinstance(schema, dict): schema = { key: value for key, value in schema.items() if key not in ("__fastavro_parsed", "__named_schemas") } elif isinstance(schema, list): schemas = [] for s in schema: if isinstance(s, dict): schemas.append( { key: value for key, value in s.items() if key not in ( "__fastavro_parsed", "__named_schemas", ) } ) else: schemas.append(s) schema = schemas self.metadata["avro.schema"] = json.dumps(schema) class Writer(GenericWriter): def __init__( self, fo, schema, codec="null", sync_interval=1000 * SYNC_SIZE, metadata=None, validator=None, sync_marker=None, compression_level=None, ): GenericWriter.__init__(self, schema, metadata, validator) self.metadata["avro.codec"] = codec if isinstance(fo, BinaryEncoder): self.encoder = fo else: self.encoder = BinaryEncoder(fo) self.io = BinaryEncoder(BytesIO()) self.block_count = 0 self.sync_interval = sync_interval self.compression_level = compression_level if _is_appendable(self.encoder._fo): # Seed to the beginning to read the header self.encoder._fo.seek(0) avro_reader = reader(self.encoder._fo) header = avro_reader._header file_writer_schema = parse_schema(avro_reader.writer_schema) if self.schema != file_writer_schema: raise ValueError( f"Provided schema {self.schema} does not match " + f"file 
writer_schema {file_writer_schema}" ) codec = avro_reader.metadata.get("avro.codec", "null") self.sync_marker = header["sync"] # Seek to the end of the file self.encoder._fo.seek(0, 2) self.block_writer = BLOCK_WRITERS[codec] else: self.sync_marker = sync_marker or urandom(SYNC_SIZE) try: self.block_writer = BLOCK_WRITERS[codec] except KeyError: raise ValueError(f"unrecognized codec: {codec}") write_header(self.encoder, self.metadata, self.sync_marker) def dump(self): self.encoder.write_long(self.block_count) self.block_writer(self.encoder, self.io._fo.getvalue(), self.compression_level) self.encoder._fo.write(self.sync_marker) self.io._fo.truncate(0) self.io._fo.seek(0, SEEK_SET) self.block_count = 0 def write(self, record): if self.validate_fn: self.validate_fn(record, self.schema, self._named_schemas) write_data(self.io, record, self.schema, self._named_schemas, "") self.block_count += 1 if self.io._fo.tell() >= self.sync_interval: self.dump() def write_block(self, block): # Clear existing block if there are any records pending if self.io._fo.tell() or self.block_count > 0: self.dump() self.encoder.write_long(block.num_records) self.block_writer(self.encoder, block.bytes_.getvalue(), self.compression_level) self.encoder._fo.write(self.sync_marker) def flush(self): if self.io._fo.tell() or self.block_count > 0: self.dump() self.encoder._fo.flush() class JSONWriter(GenericWriter): def __init__( self, fo, schema, codec="null", sync_interval=1000 * SYNC_SIZE, metadata=None, validator=None, sync_marker=None, codec_compression_level=None, ): GenericWriter.__init__(self, schema, metadata, validator) self.encoder = fo self.encoder.configure(self.schema, self._named_schemas) def write(self, record): if self.validate_fn: self.validate_fn(record, self.schema, self._named_schemas) write_data(self.encoder, record, self.schema, self._named_schemas, "") def flush(self): self.encoder.flush() def writer( fo, schema, records, codec="null", sync_interval=1000 * SYNC_SIZE, metadata=None, validator=None, sync_marker=None, codec_compression_level=None, ): """Write records to fo (stream) according to schema Parameters ---------- fo: file-like Output stream schema: dict Writer schema records: iterable Records to write. This is commonly a list of the dictionary representation of the records, but it can be any iterable codec: string, optional Compression codec, can be 'null', 'deflate' or 'snappy' (if installed) sync_interval: int, optional Size of sync interval metadata: dict, optional Header metadata validator: None, True or a function Validator function. If None (the default) - no validation. If True then then fastavro.validation.validate will be used. If it's a function, it should have the same signature as fastavro.writer.validate and raise an exeption on error. sync_marker: bytes, optional A byte string used as the avro sync marker. If not provided, a random byte string will be used. 
codec_compression_level: int, optional Compression level to use with the specified codec (if the codec supports it) Example:: from fastavro import writer, parse_schema schema = { 'doc': 'A weather reading.', 'name': 'Weather', 'namespace': 'test', 'type': 'record', 'fields': [ {'name': 'station', 'type': 'string'}, {'name': 'time', 'type': 'long'}, {'name': 'temp', 'type': 'int'}, ], } parsed_schema = parse_schema(schema) records = [ {u'station': u'011990-99999', u'temp': 0, u'time': 1433269388}, {u'station': u'011990-99999', u'temp': 22, u'time': 1433270389}, {u'station': u'011990-99999', u'temp': -11, u'time': 1433273379}, {u'station': u'012650-99999', u'temp': 111, u'time': 1433275478}, ] with open('weather.avro', 'wb') as out: writer(out, parsed_schema, records) The `fo` argument is a file-like object so another common example usage would use an `io.BytesIO` object like so:: from io import BytesIO from fastavro import writer fo = BytesIO() writer(fo, schema, records) Given an existing avro file, it's possible to append to it by re-opening the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't able to read some of the existing header information and an error will be raised. For example:: # Write initial records with open('weather.avro', 'wb') as out: writer(out, parsed_schema, records) # Write some more records with open('weather.avro', 'a+b') as out: writer(out, parsed_schema, more_records) """ # Sanity check that records is not a single dictionary (as that is a common # mistake and the exception that gets raised is not helpful) if isinstance(records, dict): raise ValueError('"records" argument should be an iterable, not dict') if isinstance(fo, AvroJSONEncoder): writer_class = JSONWriter else: # Assume a binary IO if an encoder isn't given writer_class = Writer fo = BinaryEncoder(fo) output = writer_class( fo, schema, codec, sync_interval, metadata, validator, sync_marker, codec_compression_level, ) for record in records: output.write(record) output.flush() def schemaless_writer(fo, schema, record): """Write a single record without the schema or header information Parameters ---------- fo: file-like Output file schema: dict Schema record: dict Record to write Example:: parsed_schema = fastavro.parse_schema(schema) with open('file', 'rb') as fp: fastavro.schemaless_writer(fp, parsed_schema, record) Note: The ``schemaless_writer`` can only write a single record. """ named_schemas = {} schema = parse_schema(schema, named_schemas) encoder = BinaryEncoder(fo) write_data(encoder, record, schema, named_schemas, "") encoder.flush()
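
# Hedged usage sketch (not part of the module): writing a single record with
# schemaless_writer into an in-memory buffer, reusing the BytesIO import at the
# top of this file. Note that the schemaless_writer docstring example above
# opens its file in 'rb' mode; for writing, 'wb' (or a BytesIO) is needed.
if __name__ == "__main__":
    _schema = {
        "name": "Reading",
        "type": "record",
        "fields": [{"name": "temp", "type": "int"}],
    }
    _buf = BytesIO()
    schemaless_writer(_buf, _schema, {"temp": 21})
    print(f"{len(_buf.getvalue())} bytes written")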
py
1a43a97f88dd3a7a4155955ea2d66bf98fd05986
""" 격자의 행과 열의 크기를 입력 받아서, 격자의 왼쪽 위에서 오른쪽 아래로 가는 모든 최단 경로 (shortest grid path)를 d, r로 표시하여 보세요. Input 같은 줄에 격자(grid)의 행(세로, row)과 열(가로, col)의 크기가 입력됩니다. Output 격자의 왼쪽 위에서 오른쪽 아래로 가는 모든 최단 경로를 d, r로 표시하여 출력합니다. 출력 순서는 문제의 예와 같게 합니다. Sample Input 1 3 2 Sample Output 1 dddrr ddrdr ddrrd drddr drdrd drrdd rdddr rddrd rdrdd rrddd """ def getGridPath(r, c, p=''): if (len(p) >= row+col): if (r==0 and c==0): print(p) return getGridPath(r-1, c, p+'d') getGridPath(r, c-1, p+'r') a = input().split() row, col = int(a[0]), int(a[1]) getGridPath(row, col)
py
1a43a9832238476cec744e8873cfa95985cf18af
""" _SummaryHistogram_ Histogram module, to be used by the TaskArchiver to store histograms in the summary. Created on Nov 16, 2012 @author: dballest """ from builtins import str from WMCore.DataStructs.WMObject import WMObject class SummaryHistogram(WMObject): """ _SummaryHistogram_ Histogram object, provides familiar CRUD methods which take care of most of the statistical calculations when adding points, this object can also be converted into a dictionary for JSON documents. It knows how to combine with other histograms and create itself from a dictionary provided it has matching structure. This is an interface, the real work is done by the ContinuousSummaryHistogram and DiscreteSummaryHistogram objects """ def __init__(self, title = None, xLabel = None): """ __init__ Initialize the elements in the object. """ # Meta-information about the histogram, it can be changed at any point self.title = title self.xLabel = xLabel # These shouldn't be touched from anything outside the SummaryHistogram object and children classes self.continuous = None self.jsonInternal = None self.data = {} self.average = None self.stdDev = None return def setTitle(self, newTitle): """ _setTitle_ Set the title """ self.title = newTitle return def setHorizontalLabel(self, xLabel): """ _setHorizontalLabel_ Set the label on the x axis """ self.xLabel = xLabel return def addPoint(self, xValue, yLabel): """ _addPoint_ Add a point to the histogram data, a histogram can have many types of y values for the same x if x is continuous otherwise it is only one yLabel. They should be in a similar scale for best results. """ raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation") def toJSON(self): """ _toJSON_ Return a dictionary which is compatible with a JSON object """ if self.continuous is None: raise TypeError("toJSON can't be called on a bare SummaryHistogram object") # Get what the children classes did jsonDict = {} jsonDict['internalData'] = self.jsonInternal or {} # Add the common things jsonDict['title'] = self.title jsonDict['xLabel'] = self.xLabel jsonDict['continuous'] = self.continuous jsonDict['data'] = self.data jsonDict['stdDev'] = self.stdDev jsonDict['average'] = self.average return jsonDict def __add__(self, other): """ __add__ Add two histograms, combine statistics. """ raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation") def __str__(self): """ __str__ Return the str object of the JSON """ return str(self.toJSON())
py
1a43aa96ae2c0441c379b7092d43b2eb0c5ad437
# coding: utf-8

"""
    DFC

    DFC is a scalable object-storage based caching system with Amazon and Google Cloud backends.  # noqa: E501

    OpenAPI spec version: 1.1.0
    Contact: [email protected]
    Generated by: https://openapi-generator.tech
"""

from __future__ import absolute_import

import unittest

import openapi_client
from openapi_client.models.cluster_statistics import ClusterStatistics  # noqa: E501
from openapi_client.rest import ApiException


class TestClusterStatistics(unittest.TestCase):
    """ClusterStatistics unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testClusterStatistics(self):
        """Test ClusterStatistics"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.cluster_statistics.ClusterStatistics()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
py
1a43ab844bc4bef920aacbea7297ef9ee489297e
import cv2
import numpy as np
from preparation.augmentor import Augmentor
from preparation.utils import get_snake_case, get_class_from_path
import pandas as pd
import os
from multiprocessing.pool import ThreadPool as Pool
from glob import glob


class Processor:
    def __init__(self, batch_size, width, height):
        self.batch_size = batch_size
        self.width = width
        self.height = height

        columns = pd.read_csv('data/train_labels/train01.csv').columns[1:]
        columns = list(map(lambda x: get_snake_case(x), columns))
        columns_to_index = {column_name: index for (index, column_name) in enumerate(columns)}
        columns_to_index.update({'no_tools': 21})
        self.columns = columns_to_index

    def process(self, imgs_paths, augment=True):
        new_imgs = np.zeros((self.batch_size, self.height, self.width, 3), dtype=np.float32)
        new_labels = np.zeros((self.batch_size, 22), dtype=np.float32)

        if not len(imgs_paths):
            return new_imgs, new_labels

        for i in range(0, len(imgs_paths)):
            img = cv2.imread(imgs_paths[i], 1)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = Augmentor().augment(img) if augment else img
            new_imgs[i] = img

            current_class = get_class_from_path(imgs_paths[i])
            new_labels[i][self.columns[current_class]] = 1.

        new_imgs /= 255

        return new_imgs, new_labels

    def delete_empty_files(self, imgs_paths, folder_path):
        def delete_empty_file(path):
            img = cv2.imread(path)
            shape = img.shape[:2]
            if not shape[0] or not shape[1]:
                os.remove(path)

        with Pool(processes=12) as pool:
            pool.map(delete_empty_file, imgs_paths)

        return np.array(glob(folder_path))
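
# Hedged usage sketch (not part of the module), kept as a comment because the
# CSV path, image directory layout and image sizes below are assumptions:
#
#     processor = Processor(batch_size=32, width=224, height=224)
#     batch_paths = glob('data/train/*/*.jpg')[:32]
#     imgs, labels = processor.process(batch_paths, augment=True)
#     # imgs -> (32, 224, 224, 3) float32 in [0, 1], labels -> (32, 22) one-hot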
py
1a43ab9a2bff6625b72a4b2f3ea87982009fd559
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from ..base.base_manager import BaseManager


class ExtensionManager(BaseManager):
    """ Manage DevOps Extensions

    Install a new extension within an organization or view existing extensions.

    Attributes:
        See BaseManager
    """

    def __init__(self, organization_name="", creds=None):
        """Inits ExtensionManager as per BaseManager"""
        super(ExtensionManager, self).__init__(creds, organization_name=organization_name)

    def create_extension(self, extension_name, publisher_name):
        """Installs an extension in Azure DevOps if it does not already exist"""
        extensions = self.list_extensions()
        extension = next((extension for extension in extensions
                          if (extension.publisher_id == publisher_name)
                          and (extension.extension_id == extension_name)), None)
        # If the extension wasn't in the installed extensions then we know we need to install it
        if extension is None:
            extension = self._extension_management_client.install_extension_by_name(publisher_name,
                                                                                     extension_name)
        return extension

    def list_extensions(self):
        """Lists the extensions already installed in Azure DevOps"""
        return self._extension_management_client.get_installed_extensions()
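
# Hedged usage sketch (not part of the module): the organization, publisher and
# extension names are placeholders, and `creds` must be a credential object
# accepted by BaseManager.
#
#     manager = ExtensionManager(organization_name="my-org", creds=creds)
#     manager.create_extension(extension_name="Timetracker", publisher_name="7pace")
#     for ext in manager.list_extensions():
#         print(ext.publisher_id, ext.extension_id)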
py
1a43acc50f991cef40a02bf02c66c692c3595994
from django.test import TestCase

# Create your tests here.

import datetime

from django.utils import timezone

from catalog.forms import RenewBookForm


class RenewBookFormTest(TestCase):

    def test_renew_form_date_in_past(self):
        """
        Test form is invalid if renewal_date is before today
        """
        date = datetime.date.today() - datetime.timedelta(days=1)
        form_data = {'renewal_date': date}
        form = RenewBookForm(data=form_data)
        self.assertFalse(form.is_valid())

    def test_renew_form_date_too_far_in_future(self):
        """
        Test form is invalid if renewal_date is more than 4 weeks from today
        """
        date = datetime.date.today() + datetime.timedelta(weeks=4) + datetime.timedelta(days=1)
        form_data = {'renewal_date': date}
        form = RenewBookForm(data=form_data)
        self.assertFalse(form.is_valid())

    def test_renew_form_date_today(self):
        """
        Test form is valid if renewal_date is today
        """
        date = datetime.date.today()
        form_data = {'renewal_date': date}
        form = RenewBookForm(data=form_data)
        self.assertTrue(form.is_valid())

    def test_renew_form_date_max(self):
        """
        Test form is valid if renewal_date is within 4 weeks
        """
        date = timezone.now() + datetime.timedelta(weeks=4)
        form_data = {'renewal_date': date}
        form = RenewBookForm(data=form_data)
        self.assertTrue(form.is_valid())

    def test_renew_form_date_field_label(self):
        """
        Test renewal_date label is "renewal date"
        """
        form = RenewBookForm()
        self.assertTrue(form.fields['renewal_date'].label == None or
                        form.fields['renewal_date'].label == 'renewal date')

    def test_renew_form_date_field_help_text(self):
        """
        Test renewal_date help_text is as expected
        """
        form = RenewBookForm()
        # Use assertEqual so the help text is actually compared; assertTrue treated
        # the second argument as a failure message and always passed.
        self.assertEqual(form.fields['renewal_date'].help_text,
                         'Enter a date between now and 4 weeks (default 3).')
py
1a43acfbf31c000e7f4dbdffd47649d953c074ca
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict from typing import Tuple import numpy as np import torch from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2 from nnunet.utilities.to_torch import maybe_to_torch, to_cuda from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, \ get_patch_size, default_3D_augmentation_params from nnunet.training.dataloading.dataset_loading import unpack_dataset from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer from nnunet.utilities.nd_softmax import softmax_helper from torch import nn from torch.nn.utils import clip_grad_norm_ from nnunet.training.learning_rate.poly_lr import poly_lr from batchgenerators.utilities.file_and_folder_operations import * try: from apex import amp except ImportError: amp = None class nnUNetTrainerV2(nnUNetTrainer): """ Info for Fabian: same as internal nnUNetTrainerV2_2 """ def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.max_num_epochs = 1000 self.initial_lr = 1e-2 self.deep_supervision_scales = None self.ds_loss_weights = None self.pin_memory = True def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - enforce to only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights # now wrap the loss self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_moreDA_augmentation( self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory ) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def initialize_network(self): """ - momentum 0.99 - SGD instead of Adam - self.lr_scheduler = None because we do poly_lr - deep supervision = True - i am sure I forgot something here Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though :return: """ if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) # finetune # finetune = True # print('-'*30) # if finetune==True: # saved_model = torch.load(join(self.output_folder, "finetune.model"), map_location=torch.device('cpu')) # exit() # new_state_dict = OrderedDict() # curr_state_dict_keys = list(self.network.state_dict().keys()) # print('-'*30) # print('curr_state_dict_keys: ', curr_state_dict_keys) # print('saved_model['state_dict'].keys(): ', saved_model['state_dict'].keys()) # print('-'*30) # for k, value in saved_model['state_dict'].items(): # key = k # if key not in curr_state_dict_keys: # print(key, "duh***********") # key = key[7:] # new_state_dict[key] = value # self.network.load_state_dict(new_state_dict) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper def initialize_optimizer_and_scheduler(self): assert self.network is not None, "self.initialize_network must be called first" self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, 
weight_decay=self.weight_decay, momentum=0.99, nesterov=True) self.lr_scheduler = None def run_online_evaluation(self, output, target): """ due to deep supervision the return value and the reference are now lists of tensors. We only need the full resolution output because this is what we are interested in in the end. The others are ignored :param output: :param target: :return: """ target = target[0] output = output[0] return super().run_online_evaluation(output, target) def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0): """ We need to wrap this because we need to enforce self.network.do_ds = False for prediction """ ds = self.network.do_ds self.network.do_ds = False ret = super().validate(do_mirroring, use_sliding_window, step_size, save_softmax, use_gaussian, overwrite, validation_folder_name, debug, all_in_gpu, force_separate_z=force_separate_z, interpolation_order=interpolation_order, interpolation_order_z=interpolation_order_z) self.network.do_ds = ds return ret def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True, mirror_axes: Tuple[int] = None, use_sliding_window: bool = True, step_size: float = 0.5, use_gaussian: bool = True, pad_border_mode: str = 'constant', pad_kwargs: dict = None, all_in_gpu: bool = True, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: """ We need to wrap this because we need to enforce self.network.do_ds = False for prediction """ ds = self.network.do_ds self.network.do_ds = False ret = super().predict_preprocessed_data_return_seg_and_softmax(data, do_mirroring, mirror_axes, use_sliding_window, step_size, use_gaussian, pad_border_mode, pad_kwargs, all_in_gpu, verbose) self.network.do_ds = ds return ret def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False): """ gradient clipping improves training stability :param data_generator: :param do_backprop: :param run_online_evaluation: :return: """ data_dict = next(data_generator) data = data_dict['data'] target = data_dict['target'] data = maybe_to_torch(data) target = maybe_to_torch(target) if torch.cuda.is_available(): data = to_cuda(data) target = to_cuda(target) self.optimizer.zero_grad() output = self.network(data) del data loss = self.loss(output, target) if run_online_evaluation: self.run_online_evaluation(output, target) del target if do_backprop: if not self.fp16 or amp is None or not torch.cuda.is_available(): loss.backward() else: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() _ = clip_grad_norm_(self.network.parameters(), 12) self.optimizer.step() return loss.detach().cpu().numpy() def do_split(self): """ we now allow more than 5 splits. IMPORTANT: and fold > 4 will not be a real split but just another random 80:20 split of the data. You cannot run X-fold cross-validation with this code. It will always be a 5-fold CV. 
Folds > 4 will be independent from each other :return: """ if self.fold == 'all' or self.fold < 5: return super().do_split() else: rnd = np.random.RandomState(seed=12345 + self.fold) keys = np.sort(list(self.dataset.keys())) idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False) idx_val = [i for i in range(len(keys)) if i not in idx_tr] self.dataset_tr = OrderedDict() for i in idx_tr: self.dataset_tr[keys[i]] = self.dataset[keys[i]] self.dataset_val = OrderedDict() for i in idx_val: self.dataset_val[keys[i]] = self.dataset[keys[i]] def setup_DA_params(self): """ - we increase roation angle from [-15, 15] to [-30, 30] - scale range is now (0.7, 1.4), was (0.85, 1.25) - we don't do elastic deformation anymore :return: """ self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod( np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1] if self.threeD: self.data_aug_params = default_3D_augmentation_params self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) patch_size_for_spatialtransform = self.patch_size[1:] else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) patch_size_for_spatialtransform = self.patch_size self.data_aug_params["scale_range"] = (0.7, 1.4) self.data_aug_params["do_elastic"] = False self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform self.data_aug_params["num_cached_per_thread"] = 2 def maybe_update_lr(self, epoch=None): """ if epoch is not None we overwrite epoch. Else we use epoch = self.epoch + 1 (maybe_update_lr is called in on_epoch_end which is called before epoch is incremented. herefore we need to do +1 here) :param epoch: :return: """ if epoch is None: ep = self.epoch + 1 else: ep = epoch self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9) self.print_to_log_file("lr:", np.round(self.optimizer.param_groups[0]['lr'], decimals=6)) def on_epoch_end(self): """ overwrite patient-based early stopping. 
Always run to 1000 epochs :return: """ super().on_epoch_end() continue_training = self.epoch < self.max_num_epochs # it can rarely happen that the momentum of nnUNetTrainerV2 is too high for some dataset. If at epoch 100 the # estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95 if self.epoch == 100: if self.all_val_eval_metrics[-1] == 0: self.optimizer.param_groups[0]["momentum"] = 0.95 self.network.apply(InitWeights_He(1e-2)) self.print_to_log_file("At epoch 100, the mean foreground Dice was 0. This can be caused by a too " "high momentum. High momentum (0.99) is good for datasets where it works, but " "sometimes causes issues such as this one. Momentum has now been reduced to " "0.95 and network weights have been reinitialized") return continue_training def run_training(self): """ if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first continued epoch with self.initial_lr we also need to make sure deep supervision in the network is enabled for training, thus the wrapper :return: """ self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we # want at the start of the training ds = self.network.do_ds self.network.do_ds = True ret = super().run_training() self.network.do_ds = ds return ret
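
# --- Illustrative sketch, not part of nnUNetTrainerV2 ---
# Worked example of the deep-supervision loss weighting set up in initialize():
# each decoder output gets a weight 1 / 2**i (halving as the resolution drops),
# the lowest-resolution output is masked to 0, and the rest is renormalised to
# sum to 1. The depth of 5 below is assumed for demonstration only.
import numpy as np

net_numpool = 5
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
mask = np.array([True] + [i < net_numpool - 1 for i in range(1, net_numpool)])
weights[~mask] = 0
weights = weights / weights.sum()
print(np.round(weights, 4))  # -> [0.5333 0.2667 0.1333 0.0667 0.    ]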
py
1a43ad144cb1e7389d42d0d45fc84630d5fe91a0
import logging import os from datetime import datetime from unittest.mock import Mock import psycopg2 import pytest from scrapy.exceptions import NotConfigured from kingfisher_scrapy.extensions import DatabaseStore, FilesStore from tests import spider_with_crawler database_url = os.getenv('KINGFISHER_COLLECT_DATABASE_URL') skip_test_if = not database_url and ('CI' not in os.environ or 'CI_SKIP' in os.environ) def test_from_crawler_missing_arguments(): spider = spider_with_crawler(crawl_time='2021-05-25T00:00:00') with pytest.raises(NotConfigured) as excinfo: DatabaseStore.from_crawler(spider.crawler) assert str(excinfo.value) == 'DATABASE_URL is not set.' spider.crawler.settings = {'DATABASE_URL': 'test', 'FILES_STORE': None} with pytest.raises(NotConfigured) as excinfo: DatabaseStore.from_crawler(spider.crawler) assert str(excinfo.value) == 'FILES_STORE is not set.' @pytest.mark.skipif(skip_test_if, reason='KINGFISHER_COLLECT_DATABASE_URL must be set') @pytest.mark.parametrize('from_date,default_from_date,date_format', [ (None, None, None), ('2020-01-01', None, 'date'), ('2020-01-01', '2020-01-01', 'date'), ]) def test_spider_opened_first_time(caplog, tmpdir, from_date, default_from_date, date_format): spider = spider_with_crawler(crawl_time='2021-05-25T00:00:00', settings={'DATABASE_URL': database_url, 'FILES_STORE': tmpdir}) spider.from_date = from_date spider.default_from_date = default_from_date if date_format: spider.date_format = spider.VALID_DATE_FORMATS[date_format] extension = DatabaseStore.from_crawler(spider.crawler) with caplog.at_level(logging.INFO): extension.spider_opened(spider) if not from_date: assert [record.message for record in caplog.records][-5:] == [ 'Getting the date from which to resume the crawl from the test table'] connection = psycopg2.connect(database_url) cursor = connection.cursor() try: cursor.execute("SELECT to_regclass('test')") table_exists = cursor.fetchone()[0] assert table_exists == 'test' assert spider.from_date == from_date finally: cursor.close() connection.close() @pytest.mark.skipif(skip_test_if, reason='KINGFISHER_COLLECT_DATABASE_URL must be set') def test_spider_closed_error(caplog, tmpdir): spider = spider_with_crawler(crawl_time='2021-05-25T00:00:00', settings={'DATABASE_URL': database_url, 'FILES_STORE': tmpdir}) extension = DatabaseStore.from_crawler(spider.crawler) with caplog.at_level(logging.INFO): extension.spider_closed(spider, 'closed') assert not caplog.records @pytest.mark.skipif(skip_test_if, reason='KINGFISHER_COLLECT_DATABASE_URL must be set') @pytest.mark.parametrize('data,data_type,sample,compile_releases', [ (b'{"releases": [{"date": "2021-05-26T10:00:00Z"}]}', 'release_package', None, False), (b'{"releases": [{"date": "2021-05-26T10:00:00Z"}]}', 'release_package', 1, False), (b'{"releases": [{"ocid":"1", "date": "2021-05-26T10:00:00Z"}]}', 'release_package', None, True), (b'{"records": [{"compiledRelease": {"date": "2021-05-26T10:00:00Z"}}]}', 'record_package', None, False), (b'{"records": [{"releases": [{"ocid":"1", "date": "2021-05-26T10:00:00Z"}]}]}', 'record_package', None, True), ]) def test_spider_closed(caplog, tmpdir, data, data_type, sample, compile_releases): caplog.set_level(logging.INFO) expected_date = '2021-05-26T10:00:00Z' spider = spider_with_crawler(crawl_time='2021-05-25T00:00:00', settings={'DATABASE_URL': database_url, 'FILES_STORE': tmpdir}) spider.data_type = data_type spider.sample = sample spider.compile_releases = compile_releases extension = DatabaseStore.from_crawler(spider.crawler) 
files_store_extension = FilesStore.from_crawler(spider.crawler) response = Mock() response.body = data response.request = Mock() response.request.url = 'https://example.com/remote.json' response.request.meta = {'file_name': 'file.json'} item = spider.build_file_from_response(response, file_name='file.json', data_type=data_type) files_store_extension.item_scraped(item, spider) extension.spider_opened(spider) caplog.clear() extension.spider_closed(spider, 'finished') connection = psycopg2.connect(database_url) cursor = connection.cursor() try: cursor.execute("SELECT max(data->>'date') FROM test") max_date = cursor.fetchone()[0] assert max_date == expected_date if compile_releases: if data_type == 'release_package': prefix = 'empty' else: prefix = 'records.item.releases.item' elif data_type == 'release_package': prefix = 'releases.item' else: prefix = 'records.item.compiledRelease' if sample: suffix = '_sample' else: suffix = '' expected_messages = [ f'Reading the {tmpdir}/test{suffix}/20210525_000000 crawl directory with the {prefix} prefix', f'Writing the JSON data to the {tmpdir}/test{suffix}/20210525_000000/data.csv CSV file', 'Replacing the JSON data in the test table', ] if compile_releases: expected_messages.insert(1, 'Creating compiled releases') assert [record.message for record in caplog.records][-5:] == expected_messages finally: cursor.close() connection.close() @pytest.mark.skipif(skip_test_if, reason='KINGFISHER_COLLECT_DATABASE_URL must be set') def test_spider_opened_with_data(caplog, tmpdir): spider = spider_with_crawler(crawl_time='2021-05-25T00:00:00', settings={'DATABASE_URL': database_url, 'FILES_STORE': tmpdir}) extension = DatabaseStore.from_crawler(spider.crawler) connection = psycopg2.connect(database_url) cursor = connection.cursor() try: with caplog.at_level(logging.INFO): extension.spider_opened(spider) assert spider.from_date == datetime(2021, 5, 26, 0, 0) assert [record.message for record in caplog.records][-5:] == [ 'Getting the date from which to resume the crawl from the test table', 'Resuming the crawl from 2021-05-26'] finally: cursor.execute('DROP TABLE test') connection.commit() cursor.close() connection.close()
py
1a43ad55ba98c74c295c8b91ba7be8e27814d4d2
import json

f = open("../../config/add_action.txt")

processors = []
action_map = {}
actions = []
primitive_list = []
primitive_num = 0
parameter_num = 0
primitive_idx = 0
cur_idx = 0

while True:
    line = f.readline()
    print(line)
    if not line:
        break
    if line == "\n":
        continue
    l = line.split()
    if l[0] == 'e':
        action_map["processor_name"] = l[1]
        # processors.append(l[1])
        # action_map[processors[-1]] = []
        # action_map[processors[-1]] = {}
    if l[0] == 'a':
        actions.append(l[1])
        # action_map[processors[-1]].append({})
        action_map["action_name"] = l[1]
        action_map["parameter_num"] = int(l[2])
        parameter_num = int(l[2])
        action_map["primitives"] = []
        action_map["parameter_length"] = []
        primitive_num = int(l[3])
        # primitive_list = []
        cur_idx = 0
    elif l[0] == 'p':
        primitive_map = {}
        primitive_name = l[1]
        primitive_map["primitive_name"] = l[1]
        parameters = []
        for i in range(len(l) - 2):
            header_name = l[2 + i].split('.')[0]
            field_name = l[2 + i].split('.')[1]
            parameter_map = {}
            parameter_map["type"] = header_name
            parameter_map["value"] = field_name
            parameters.append(parameter_map)
        primitive_map["parameters"] = parameters
        action_map["primitives"].append(primitive_map)
    elif l[0] == 'l':
        for i in range(1, parameter_num + 1):
            action_map["parameter_length"].append(int(l[i]))

f.close()

print(action_map)
print(json.dumps(action_map, indent=3))

filename = "../../config/add_action.json"
with open(filename, 'w') as file_obj:
    json.dump(action_map, file_obj, indent=3)
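
# --- Illustrative sketch, not part of the original script ---
# The add_action.txt format is inferred from the parser above: 'e <processor>',
# 'a <action> <param_count> <primitive_count>', 'p <primitive> <hdr.field> ...'
# and 'l <len1> <len2> ...'. The concrete names below (set_nhop, modify_field,
# ipv4.dstAddr, ...) are assumptions used only to show the resulting structure.
example_input = """e ingress_processor
a set_nhop 2 1
p modify_field ipv4.dstAddr port.egress_spec
l 32 9
"""
expected_action_map = {
    "processor_name": "ingress_processor",
    "action_name": "set_nhop",
    "parameter_num": 2,
    "primitives": [
        {"primitive_name": "modify_field",
         "parameters": [{"type": "ipv4", "value": "dstAddr"},
                        {"type": "port", "value": "egress_spec"}]},
    ],
    "parameter_length": [32, 9],
}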
py
1a43ae295bcb037b01eef26bc14001f656e25b56
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from monai.engines import SupervisedEvaluator
from monai.handlers import DecollateBatch, PostProcessing
from monai.transforms import Activationsd, AsDiscreted, Compose, CopyItemsd


class TestHandlerDecollateBatch(unittest.TestCase):
    def test_compute(self):
        data = [
            {"image": torch.tensor([[[[2.0], [3.0]]]]), "filename": ["test1"]},
            {"image": torch.tensor([[[[6.0], [8.0]]]]), "filename": ["test2"]},
        ]

        handlers = [
            DecollateBatch(event="MODEL_COMPLETED"),
            PostProcessing(
                transform=Compose(
                    [
                        Activationsd(keys="pred", sigmoid=True),
                        CopyItemsd(keys="filename", times=1, names="filename_bak"),
                        AsDiscreted(keys="pred", threshold=0.5, to_onehot=2),
                    ]
                )
            ),
        ]
        # set up engine, PostProcessing handler works together with postprocessing transforms of engine
        engine = SupervisedEvaluator(
            device=torch.device("cpu:0"),
            val_data_loader=data,
            epoch_length=2,
            network=torch.nn.PReLU(),
            # set decollate=False and execute some postprocessing first, then decollate in handlers
            postprocessing=lambda x: dict(pred=x["pred"] + 1.0),
            decollate=False,
            val_handlers=handlers,
        )
        engine.run()

        expected = torch.tensor([[[[1.0], [1.0]], [[0.0], [0.0]]]])

        for o, e in zip(engine.state.output, expected):
            torch.testing.assert_allclose(o["pred"], e)
            filename = o.get("filename_bak")
            if filename is not None:
                self.assertEqual(filename, "test2")


if __name__ == "__main__":
    unittest.main()
py
1a43aebe50c13fb00f9ade607db5846e61e38e43
def get_query_set(readpercent):
    return [
        "SELECT a.a1 FROM t.a WHERE a.a2 = <randInt>;", readpercent,
        "UPDATE t.a SET a3 = <randInt> WHERE a.a2 = <randInt2>;", (100 - readpercent),
    ]
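
# --- Illustrative usage, not part of the original module ---
# The returned list alternates query templates with their weights: with
# readpercent=80 the SELECT template carries weight 80 and the UPDATE template
# weight 20. The workload driver that consumes this list is not shown here and
# is an assumption.
mix = get_query_set(80)
assert mix[1] == 80 and mix[3] == 20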
py
1a43af3af4bbdb88c9a4c71df24c1d972be0afee
from flask import Blueprint
# from myapp.ext import db

blue = Blueprint("app", __name__)


def init_blue(app):
    app.register_blueprint(blue)
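
# --- Illustrative usage, not part of the original module ---
# A minimal sketch of how init_blue is presumably wired into an application
# factory; the factory below is an assumption and not part of this project.
if __name__ == "__main__":
    from flask import Flask

    _app = Flask(__name__)
    init_blue(_app)
    print(_app.blueprints)  # {'app': <Blueprint 'app'>}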
py
1a43af75ba16677d5e0d588a10bbe0473d202df6
from __future__ import absolute_import, division, print_function, unicode_literals import braintree from postgres.orm import Model class ExchangeRoute(Model): typname = "exchange_routes" def __bool__(self): return self.error != 'invalidated' __nonzero__ = __bool__ @classmethod def from_id(cls, id): return cls.db.one(""" SELECT r.*::exchange_routes FROM exchange_routes r WHERE id = %(id)s """, locals()) @classmethod def from_network(cls, participant, network): participant_id = participant.id r = cls.db.one(""" SELECT r.*::exchange_routes FROM current_exchange_routes r WHERE participant = %(participant_id)s AND network = %(network)s """, locals()) if r: r.__dict__['participant'] = participant return r @classmethod def from_address(cls, participant, network, address): participant_id = participant.id r = cls.db.one(""" SELECT r.*::exchange_routes FROM exchange_routes r WHERE participant = %(participant_id)s AND network = %(network)s AND address = %(address)s """, locals()) if r: r.__dict__['participant'] = participant return r @classmethod def insert(cls, participant, network, address, error='', fee_cap=None): participant_id = participant.id r = cls.db.one(""" INSERT INTO exchange_routes (participant, network, address, error, fee_cap) VALUES (%(participant_id)s, %(network)s, %(address)s, %(error)s, %(fee_cap)s) RETURNING exchange_routes.*::exchange_routes """, locals()) if network == 'balanced-cc': participant.update_giving_and_tippees() r.__dict__['participant'] = participant return r def invalidate(self): if self.network == 'braintree-cc': braintree.PaymentMethod.delete(self.address) # For Paypal, we remove the record entirely to prevent # an integrity error if the user tries to add the route again if self.network == 'paypal': self.db.run("DELETE FROM exchange_routes WHERE id=%s", (self.id,)) else: self.update_error('invalidated') def update_error(self, new_error, propagate=True): id = self.id old_error = self.error if old_error == 'invalidated': return self.db.run(""" UPDATE exchange_routes SET error = %(new_error)s WHERE id = %(id)s """, locals()) self.set_attributes(error=new_error) # Update the receiving amounts of tippees if requested and necessary if not propagate or self.network != 'balanced-cc': return if self.participant.is_suspicious or bool(new_error) == bool(old_error): return self.participant.update_giving_and_tippees()
py
1a43b1bbe369d4d438e784beeb6f89634732619d
import os

TARGET = os.path.abspath(os.getcwd())

for root, dirs, files in os.walk(TARGET):
    for filename in files:
        # read file content
        with open(os.path.join(root, filename)) as f:
            content = f.read()
        # replace tag by install path
        content = content.replace('$((INSTALDIR))', TARGET)
        # replace file content
        with open(os.path.join(root, filename), 'w') as f:
            f.write(content)
py
1a43b5435736b0b8198e0398a1e6064f4717ed64
# -*- coding: utf-8 -*- """Base exchange class""" # ----------------------------------------------------------------------------- __version__ = '1.15.45' # ----------------------------------------------------------------------------- from ccxt.base.errors import ExchangeError from ccxt.base.errors import NotSupported from ccxt.base.errors import AuthenticationError from ccxt.base.errors import DDoSProtection from ccxt.base.errors import RequestTimeout from ccxt.base.errors import ExchangeNotAvailable from ccxt.base.errors import InvalidAddress # ----------------------------------------------------------------------------- from ccxt.base.decimal_to_precision import decimal_to_precision from ccxt.base.decimal_to_precision import DECIMAL_PLACES # ----------------------------------------------------------------------------- __all__ = [ 'Exchange', ] # ----------------------------------------------------------------------------- # Python 2 & 3 import logging import base64 import calendar import collections import datetime from email.utils import parsedate import functools import gzip import hashlib import hmac import io import json import math from numbers import Number import re from requests import Session from requests.utils import default_user_agent from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException # import socket from ssl import SSLError # import sys import time import uuid import zlib from decimal import Decimal # ----------------------------------------------------------------------------- try: import urllib.parse as _urlencode # Python 3 except ImportError: import urllib as _urlencode # Python 2 # ----------------------------------------------------------------------------- try: basestring # Python 3 except NameError: basestring = str # Python 2 # ----------------------------------------------------------------------------- class Exchange(object): """Base exchange class""" id = None version = None # rate limiter settings enableRateLimit = False rateLimit = 2000 # milliseconds = seconds * 1000 timeout = 10000 # milliseconds = seconds * 1000 asyncio_loop = None aiohttp_proxy = None session = None # Session () by default logger = None # logging.getLogger(__name__) by default userAgent = None userAgents = { 'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36', 'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36', } verbose = False markets = None symbols = None fees = { 'trading': { 'fee_loaded': False, 'percentage': True, # subclasses should rarely have to redefine this }, 'funding': { 'fee_loaded': False, 'withdraw': {}, 'deposit': {}, }, } ids = None tickers = None api = None parseJsonResponse = True proxy = '' origin = '*' # CORS origin proxies = None apiKey = '' secret = '' password = '' uid = '' twofa = False marketsById = None markets_by_id = None currencies_by_id = None precision = None limits = None exceptions = None headers = None balance = None orderbooks = None orders = None trades = None currencies = None options = None # Python does not allow to define properties in run-time with setattr requiredCredentials = { 'apiKey': True, 'secret': True, 'uid': False, 'login': False, 'password': False, 'twofa': False, # 2-factor authentication (one-time password key) } # API method metainfo has = { 'publicAPI': True, 'privateAPI': True, 'CORS': False, 'cancelOrder': True, 'cancelOrders': False, 
'createDepositAddress': False, 'createOrder': True, 'createMarketOrder': True, 'createLimitOrder': True, 'deposit': False, 'editOrder': 'emulated', 'fetchBalance': True, 'fetchClosedOrders': False, 'fetchCurrencies': False, 'fetchDepositAddress': False, 'fetchFundingFees': False, 'fetchL2OrderBook': True, 'fetchMarkets': True, 'fetchMyTrades': False, 'fetchOHLCV': 'emulated', 'fetchOpenOrders': False, 'fetchOrder': False, 'fetchOrderBook': True, 'fetchOrderBooks': False, 'fetchOrders': False, 'fetchTicker': True, 'fetchTickers': False, 'fetchTrades': True, 'fetchTradingFees': False, 'fetchTradingLimits': False, 'withdraw': False, } precisionMode = DECIMAL_PLACES minFundingAddressLength = 1 # used in check_address substituteCommonCurrencyCodes = True lastRestRequestTimestamp = 0 lastRestPollTimestamp = 0 restRequestQueue = None restPollerLoopIsRunning = False rateLimitTokens = 16 rateLimitMaxTokens = 16 rateLimitUpdateTime = 0 last_http_response = None last_json_response = None last_response_headers = None commonCurrencies = { 'XBT': 'BTC', 'BCC': 'BCH', 'DRK': 'DASH', } def __init__(self, config={}): self.precision = {} if self.precision is None else self.precision self.limits = {} if self.limits is None else self.limits self.exceptions = {} if self.exceptions is None else self.exceptions self.headers = {} if self.headers is None else self.headers self.balance = {} if self.balance is None else self.balance self.orderbooks = {} if self.orderbooks is None else self.orderbooks self.orders = {} if self.orders is None else self.orders self.trades = {} if self.trades is None else self.trades self.currencies = {} if self.currencies is None else self.currencies self.options = {} if self.options is None else self.options # Python does not allow to define properties in run-time with setattr self.decimalToPrecision = self.decimal_to_precision = decimal_to_precision # version = '.'.join(map(str, sys.version_info[:3])) # self.userAgent = { # 'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version # } self.userAgent = default_user_agent() settings = self.deep_extend(self.describe(), config) for key in settings: if hasattr(self, key) and isinstance(getattr(self, key), dict): setattr(self, key, self.deep_extend(getattr(self, key), settings[key])) else: setattr(self, key, settings[key]) if self.api: self.define_rest_api(self.api, 'request') if self.markets: self.set_markets(self.markets) # format camel case for attr in dir(self): if attr[0] != '_'and attr[-1] != '_' and '_' in attr: conv = attr.split('_') camel_case = conv[0] + ''.join(i[0].upper() + i[1:] for i in conv[1:]) setattr(self, camel_case, getattr(self, attr)) self.tokenBucket = self.extend({ 'refillRate': 1.0 / self.rateLimit, 'delay': 1.0, 'capacity': 1.0, 'defaultCost': 1.0, }, getattr(self, 'tokenBucket') if hasattr(self, 'tokenBucket') else {}) self.session = self.session if self.session else Session() self.logger = self.logger if self.logger else logging.getLogger(__name__) def __del__(self): if self.session: self.session.close() def describe(self): return {} def define_rest_api(self, api, method_name, options={}): delimiters = re.compile('[^a-zA-Z0-9]') for api_type, methods in api.items(): for http_method, urls in methods.items(): for url in urls: url = url.strip() split_path = delimiters.split(url) uppercase_method = http_method.upper() lowercase_method = http_method.lower() camelcase_method = lowercase_method.capitalize() camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path]) 
lowercase_path = [x.strip().lower() for x in split_path] underscore_suffix = '_'.join([k for k in lowercase_path if len(k)]) camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix) underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower() if 'suffixes' in options: if 'camelcase' in options['suffixes']: camelcase += options['suffixes']['camelcase'] if 'underscore' in options['suffixes']: underscore += options['suffixes']['underscore'] partial = functools.partial(getattr(self, method_name), url, api_type, uppercase_method) setattr(self, camelcase, partial) setattr(self, underscore, partial) def raise_error(self, exception_type, url=None, method=None, error=None, details=None): if error: error = str(error) output = ' '.join([self.id] + [var for var in (url, method, error, details) if var is not None]) raise exception_type(output) def throttle(self): now = float(self.milliseconds()) elapsed = now - self.lastRestRequestTimestamp if elapsed < self.rateLimit: delay = self.rateLimit - elapsed time.sleep(delay / 1000.0) def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None): """A better wrapper over request for deferred signing""" if self.enableRateLimit: self.throttle() self.lastRestRequestTimestamp = self.milliseconds() request = self.sign(path, api, method, params, headers, body) return self.fetch(request['url'], request['method'], request['headers'], request['body']) def request(self, path, api='public', method='GET', params={}, headers=None, body=None): return self.fetch2(path, api, method, params, headers, body) @staticmethod def gzip_deflate(response, text): encoding = response.info().get('Content-Encoding') if encoding in ('gzip', 'x-gzip', 'deflate'): if encoding == 'deflate': return zlib.decompress(text, -zlib.MAX_WBITS) else: return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read() return text def handle_errors(self, code, reason, url, method, headers, body): pass def prepare_request_headers(self, headers=None): headers = headers or {} headers.update(self.headers) if self.userAgent: if type(self.userAgent) is str: headers.update({'User-Agent': self.userAgent}) elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent): headers.update(self.userAgent) if self.proxy: headers.update({'Origin': self.origin}) headers.update({'Accept-Encoding': 'gzip, deflate'}) return headers def fetch(self, url, method='GET', headers=None, body=None): """Perform a HTTP request and return decoded JSON data""" request_headers = self.prepare_request_headers(headers) url = self.proxy + url if self.verbose: print("\nRequest:", method, url, request_headers, body) self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body) if body: body = body.encode() self.session.cookies.clear() response = None try: response = self.session.request( method, url, data=body, headers=request_headers, timeout=int(self.timeout / 1000), proxies=self.proxies ) self.last_http_response = response.text self.last_response_headers = response.headers if self.verbose: print("\nResponse:", method, url, str(response.status_code), str(response.headers), self.last_http_response) self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status_code, response.headers, self.last_http_response) response.raise_for_status() except Timeout as e: self.raise_error(RequestTimeout, method, url, e) except TooManyRedirects as e: self.raise_error(ExchangeError, url, method, e) except SSLError as e: self.raise_error(ExchangeError, url, 
method, e) except HTTPError as e: self.handle_errors(response.status_code, response.reason, url, method, self.last_response_headers, self.last_http_response) self.handle_rest_errors(e, response.status_code, self.last_http_response, url, method) self.raise_error(ExchangeError, url, method, e, self.last_http_response) except RequestException as e: # base exception class self.raise_error(ExchangeError, url, method, e, self.last_http_response) self.handle_errors(response.status_code, response.reason, url, method, None, self.last_http_response) return self.handle_rest_response(self.last_http_response, url, method, headers, body) def handle_rest_errors(self, exception, http_status_code, response, url, method='GET'): error = None if http_status_code in [418, 429]: error = DDoSProtection elif http_status_code in [404, 409, 500, 501, 502, 520, 521, 522, 525]: error = ExchangeNotAvailable elif http_status_code in [422]: error = ExchangeError elif http_status_code in [400, 403, 405, 503, 530]: # special case to detect ddos protection error = ExchangeNotAvailable if response: ddos_protection = re.search('(cloudflare|incapsula)', response, flags=re.IGNORECASE) if ddos_protection: error = DDoSProtection elif http_status_code in [408, 504]: error = RequestTimeout elif http_status_code in [401, 511]: error = AuthenticationError if error: self.raise_error(error, url, method, exception if exception else http_status_code, response) def handle_rest_response(self, response, url, method='GET', headers=None, body=None): try: if self.parseJsonResponse: last_json_response = json.loads(response) if len(response) > 1 else None self.last_json_response = last_json_response return last_json_response else: return response except ValueError as e: # ValueError == JsonDecodeError ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE) exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE) if ddos_protection: self.raise_error(DDoSProtection, method, url, None, response) if exchange_not_available: message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect' self.raise_error(ExchangeNotAvailable, method, url, None, message) self.raise_error(ExchangeError, method, url, e, response) @staticmethod def safe_float(dictionary, key, default_value=None): value = default_value try: if isinstance(dictionary, list) and isinstance(key, int) and len(dictionary) > key: value = float(dictionary[key]) else: value = float(dictionary[key]) if (key is not None) and (key in dictionary) and (dictionary[key] is not None) else default_value except ValueError as e: value = default_value return value @staticmethod def safe_string(dictionary, key, default_value=None): return str(dictionary[key]) if key is not None and (key in dictionary) and dictionary[key] is not None else default_value @staticmethod def safe_integer(dictionary, key, default_value=None): if key is None or (key not in dictionary): return default_value value = dictionary[key] if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()): return int(value) return default_value @staticmethod def safe_value(dictionary, key, default_value=None): return dictionary[key] if key is not None and (key in dictionary) and dictionary[key] is not None else default_value @staticmethod def truncate(num, precision=0): if precision > 0: decimal_precision = math.pow(10, precision) 
return math.trunc(num * decimal_precision) / decimal_precision return int(Exchange.truncate_to_string(num, precision)) @staticmethod def truncate_to_string(num, precision=0): if precision > 0: parts = ('%f' % Decimal(num)).split('.') decimal_digits = parts[1][:precision].rstrip('0') decimal_digits = decimal_digits if len(decimal_digits) else '0' return parts[0] + '.' + decimal_digits return ('%d' % num) @staticmethod def uuid(): return str(uuid.uuid4()) @staticmethod def capitalize(string): # first character only, rest characters unchanged # the native pythonic .capitalize() method lowercases all other characters # which is an unwanted behaviour, therefore we use this custom implementation # check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize()) if len(string) > 1: return "%s%s" % (string[0].upper(), string[1:]) return string.upper() @staticmethod def keysort(dictionary): return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0])) @staticmethod def extend(*args): if args is not None: result = None if type(args[0]) is collections.OrderedDict: result = collections.OrderedDict() else: result = {} for arg in args: result.update(arg) return result return {} @staticmethod def deep_extend(*args): result = None for arg in args: if isinstance(arg, dict): if not isinstance(result, dict): result = {} for key in arg: result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key]) else: result = arg return result @staticmethod def filter_by(array, key, value=None): if value: grouped = Exchange.group_by(array, key) if value in grouped: return grouped[value] return [] return array @staticmethod def filterBy(self, array, key, value=None): return Exchange.filter_by(array, key, value) @staticmethod def group_by(array, key): result = {} array = Exchange.to_array(array) array = [entry for entry in array if (key in entry) and (entry[key] is not None)] for entry in array: if entry[key] not in result: result[entry[key]] = [] result[entry[key]].append(entry) return result @staticmethod def groupBy(array, key): return Exchange.group_by(array, key) @staticmethod def index_by(array, key): result = {} if type(array) is dict: array = Exchange.keysort(array).values() for element in array: if (key in element) and (element[key] is not None): k = element[key] result[k] = element return result @staticmethod def sort_by(array, key, descending=False): return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending) @staticmethod def array_concat(a, b): return a + b @staticmethod def in_array(needle, haystack): return needle in haystack @staticmethod def is_empty(object): return not object @staticmethod def extract_params(string): return re.findall(r'{([\w-]+)}', string) @staticmethod def implode_params(string, params): for key in params: string = string.replace('{' + key + '}', str(params[key])) return string @staticmethod def url(path, params={}): result = Exchange.implode_params(path, params) query = Exchange.omit(params, Exchange.extract_params(path)) if query: result += '?' 
+ _urlencode.urlencode(query) return result @staticmethod def urlencode(params={}): if (type(params) is dict) or isinstance(params, collections.OrderedDict): return _urlencode.urlencode(params) return params @staticmethod def rawencode(params={}): return _urlencode.unquote(Exchange.urlencode(params)) @staticmethod def encode_uri_component(uri): return _urlencode.quote(uri, safe="~()*!.'") @staticmethod def omit(d, *args): result = d.copy() for arg in args: if type(arg) is list: for key in arg: if key in result: del result[key] else: if arg in result: del result[arg] return result @staticmethod def unique(array): return list(set(array)) @staticmethod def pluck(array, key): return [ element[key] for element in array if (key in element) and (element[key] is not None) ] @staticmethod def sum(*args): return sum([arg for arg in args if isinstance(arg, (float, int))]) @staticmethod def ordered(array): return collections.OrderedDict(array) @staticmethod def aggregate(bidasks): ordered = Exchange.ordered({}) for [price, volume] in bidasks: if volume > 0: ordered[price] = (ordered[price] if price in ordered else 0) + volume result = [] items = list(ordered.items()) for price, volume in items: result.append([price, volume]) return result @staticmethod def sec(): return Exchange.seconds() @staticmethod def msec(): return Exchange.milliseconds() @staticmethod def usec(): return Exchange.microseconds() @staticmethod def seconds(): return int(time.time()) @staticmethod def milliseconds(): return int(time.time() * 1000) @staticmethod def microseconds(): return int(time.time() * 1000000) @staticmethod def iso8601(timestamp=None): if timestamp is None: return timestamp if not isinstance(timestamp, int): return None if int(timestamp) < 0: return None try: utc = datetime.datetime.utcfromtimestamp(timestamp // 1000) return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z' except (TypeError, OverflowError, OSError): return None @staticmethod def dmy(timestamp, infix='-'): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y') @staticmethod def ymd(timestamp, infix='-'): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d') @staticmethod def ymdhms(timestamp, infix=' '): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S') @staticmethod def parse_date(timestamp=None): if timestamp is None: return timestamp if not isinstance(timestamp, str): return None if 'GMT' in timestamp: try: string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z' dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ") return calendar.timegm(dt.utctimetuple()) * 1000 except (TypeError, OverflowError, OSError): return None else: return Exchange.parse8601(timestamp) @staticmethod def parse8601(timestamp=None): if timestamp is None: return timestamp yyyy = '([0-9]{4})-?' mm = '([0-9]{2})-?' dd = '([0-9]{2})(?:T|[\\s])?' h = '([0-9]{2}):?' m = '([0-9]{2}):?' s = '([0-9]{2})' ms = '(\\.[0-9]{1,3})?' tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?' 
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz try: match = re.search(regex, timestamp, re.IGNORECASE) if match is None: return None yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups() ms = ms or '.000' msint = int(ms[1:]) sign = sign or '' sign = int(sign + '1') hours = int(hours or 0) * sign minutes = int(minutes or 0) * sign offset = datetime.timedelta(hours=hours, minutes=minutes) string = yyyy + mm + dd + h + m + s + ms + 'Z' dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ") dt = dt + offset return calendar.timegm(dt.utctimetuple()) * 1000 + msint except (TypeError, OverflowError, OSError, ValueError): return None @staticmethod def hash(request, algorithm='md5', digest='hex'): h = hashlib.new(algorithm, request) if digest == 'hex': return h.hexdigest() elif digest == 'base64': return base64.b64encode(h.digest()) return h.digest() @staticmethod def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'): h = hmac.new(secret, request, algorithm) if digest == 'hex': return h.hexdigest() elif digest == 'base64': return base64.b64encode(h.digest()) return h.digest() @staticmethod def binary_concat(*args): result = bytes() for arg in args: result = result + arg return result @staticmethod def binary_to_string(s): return s.decode('ascii') @staticmethod def base64urlencode(s): return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '') @staticmethod def jwt(request, secret, algorithm=hashlib.sha256, alg='HS256'): header = Exchange.encode(Exchange.json({ 'alg': alg, 'typ': 'JWT', })) encodedHeader = Exchange.base64urlencode(header) encodedData = Exchange.base64urlencode(Exchange.encode(Exchange.json(request))) token = encodedHeader + '.' + encodedData hmac = Exchange.hmac(Exchange.encode(token), Exchange.encode(secret), algorithm, 'binary') signature = Exchange.base64urlencode(hmac) return token + '.' 
+ signature @staticmethod def unjson(input): return json.loads(input) @staticmethod def json(data, params=None): return json.dumps(data, separators=(',', ':')) @staticmethod def encode(string): return string.encode() @staticmethod def decode(string): return string.decode() @staticmethod def to_array(value): return list(value.values()) if type(value) is dict else value def nonce(self): return Exchange.seconds() def check_required_credentials(self): keys = list(self.requiredCredentials.keys()) for key in keys: if self.requiredCredentials[key] and not getattr(self, key): self.raise_error(AuthenticationError, details='requires `' + key + '`') def check_address(self, address): """Checks an address is not the same character repeated or an empty sequence""" if address is None: self.raise_error(InvalidAddress, details='address is None') if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address: self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"') return address def account(self): return { 'free': 0.0, 'used': 0.0, 'total': 0.0, } def common_currency_code(self, currency): if not self.substituteCommonCurrencyCodes: return currency return self.safe_string(self.commonCurrencies, currency, currency) def currency_id(self, commonCode): currencyIds = {v: k for k, v in self.commonCurrencies.items()} return self.safe_string(currencyIds, commonCode, commonCode) def precision_from_string(self, string): parts = re.sub(r'0+$', '', string).split('.') return len(parts[1]) if len(parts) > 1 else 0 def cost_to_precision(self, symbol, cost): return ('{:.' + str(self.markets[symbol]['precision']['price']) + 'f}').format(float(cost)) def price_to_precision(self, symbol, price): return ('{:.' + str(self.markets[symbol]['precision']['price']) + 'f}').format(float(price)) def amount_to_precision(self, symbol, amount): return self.truncate(amount, self.markets[symbol]['precision']['amount']) def amount_to_string(self, symbol, amount): return self.truncate_to_string(amount, self.markets[symbol]['precision']['amount']) def amount_to_lots(self, symbol, amount): lot = self.markets[symbol]['lot'] return self.amount_to_precision(symbol, math.floor(amount / lot) * lot) def fee_to_precision(self, symbol, fee): return ('{:.' 
+ str(self.markets[symbol]['precision']['price']) + 'f}').format(float(fee)) def set_markets(self, markets, currencies=None): values = list(markets.values()) if type(markets) is dict else markets for i in range(0, len(values)): values[i] = self.extend( self.fees['trading'], {'precision': self.precision, 'limits': self.limits}, values[i] ) self.markets = self.index_by(values, 'symbol') self.markets_by_id = self.index_by(values, 'id') self.marketsById = self.markets_by_id self.symbols = sorted(list(self.markets.keys())) self.ids = sorted(list(self.markets_by_id.keys())) if currencies: self.currencies = self.deep_extend(currencies, self.currencies) else: base_currencies = [{ 'id': market['baseId'] if 'baseId' in market else market['base'], 'numericId': market['baseNumericId'] if 'baseNumericId' in market else None, 'code': market['base'], 'precision': ( market['precision']['base'] if 'base' in market['precision'] else ( market['precision']['amount'] if 'amount' in market['precision'] else None ) ) if 'precision' in market else 8, } for market in values if 'base' in market] quote_currencies = [{ 'id': market['quoteId'] if 'quoteId' in market else market['quote'], 'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None, 'code': market['quote'], 'precision': ( market['precision']['quote'] if 'quote' in market['precision'] else ( market['precision']['price'] if 'price' in market['precision'] else None ) ) if 'precision' in market else 8, } for market in values if 'quote' in market] currencies = self.sort_by(base_currencies + quote_currencies, 'code') self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies) self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id') return self.markets def load_markets(self, reload=False): if not reload: if self.markets: if not self.markets_by_id: return self.set_markets(self.markets) return self.markets markets = self.fetch_markets() currencies = None if self.has['fetchCurrencies']: currencies = self.fetch_currencies() return self.set_markets(markets, currencies) def populate_fees(self): if not (hasattr(self, 'markets') or hasattr(self, 'currencies')): return for currency, data in self.currencies.items(): # try load withdrawal fees from currencies if 'fee' in data and data['fee'] is not None: self.fees['funding']['withdraw'][currency] = data['fee'] self.fees['funding']['fee_loaded'] = True # find a way to populate trading fees from markets def load_fees(self): self.load_markets() self.populate_fees() if not (self.has['fetchTradingFees'] or self.has['fetchFundingFees']): return self.fees fetched_fees = self.fetch_fees() if fetched_fees['funding']: self.fees['funding']['fee_loaded'] = True if fetched_fees['trading']: self.fees['trading']['fee_loaded'] = True self.fees = self.deep_extend(self.fees, fetched_fees) return self.fees def fetch_markets(self): return self.to_array(self.markets) def fetch_fees(self): trading = {} funding = {} try: trading = self.fetch_trading_fees() except AuthenticationError: pass except AttributeError: pass try: funding = self.fetch_funding_fees() except AuthenticationError: pass except AttributeError: pass return { 'trading': trading, 'funding': funding, } def create_order(self, symbol, type, side, amount, price=None, params={}): self.raise_error(NotSupported, details='create_order() not implemented yet') def cancel_order(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='cancel_order() not implemented yet') def fetch_bids_asks(self, symbols=None, 
params={}): self.raise_error(NotSupported, details='API does not allow to fetch all prices at once with a single call to fetch_bids_asks() for now') def fetch_tickers(self, symbols=None, params={}): self.raise_error(NotSupported, details='API does not allow to fetch all tickers at once with a single call to fetch_tickers() for now') def fetch_order_status(self, id, market=None): order = self.fetch_order(id) return order['status'] def purge_cached_orders(self, before): orders = self.to_array(self.orders) orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)] self.orders = self.index_by(orders, 'id') return self.orders def fetch_order(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='fetch_order() is not implemented yet') def fetch_orders(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_orders() is not implemented yet') def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_open_orders() is not implemented yet') def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_closed_orders() is not implemented yet') def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_my_trades() is not implemented yet') def fetch_order_trades(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='fetch_order_trades() is not implemented yet') def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None): return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None): ohlcvs = self.to_array(ohlcvs) num_ohlcvs = len(ohlcvs) result = [] i = 0 while i < num_ohlcvs: if limit and (len(result) >= limit): break ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit) i = i + 1 if since and (ohlcv[0] < since): continue result.append(ohlcv) return result def parse_bid_ask(self, bidask, price_key=0, amount_key=0): return [float(bidask[price_key]), float(bidask[amount_key])] def parse_bids_asks(self, bidasks, price_key=0, amount_key=1): result = [] if len(bidasks): if type(bidasks[0]) is list: for bidask in bidasks: if bidask[price_key] and bidask[amount_key]: result.append(self.parse_bid_ask(bidask, price_key, amount_key)) elif type(bidasks[0]) is dict: for bidask in bidasks: if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]): result.append(self.parse_bid_ask(bidask, price_key, amount_key)) else: self.raise_error(ExchangeError, details='unrecognized bidask format: ' + str(bidasks[0])) return result def fetch_l2_order_book(self, symbol, limit=None, params={}): orderbook = self.fetch_order_book(symbol, limit, params) return self.extend(orderbook, { 'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True), 'asks': self.sort_by(self.aggregate(orderbook['asks']), 0), }) def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1): return { 'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True), 'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0), 
'timestamp': timestamp, 'datetime': self.iso8601(timestamp) if timestamp is not None else None, 'nonce': None, } def parse_balance(self, balance): currencies = self.omit(balance, 'info').keys() for account in ['free', 'used', 'total']: balance[account] = {} for currency in currencies: balance[account][currency] = balance[currency][account] return balance def fetch_partial_balance(self, part, params={}): balance = self.fetch_balance(params) return balance[part] def fetch_free_balance(self, params={}): return self.fetch_partial_balance('free', params) def fetch_used_balance(self, params={}): return self.fetch_partial_balance('used', params) def fetch_total_balance(self, params={}): return self.fetch_partial_balance('total', params) def load_trading_limits(self, symbols=None, reload=False, params={}): if self.has['fetchTradingLimits']: if reload or not('limitsLoaded' in list(self.options.keys())): response = self.fetch_trading_limits(symbols) limits = response['limits'] keys = list(limits.keys()) for i in range(0, len(keys)): symbol = keys[i] self.markets[symbol] = self.deep_extend(self.markets[symbol], { 'limits': limits[symbol], }) self.options['limitsLoaded'] = self.milliseconds() return self.markets def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): if not self.has['fetchTrades']: self.raise_error(NotSupported, details='fetch_ohlcv() not implemented yet') self.load_markets() trades = self.fetch_trades(symbol, since, limit, params) return self.build_ohlcv(trades, timeframe, since, limit) def convert_trading_view_to_ohlcv(self, ohlcvs): result = [] for i in range(0, len(ohlcvs['t'])): result.append([ ohlcvs['t'][i] * 1000, ohlcvs['o'][i], ohlcvs['h'][i], ohlcvs['l'][i], ohlcvs['c'][i], ohlcvs['v'][i], ]) return result def convert_ohlcv_to_trading_view(self, ohlcvs): result = { 't': [], 'o': [], 'h': [], 'l': [], 'c': [], 'v': [], } for i in range(0, len(ohlcvs)): result['t'].append(int(ohlcvs[i][0] / 1000)) result['o'].append(ohlcvs[i][1]) result['h'].append(ohlcvs[i][2]) result['l'].append(ohlcvs[i][3]) result['c'].append(ohlcvs[i][4]) result['v'].append(ohlcvs[i][5]) return result def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None): ms = self.parse_timeframe(timeframe) * 1000 ohlcvs = [] (high, low, close, volume) = (2, 3, 4, 5) num_trades = len(trades) oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit) for i in range(0, oldest): trade = trades[i] if (since is not None) and (trade['timestamp'] < since): continue opening_time = int(math.floor(trade['timestamp'] / ms) * ms) # Shift the edge of the m/h/d (but not M) j = len(ohlcvs) if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms: # moved to a new timeframe -> create a new candle from opening trade ohlcvs.append([ opening_time, trade['price'], trade['price'], trade['price'], trade['price'], trade['amount'], ]) else: # still processing the same timeframe -> update opening trade ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price']) ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price']) ohlcvs[j - 1][close] = trade['price'] ohlcvs[j - 1][volume] += trade['amount'] return ohlcvs def parse_timeframe(self, timeframe): amount = int(timeframe[0:-1]) unit = timeframe[-1] if 'y' in unit: scale = 60 * 60 * 24 * 365 elif 'M' in unit: scale = 60 * 60 * 24 * 30 elif 'w' in unit: scale = 60 * 60 * 24 * 7 elif 'd' in unit: scale = 60 * 60 * 24 elif 'h' in unit: scale = 60 * 60 else: scale = 60 # 1m by default return amount * scale def parse_trades(self, trades, 
market=None, since=None, limit=None): array = self.to_array(trades) array = [self.parse_trade(trade, market) for trade in array] array = self.sort_by(array, 'timestamp') symbol = market['symbol'] if market else None return self.filter_by_symbol_since_limit(array, symbol, since, limit) def parse_orders(self, orders, market=None, since=None, limit=None): array = self.to_array(orders) array = [self.parse_order(order, market) for order in array] array = self.sort_by(array, 'timestamp') symbol = market['symbol'] if market else None return self.filter_by_symbol_since_limit(array, symbol, since, limit) def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None): array = self.to_array(array) if symbol: array = [entry for entry in array if entry['symbol'] == symbol] if since: array = [entry for entry in array if entry['timestamp'] >= since] if limit: array = array[0:limit] return array def filter_by_since_limit(self, array, since=None, limit=None): array = self.to_array(array) if since: array = [entry for entry in array if entry['timestamp'] >= since] if limit: array = array[0:limit] return array def filter_by_symbol(self, array, symbol=None): array = self.to_array(array) if symbol: return [entry for entry in array if entry['symbol'] == symbol] return array def filter_by_array(self, objects, key, values=None, indexed=True): objects = self.to_array(objects) # return all of them if no values were passed in if values is None: return self.index_by(objects, key) if indexed else objects result = [] for i in range(0, len(objects)): value = objects[i][key] if key in objects[i] else None if value in values: result.append(objects[i]) return self.index_by(result, key) if indexed else result def currency(self, code): if not self.currencies: self.raise_error(ExchangeError, details='Currencies not loaded') if isinstance(code, basestring) and (code in self.currencies): return self.currencies[code] self.raise_error(ExchangeError, details='Does not have currency code ' + str(code)) def find_market(self, string): if not self.markets: self.raise_error(ExchangeError, details='Markets not loaded') if isinstance(string, basestring): if string in self.markets_by_id: return self.markets_by_id[string] if string in self.markets: return self.markets[string] return string def find_symbol(self, string, market=None): if market is None: market = self.find_market(string) if isinstance(market, dict): return market['symbol'] return string def market(self, symbol): if not self.markets: self.raise_error(ExchangeError, details='Markets not loaded') if isinstance(symbol, basestring) and (symbol in self.markets): return self.markets[symbol] self.raise_error(ExchangeError, details='No market symbol ' + str(symbol)) def market_ids(self, symbols): return [self.market_id(symbol) for symbol in symbols] def market_id(self, symbol): market = self.market(symbol) return market['id'] if type(market) is dict else symbol def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}): market = self.markets[symbol] rate = market[takerOrMaker] cost = float(self.cost_to_precision(symbol, amount * price)) return { 'rate': rate, 'type': takerOrMaker, 'currency': market['quote'], 'cost': float(self.fee_to_precision(symbol, rate * cost)), } def edit_limit_buy_order(self, id, symbol, *args): return self.edit_limit_order(id, symbol, 'buy', *args) def edit_limit_sell_order(self, id, symbol, *args): return self.edit_limit_order(id, symbol, 'sell', *args) def edit_limit_order(self, id, symbol, *args): return 
self.edit_order(id, symbol, 'limit', *args) def edit_order(self, id, symbol, *args): if not self.enableRateLimit: self.raise_error(ExchangeError, details='edit_order() requires enableRateLimit = true') self.cancel_order(id, symbol) return self.create_order(symbol, *args) def create_limit_order(self, symbol, *args): return self.create_order(symbol, 'limit', *args) def create_market_order(self, symbol, *args): return self.create_order(symbol, 'market', *args) def create_limit_buy_order(self, symbol, *args): return self.create_order(symbol, 'limit', 'buy', *args) def create_limit_sell_order(self, symbol, *args): return self.create_order(symbol, 'limit', 'sell', *args) def create_market_buy_order(self, symbol, amount, params={}): return self.create_order(symbol, 'market', 'buy', amount, None, params) def create_market_sell_order(self, symbol, amount, params={}): return self.create_order(symbol, 'market', 'sell', amount, None, params) def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
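The exchange base-class code above buckets individual trades into fixed time windows (build_ohlcv) after converting a timeframe string into seconds (parse_timeframe). The following is a minimal standalone sketch of that same bucketing idea; the trade records and timestamps are hypothetical and not tied to any real exchange feed.

import math

def parse_timeframe(timeframe):
    # '1m' -> 60, '1h' -> 3600, '1d' -> 86400, mirroring the scale table above
    amount = int(timeframe[0:-1])
    unit = timeframe[-1]
    scale = {'y': 31536000, 'M': 2592000, 'w': 604800, 'd': 86400, 'h': 3600}.get(unit, 60)
    return amount * scale

def build_ohlcv(trades, timeframe='1m'):
    ms = parse_timeframe(timeframe) * 1000
    candles = []
    for trade in sorted(trades, key=lambda t: t['timestamp']):
        opening = int(math.floor(trade['timestamp'] / ms) * ms)
        if not candles or opening >= candles[-1][0] + ms:
            # new window: open = high = low = close = first trade price
            candles.append([opening, trade['price'], trade['price'],
                            trade['price'], trade['price'], trade['amount']])
        else:
            candle = candles[-1]
            candle[2] = max(candle[2], trade['price'])  # high
            candle[3] = min(candle[3], trade['price'])  # low
            candle[4] = trade['price']                  # close
            candle[5] += trade['amount']                # volume
    return candles

# hypothetical trades: timestamp (ms), price, amount
trades = [
    {'timestamp': 1_500_000_000_000, 'price': 100.0, 'amount': 1.0},
    {'timestamp': 1_500_000_030_000, 'price': 101.0, 'amount': 0.5},
    {'timestamp': 1_500_000_090_000, 'price': 99.5, 'amount': 2.0},
]
print(build_ohlcv(trades, '1m'))  # two 1-minute candles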
py
1a43b593d48bcad17037c69194bfdd23af04c13d
import itertools
import collections

import numpy as np


class Solver:
    '''
    Solver class.
    '''
    def __init__(self, Y, M, epsilon, distance):
        '''
        Parameters
        ----------
        Y : list<vector>
            Finite set of vectors
        M : int
            Positive integer less than the size of Y
        epsilon : float
            Relative error
        distance : callable
            Distance metric
        '''
        self.Y = Y
        if isinstance(Y[0], int):
            self.q = 1
        else:
            self.q = len(Y[0])
        self.M = M
        self.epsilon = epsilon
        self.distance = distance

    def solve(self):
        min_obj_fun_val = float('inf')
        opt_subset = []
        for y in self.Y:
            ZMyY = self.computeZMvY(y)  # M elements of Y closest to y
            rMyY = ZMyY[-1][-1]         # maximal distance between y and elements in ZMyY
            h = self.epsilon * 1.0 / (self.q * self.M) ** 0.5 * rMyY
            H = self.M ** 0.5 * rMyY
            if rMyY == 0:
                # the M nearest points coincide with y, so the objective is already zero
                return [i[0] for i in ZMyY], 0.0
            ByhH = self.generateByhH(y, h, H)
            for b in ByhH:
                ZMbY = self.computeZMvY(b)
                subset = [i[0] for i in ZMbY]
                obj_fun_val = self.computeObj(subset)
                if obj_fun_val < min_obj_fun_val:
                    min_obj_fun_val = obj_fun_val
                    opt_subset = subset
        return opt_subset, min_obj_fun_val

    def generateByhH(self, y, h, H):
        if self.q == 1:
            # descending and ascending halves of the grid, mirroring the multi-dimensional case below
            return np.hstack((
                np.arange(y, -1 * H, -1 * h),
                np.arange(y, H, h)))
        arr = [[] for i in range(self.q)]
        for i in range(self.q):
            arr[i] = np.hstack((
                np.arange(y[i], -1 * H, -1 * h),
                np.arange(y[i], H, h)))
        out = []
        n = self.q
        indices = [0 for i in range(n)]
        while True:
            out.append([])
            for i in range(n):
                out[-1].append(arr[i][indices[i]])
            next = n - 1
            while next >= 0 and (indices[next] + 1 >= len(arr[next])):
                next -= 1
            if next < 0:
                return out
            indices[next] += 1
            for i in range(next + 1, n):
                indices[i] = 0
        return out

    def computeObj(self, Y):
        Y = np.array(Y)
        y_mean = sum(Y) / len(Y)
        val = 0
        for y in Y:
            val += self.distance(y, y_mean)
        return val

    def computeZMvY(self, y):
        dist = collections.OrderedDict()
        for v in self.Y:
            dist[v] = self.distance(y, v)
        dist = sorted(dist.items(), key=lambda kv: kv[1])
        return dist[:self.M]
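A short hedged usage sketch of the Solver above on 2-D points with a Euclidean metric. The points are passed as tuples so that computeZMvY can use them as dictionary keys, and all values here are made up for illustration.

import numpy as np

def euclidean(a, b):
    return float(np.linalg.norm(np.asarray(a, dtype=float) - np.asarray(b, dtype=float)))

# hypothetical 2-D data: a tight cluster of three points and a looser pair
Y = [(0.0, 0.0), (0.1, 0.0), (0.0, 0.1), (5.0, 5.0), (5.1, 5.0)]

solver = Solver(Y, M=3, epsilon=0.5, distance=euclidean)
subset, value = solver.solve()
print(subset)  # likely the three points near the origin
print(value)   # sum of distances from the subset to its mean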
py
1a43b5d0b8d536905daeb27a9f3cfe053837545e
import os import pytest from datetime import timedelta from tempfile import TemporaryDirectory from shapely.geometry import box from ost.s1_core.s1scenes import Sentinel1Scenes as S1Scenes from ost.s1_core.s1scene import Sentinel1Scene as S1Scene def test_s1_scenes(s1_slc_master, s1_slc_slave, s1_slc_ost_master, s1_slc_ost_slave ): filelist = [s1_slc_master, s1_slc_slave] with TemporaryDirectory(dir=os.getcwd()) as temp: s1_scenes = S1Scenes(filelist, processing_dir=temp, ard_type=None, cleanup=False ) master = s1_scenes.master slaves = s1_scenes.slaves assert isinstance(master, S1Scene) for slave in slaves: assert isinstance(slave, S1Scene) # Test bi-weekly products pairing process_scenes = s1_scenes.get_biweekly_pairs() start = process_scenes[0][0].timestamp end = process_scenes[0][1].timestamp dif = end-start control_dif = timedelta(days=11, hours=23, minutes=59, seconds=59) assert dif == control_dif product_list = [s1_slc_ost_master[1], s1_slc_ost_slave[1]] with TemporaryDirectory(dir=os.getcwd()) as temp: s1_scenes = S1Scenes(product_list, processing_dir=temp, ard_type=None, cleanup=False ) master = s1_scenes.master slaves = s1_scenes.slaves assert isinstance(master, S1Scene) for slave in slaves: assert isinstance(slave, S1Scene) # Test bi-weekly products pairing process_scenes = s1_scenes.get_biweekly_pairs() start = process_scenes[0][0].timestamp end = process_scenes[0][1].timestamp dif = end-start control_dif = timedelta(days=11, hours=23, minutes=59, seconds=59) assert dif == control_dif @pytest.mark.skipif("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", reason="Skipping this test on Travis CI." ) def test_create_stack(s1_grd_notnr): filelist = [s1_grd_notnr, s1_grd_notnr] with TemporaryDirectory(dir=os.getcwd()) as temp: s1_scenes = S1Scenes(filelist, processing_dir=temp, ard_type=None, cleanup=False ) out_stack = s1_scenes.create_grd_stack() @pytest.mark.skipif("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", reason="Skipping this test on Travis CI." ) def test_coherence_s1_scenes(s1_slc_master, s1_slc_slave, some_bounds_slc): filelist = [s1_slc_master, s1_slc_slave] with TemporaryDirectory(dir=os.getcwd()) as temp, \ TemporaryDirectory(dir=os.getcwd()) as temp_2: s1_scenes = S1Scenes(filelist, processing_dir=temp, ard_type=None, cleanup=False ) test_subset = box(some_bounds_slc[0], some_bounds_slc[1], some_bounds_slc[2], some_bounds_slc[3] ).wkt s1_scenes.create_coherence( processing_dir=temp, temp_dir=temp_2, timeliness='14days', subset=test_subset, )
py
1a43b60bee766b140209891720cb830544a71ad1
import random
import nltk
from nltk.tokenize.treebank import TreebankWordDetokenizer
import base64
import binascii
import cipheydists
import string
import cipheycore
import base58
import base62
import re


class encipher:
    """Generates encrypted text. Used for the NN and test_generator"""

    def __init__(self):  # pragma: no cover
        """Inits the encipher object"""
        self.text = self.read_text()
        self.MAX_SENTENCE_LENGTH = 5
        # nltk.download("punkt")
        self.crypto = encipher_crypto()

    def read_text(self):  # pragma: no cover
        f = open("hansard.txt", encoding="ISO-8859-1")
        x = f.read()
        splits = nltk.tokenize.sent_tokenize(x)
        return splits

    def getRandomSentence(self, size):  # pragma: no cover
        return TreebankWordDetokenizer().detokenize(
            random.sample(self.text, random.randint(1, size))
        )

    def getRandomEncryptedSentence(self, size):  # pragma: no cover
        sents = self.getRandomSentence(size)
        sentsEncrypted = self.crypto.randomEncrypt(sents)
        return {"PlainText Sentences": sents, "Encrypted Texts": sentsEncrypted}


class encipher_crypto:  # pragma: no cover
    """Holds the encryption functions and can randomly select an encryption function to use on text

    returns: {"text": t, "plaintext": c, "cipher": p, "suceeds": False}
    where suceeds is whether or not the text is really encrypted or falsely decrypted

    Uses Cyclic3's module to generate pseudo-random text"""

    def __init__(self):  # pragma: no cover
        self.methods = [
            self.Base64,
            self.Ascii,
            self.Base16,
            self.Base32,
            self.Binary,
            self.Hex,
            self.MorseCode,
            self.Reverse,
            self.Vigenere,
            self.base58_bitcoin,
            self.base58_ripple,
            self.b62,
        ]
        self.morse_dict = dict(cipheydists.get_charset("morse"))
        self.letters = string.ascii_lowercase
        self.group = cipheydists.get_charset("english")["lcase"]  # pragma: no cover

    def random_key(self, text) -> str:  # pragma: no cover
        if len(text) < 8:
            length = 3
        else:
            length = 8
        return self.random_string(length)

    def random_string(self, length) -> str:  # pragma: no cover
        return "".join(random.sample(self.letters, length))

    def randomEncrypt(self, text: str) -> dict:  # pragma: no cover
        """Randomly encrypts string with an encryption"""
        func__use = random.choice(self.methods)
        encryptedText = func__use(text)
        name = func__use.__name__
        return {"PlainText": text, "EncryptedText": encryptedText, "CipherUsed": name}

    def Base64(self, text: str) -> str:  # pragma: no cover
        """Turns text into base64 using the Python library

        args:
            text -> text to convert
        returns:
            text -> as base64"""
        return base64.b64encode(bytes(text, "utf-8")).decode("utf-8")

    def Caesar(self, s, k):  # pragma: no cover
        """Iterates through each letter and constructs the cipher text"""
        new_message = ""
        facr = k % 26
        for c in s:
            new_message += self.apply_rotation(c, facr)
        return new_message

    def apply_rotation(self, c, facr):  # pragma: no cover
        """Applies a shift of facr to the letter denoted by c"""
        if c.isalpha():
            lower = ord("A") if c.isupper() else ord("a")
            c = chr(lower + ((ord(c) - lower + facr) % 26))
        return c

    def Base32(self, text: str) -> str:  # pragma: no cover
        """Turns text into base32 using the Python library

        args:
            text -> text to convert
        returns:
            text -> as base32"""
        return base64.b32encode(bytes(text, "utf-8")).decode("utf-8")

    def Base16(self, text: str) -> str:  # pragma: no cover
        """Turns text into base16 using the Python library

        args:
            text -> text to convert
        returns:
            text -> as base16"""
        return base64.b16encode(bytes(text, "utf-8")).decode("utf-8")

    def Binary(self, text: str) -> str:  # pragma: no cover
        return " ".join(format(ord(x), "b") for x in text)  # pragma: no cover

    def Ascii(self, text: str) -> str:  # pragma: no cover
        res = [ord(c) for c in text]
        return " ".join([str(x) for x in res])

    def Hex(self, text: str) -> str:  # pragma: no cover
        return binascii.hexlify(text.encode()).decode("utf-8")

    def MorseCode(self, text: str) -> str:  # pragma: no cover
        morse = []
        for i in text:
            m = self.morse_dict.get(i.upper())
            if m is None:
                m = ""
            morse.append(m)
        output = morse
        # output = " ".join(MORSE_CODE_DICT.get(i.upper()) for i in text)
        return " ".join(output)

    def Reverse(self, text: str) -> str:
        return text[::-1]

    def Vigenere(self, plaintext):
        key = self.vig_key(plaintext, self.random_key(plaintext))
        return cipheycore.vigenere_encrypt(plaintext, key, self.group)

    def vig_key(self, msg, key):
        tab = dict()
        for counter, i in enumerate(self.group):
            tab[self.group[counter]] = counter
        real_key = []
        for i in key:
            real_key.append(tab[i])
        return real_key
        # vigenere_encrypt(msg, real_key, group)

    def base58_bitcoin(self, text: str):
        return base58.b58encode(bytes(text, "utf-8")).decode("utf-8")

    def base58_ripple(self, text: str):
        return base58.b58encode(
            bytes(text, "utf-8"), alphabet=base58.RIPPLE_ALPHABET
        ).decode("utf-8")

    def b62(self, text: str):
        return base62.decode(str(re.sub(r"[^A-Za-z1-9]+", "", text)))


# obj = encipher()
# print(obj.getRandomEncryptedSentence())
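The Caesar/apply_rotation pair above is dependency-free, so the same rotation logic can be exercised in isolation. A minimal standalone sketch (example strings and shift are made up):

def apply_rotation(c, facr):
    # shift a single letter by facr positions, preserving case; non-letters pass through
    if c.isalpha():
        lower = ord("A") if c.isupper() else ord("a")
        c = chr(lower + ((ord(c) - lower + facr) % 26))
    return c

def caesar(s, k):
    facr = k % 26
    return "".join(apply_rotation(c, facr) for c in s)

print(caesar("Hello, World", 3))  # Khoor, Zruog
assert caesar(caesar("Hello, World", 3), 26 - 3) == "Hello, World"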
py
1a43b6d44fce42232c58d8346d4840105851bda8
# Copyright 2018 The glTF-Blender-IO authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mathutils from . import gltf2_blender_export_keys from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached from io_scene_gltf2.io.com import gltf2_io from io_scene_gltf2.io.exp import gltf2_io_binary_data from io_scene_gltf2.io.com import gltf2_io_constants from io_scene_gltf2.blender.exp import gltf2_blender_gather_joints from io_scene_gltf2.blender.com import gltf2_blender_math @cached def gather_skin(blender_object, mesh_object, export_settings): """ Gather armatures, bones etc into a glTF2 skin object. :param blender_object: the object which may contain a skin :param mesh_object: the mesh object to be deformed :param export_settings: :return: a glTF2 skin object """ if not __filter_skin(blender_object, export_settings): return None return gltf2_io.Skin( extensions=__gather_extensions(blender_object, export_settings), extras=__gather_extras(blender_object, export_settings), inverse_bind_matrices=__gather_inverse_bind_matrices(blender_object, mesh_object, export_settings), joints=__gather_joints(blender_object, export_settings), name=__gather_name(blender_object, export_settings), skeleton=__gather_skeleton(blender_object, export_settings) ) def __filter_skin(blender_object, export_settings): if not export_settings[gltf2_blender_export_keys.SKINS]: return False if blender_object.type != 'ARMATURE' or len(blender_object.pose.bones) == 0: return False return True def __gather_extensions(blender_object, export_settings): return None def __gather_extras(blender_object, export_settings): return None def __gather_inverse_bind_matrices(blender_object, mesh_object, export_settings): inverse_matrices = [] axis_basis_change = mathutils.Matrix.Identity(4) if export_settings[gltf2_blender_export_keys.YUP]: axis_basis_change = mathutils.Matrix( ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))) # # artificial torso, as needed by glTF # inverse_bind_matrix = blender_object.matrix_world.inverted() * axis_basis_change.inverted() # for column in range(0, 4): # for row in range(0, 4): # inverse_matrices.append(inverse_bind_matrix[row][column]) # for blender_bone in blender_object.pose.bones: matrix_world = gltf2_blender_math.multiply(blender_object.matrix_world, mesh_object.matrix_world.inverted()) inverse_bind_matrix = gltf2_blender_math.multiply( axis_basis_change, gltf2_blender_math.multiply( matrix_world, blender_bone.bone.matrix_local ) ).inverted() for column in range(0, 4): for row in range(0, 4): inverse_matrices.append(inverse_bind_matrix[row][column]) binary_data = gltf2_io_binary_data.BinaryData.from_list(inverse_matrices, gltf2_io_constants.ComponentType.Float) return gltf2_io.Accessor( buffer_view=binary_data, byte_offset=None, component_type=gltf2_io_constants.ComponentType.Float, count=len(inverse_matrices) // gltf2_io_constants.DataType.num_elements(gltf2_io_constants.DataType.Mat4), extensions=None, extras=None, max=None, min=None, name=None, 
normalized=None, sparse=None, type=gltf2_io_constants.DataType.Mat4 ) def __gather_joints(blender_object, export_settings): # # the skeletal hierarchy groups below a 'root' joint # # TODO: add transform? # torso = gltf2_io.Node( # camera=None, # children=[], # extensions={}, # extras=None, # matrix=[], # mesh=None, # name="Skeleton_" + blender_object.name, # rotation=None, # scale=None, # skin=None, # translation=None, # weights=None # ) root_joints = [] # build the hierarchy of nodes out of the bones for blender_bone in blender_object.pose.bones: if not blender_bone.parent: root_joints.append(gltf2_blender_gather_joints.gather_joint(blender_bone, export_settings)) # joints is a flat list containing all nodes belonging to the skin joints = [] def __collect_joints(node): joints.append(node) for child in node.children: __collect_joints(child) for joint in root_joints: __collect_joints(joint) return joints def __gather_name(blender_object, export_settings): return blender_object.name def __gather_skeleton(blender_object, export_settings): # In the future support the result of https://github.com/KhronosGroup/glTF/pull/1195 return None # gltf2_blender_gather_nodes.gather_node(blender_object, export_settings)
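The inverse-bind-matrix gathering above flattens each 4x4 matrix in column-major order (outer loop over columns, inner loop over rows), which is the layout the glTF accessor expects. A small standalone sketch of that flattening order, using a plain nested list instead of mathutils and a made-up matrix:

# rows of a hypothetical 4x4 matrix, m[row][column]
m = [
    [1, 0, 0, 10],
    [0, 1, 0, 20],
    [0, 0, 1, 30],
    [0, 0, 0, 1],
]

flat = []
for column in range(4):
    for row in range(4):
        flat.append(m[row][column])

# column-major: each group of four values is one column of the matrix,
# so the translation (10, 20, 30) ends up in the final group
print(flat)  # [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 10, 20, 30, 1]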
py
1a43b77e1c717d087354a1fe9b4b8a1a9b81cf8a
# Generated by Django 3.2.6 on 2021-09-23 17:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('spyfall', '0004_alter_players_refresh_room'), ] operations = [ migrations.AddField( model_name='players', name='roles_ratio', field=models.CharField(default='8/8', max_length=200), ), ]
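The migration above adds a roles_ratio column to the Players model. The corresponding model declaration would presumably look like the sketch below; the field name, default and max_length are taken from the migration, while the rest of the Players model is assumed and omitted.

from django.db import models

class Players(models.Model):
    # ...existing fields on the spyfall Players model...
    roles_ratio = models.CharField(max_length=200, default='8/8')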
py
1a43b864efde6f246371b95880772f63c8ed7195
import torch device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class CTCLabelConverter(object): """ Convert between text-label and text-index """ def __init__(self, character): # character (str): set of the possible characters. dict_character = list(character) self.dict = {} for i, char in enumerate(dict_character): # NOTE: 0 is reserved for 'CTCblank' token required by CTCLoss self.dict[char] = i + 1 self.character = ['[CTCblank]'] + dict_character # dummy '[CTCblank]' token for CTCLoss (index 0) def encode(self, text, batch_max_length=60): """convert text-label into text-index. input: text: text labels of each image. [batch_size] batch_max_length: max length of text label in the batch. 25 by default output: text: text index for CTCLoss. [batch_size, batch_max_length] length: length of each text. [batch_size] """ length = [len(s) for s in text] # The index used for padding (=0) would not affect the CTC loss calculation. batch_text = torch.LongTensor(len(text), batch_max_length).fill_(0) for i, t in enumerate(text): text = list(t) text = [self.dict[char] for char in text] batch_text[i][:len(text)] = torch.LongTensor(text) return (batch_text.to(device), torch.IntTensor(length).to(device)) def decode(self, text_index, length): """ convert text-index into text-label. """ texts = [] for index, l in enumerate(length): t = text_index[index, :] char_list = [] for i in range(l): if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank. char_list.append(self.character[t[i]]) text = ''.join(char_list) texts.append(text) return texts class CTCLabelConverterForBaiduWarpctc(object): """ Convert between text-label and text-index for baidu warpctc """ def __init__(self, character): # character (str): set of the possible characters. dict_character = list(character) self.dict = {} for i, char in enumerate(dict_character): # NOTE: 0 is reserved for 'CTCblank' token required by CTCLoss self.dict[char] = i + 1 self.character = ['[CTCblank]'] + dict_character # dummy '[CTCblank]' token for CTCLoss (index 0) def encode(self, text, batch_max_length=25): """convert text-label into text-index. input: text: text labels of each image. [batch_size] output: text: concatenated text index for CTCLoss. [sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)] length: length of each text. [batch_size] """ length = [len(s) for s in text] text = ''.join(text) text = [self.dict[char] for char in text] return (torch.IntTensor(text), torch.IntTensor(length)) def decode(self, text_index, length): """ convert text-index into text-label. """ texts = [] index = 0 for l in length: t = text_index[index:index + l] char_list = [] for i in range(l): if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank. char_list.append(self.character[t[i]]) text = ''.join(char_list) texts.append(text) index += l return texts class AttnLabelConverter(object): """ Convert between text-label and text-index """ def __init__(self, character): # character (str): set of the possible characters. # [GO] for the start token of the attention decoder. [s] for end-of-sentence token. list_token = ['[GO]', '[s]'] # ['[s]','[UNK]','[PAD]','[GO]'] list_character = list(character) self.character = list_token + list_character self.dict = {} for i, char in enumerate(self.character): # print(i, char) self.dict[char] = i def encode(self, text, batch_max_length=25): """ convert text-label into text-index. input: text: text labels of each image. 
[batch_size] batch_max_length: max length of text label in the batch. 25 by default output: text : the input of attention decoder. [batch_size x (max_length+2)] +1 for [GO] token and +1 for [s] token. text[:, 0] is [GO] token and text is padded with [GO] token after [s] token. length : the length of output of attention decoder, which count [s] token also. [3, 7, ....] [batch_size] """ length = [len(s) + 1 for s in text] # +1 for [s] at end of sentence. # batch_max_length = max(length) # this is not allowed for multi-gpu setting batch_max_length += 1 # additional +1 for [GO] at first step. batch_text is padded with [GO] token after [s] token. batch_text = torch.LongTensor(len(text), batch_max_length + 1).fill_(0) for i, t in enumerate(text): text = list(t) text.append('[s]') text = [self.dict[char] for char in text] batch_text[i][1:1 + len(text)] = torch.LongTensor(text) # batch_text[:, 0] = [GO] token return (batch_text.to(device), torch.IntTensor(length).to(device)) def decode(self, text_index, length): """ convert text-index into text-label. """ texts = [] for index, l in enumerate(length): text = ''.join([self.character[i] for i in text_index[index, :]]) texts.append(text) return texts class Averager(object): """Compute average for torch.Tensor, used for loss average.""" def __init__(self): self.reset() def add(self, v): count = v.data.numel() v = v.data.sum() self.n_count += count self.sum += v def reset(self): self.n_count = 0 self.sum = 0 def val(self): res = 0 if self.n_count != 0: res = self.sum / float(self.n_count) return res
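A short hedged usage sketch of CTCLabelConverter above with a made-up character set; tensor shapes follow the docstrings (batch_text is [batch_size, batch_max_length], length holds per-label lengths). The labels deliberately contain no adjacent repeated characters, because CTC decoding collapses repeats and drops the blank (index 0).

converter = CTCLabelConverter("abcdefghijklmnopqrstuvwxyz0123456789")

labels = ["world", "ctc1"]
text, length = converter.encode(labels, batch_max_length=10)
print(text.shape)       # torch.Size([2, 10]), zero-padded after each label
print(length.tolist())  # [5, 4]

# decoding the padded indices with the true lengths recovers these labels
print(converter.decode(text, length))  # ['world', 'ctc1']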
py
1a43b8af5b58b121cc5db82f8513a94435802929
# Copyright 2019-2021 Simon Zigelli # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.test import TestCase from stage.consumers import generate_channel_group_name class ExtractorUtilitiesTests(TestCase): def test_generate_channel_group_name(self): self.assertEqual("congregation.console.testAZaz", generate_channel_group_name("console", "testAZaz")) self.assertEqual("congregation.console.T.e_s-t", generate_channel_group_name("console", "T.e_s-t")) self.assertEqual("congregation.console.t_est_", generate_channel_group_name("console", "t%est%"))
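The test above only pins the expected outputs. A generate_channel_group_name consistent with those three cases could look like the hedged sketch below; the prefix and the sanitisation rule are inferred from the assertions, not taken from the stage.consumers source.

import re

def generate_channel_group_name(kind, congregation):
    # Channels group names only allow ASCII alphanumerics, hyphens, underscores and periods,
    # so anything else is replaced, matching the "t%est%" -> "t_est_" expectation above.
    safe = re.sub(r"[^a-zA-Z0-9._-]", "_", congregation)
    return f"congregation.{kind}.{safe}"

assert generate_channel_group_name("console", "testAZaz") == "congregation.console.testAZaz"
assert generate_channel_group_name("console", "T.e_s-t") == "congregation.console.T.e_s-t"
assert generate_channel_group_name("console", "t%est%") == "congregation.console.t_est_"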
py
1a43b8c7bb97d44debccff4afbe1ff9762c602d5
from MyPyTorchAPI.AbsModelContainer import * class ModelContainerGNet(AbsModelContainer): def __init__(self, model, wName='Weights/main'): super().__init__(model, wName) self.bn = 0 self.optimizer = optim.RMSprop(model.parameters(), lr=10 ** -3, weight_decay=10 ** -3) self.loss = nn.modules.loss.L1Loss() def forwardProp(self, dataInTuple): pass # (x, y, u) = dataInTuple # self.x, self.y, self.u = self.toGPU(x, y, u) # self.pr, self.A, self.B = self.model(x, u) def getLoss(self): pass # loss = self.loss(self.pr[:, 0, None], self.y[:, 0, None]) + \ # 1 * self.loss(self.pr[:, 1, None], self.y[:, 1, None]) # return loss def prepResults(self, N): pass # self.result0 = np.zeros((N, 2)) # self.result1 = np.zeros((N, 2, 2)) # self.result2 = np.zeros((N, 2)) def saveToResults(self, start, last): pass # self.result0[start:last, :] = self.toCPUNumpy(self.pr) # self.result1[start:last, :] = self.toCPUNumpy(self.A) # self.result2[start:last, :] = self.toCPUNumpy(self.B) def returnResults(self): pass #return self.result0, self.result1, self.result2 def changeOptim(self, epoch): pass
py
1a43b8edfd17141cfd4a476b15c681a85a881516
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. # snippet-start:[acm.python.remove_tags_for_certificate.complete] import boto3 # Create ACM client acm = boto3.client('acm') # Remove tag(s) from the specified certificate. response = acm.remove_tags_from_certificate( CertificateArn='arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012', Tags=[ { 'Key': 'TagKey1', 'Value': 'TagValue1' }, { 'Key': 'TagKey2', 'Value': 'TagValue2' }, ] ) print(response) # snippet-end:[acm.python.remove_tags_for_certificate.complete] # snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.] # snippet-sourcedescription:[remove_tags_from_certificate.py demonstrates how to remove one or more tags from an AWS Certificate Manager certificate. ] # snippet-keyword:[Python] # snippet-sourcesyntax:[python] # snippet-sourcesyntax:[python] # snippet-keyword:[AWS SDK for Python (Boto3)] # snippet-keyword:[Code Sample] # snippet-keyword:[AWS Certificate Manager] # snippet-service:[acm] # snippet-sourcetype:[full-example] # snippet-sourcedate:[2018-12-26] # snippet-sourceauthor:[walkerk1980]
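After removing tags it is common to confirm the result. A hedged companion sketch using the documented list_tags_for_certificate call, with the same placeholder ARN as above:

import boto3

acm = boto3.client('acm')

# List the tags that remain on the certificate after the removal above.
response = acm.list_tags_for_certificate(
    CertificateArn='arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012'
)
print(response['Tags'])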