Each row below has the following fields (the numbers are the minimum and maximum values reported for the dataset):
content: string, lengths 7 to 928k
avg_line_length: float64, 3.5 to 33.8k
max_line_length: int64, 6 to 139k
alphanum_fraction: float64, 0.08 to 0.96
licenses: sequence
repository_name: string, lengths 7 to 104
path: string, lengths 4 to 230
size: int64, 7 to 928k
lang: string, 1 distinct value
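The numeric columns can be derived directly from each file's text. The sketch below shows one plausible way to compute them; the helper name is illustrative, and whether size and alphanum_fraction are measured over bytes or characters is an assumption rather than something the schema states.

def file_stats(content: str) -> dict:
    # Illustrative definitions of the numeric columns above; not the dataset's actual pipeline.
    lines = content.splitlines() or [""]
    line_lengths = [len(line) for line in lines]
    alnum_chars = sum(ch.isalnum() for ch in content)
    return {
        "size": len(content.encode("utf-8")),                      # assumption: bytes
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": alnum_chars / len(content) if content else 0.0,  # assumption: characters
    }

Applied to a row's content field, these definitions would have to reproduce the stored values exactly (for example 34.207469 and 0.632703 in the first row) to match the dataset's own computation; treat them as a sketch.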
""" Test to verify performance of attaching number of pods as a bulk, each pod attached to one pvc only The test results will be uploaded to the ES server """ import logging import os import pytest import pathlib import time from concurrent.futures import ThreadPoolExecutor from ocs_ci.framework.testlib import performance, polarion_id from ocs_ci.helpers import helpers from ocs_ci.helpers.helpers import get_full_test_logs_path from ocs_ci.ocs import defaults, constants, scale_lib from ocs_ci.ocs.resources.pod import get_pod_obj from ocs_ci.ocs.perftests import PASTest from ocs_ci.ocs.perfresult import ResultsAnalyse from ocs_ci.ocs.resources.objectconfigfile import ObjectConfFile from ocs_ci.utility.utils import ocsci_log_path log = logging.getLogger(__name__) @performance class TestBulkPodAttachPerformance(PASTest): """ Test to measure performance of attaching pods to pvc in a bulk """ pvc_size = "1Gi" def setup(self): """ Setting up test parameters """ log.info("Starting the test setup") super(TestBulkPodAttachPerformance, self).setup() self.benchmark_name = "bulk_pod_attach_time" # Pulling the pod image to the worker node, so pull image will not calculate # in the total attach time helpers.pull_images(constants.PERF_IMAGE) @pytest.fixture() def base_setup(self, project_factory, interface_type, storageclass_factory): """ A setup phase for the test Args: interface_type: Interface type storageclass_factory: A fixture to create everything needed for a storage class """ self.interface = interface_type self.sc_obj = storageclass_factory(self.interface) proj_obj = project_factory() self.namespace = proj_obj.namespace if self.interface == constants.CEPHFILESYSTEM: self.sc = "CephFS" if self.interface == constants.CEPHBLOCKPOOL: self.sc = "RBD" @pytest.mark.parametrize( argnames=["interface_type", "bulk_size"], argvalues=[ pytest.param( *[constants.CEPHBLOCKPOOL, 120], ), pytest.param( *[constants.CEPHBLOCKPOOL, 240], ), pytest.param( *[constants.CEPHFILESYSTEM, 120], ), pytest.param( *[constants.CEPHFILESYSTEM, 240], ), ], ) @pytest.mark.usefixtures(base_setup.__name__) @polarion_id("OCS-1620") def test_bulk_pod_attach_performance(self, teardown_factory, bulk_size): """ Measures pods attachment time in bulk_size bulk Args: teardown_factory: A fixture used when we want a new resource that was created during the tests to be removed in the teardown phase. bulk_size: Size of the bulk to be tested Returns: """ # Getting the test start time test_start_time = PASTest.get_time() log.info(f"Start creating bulk of new {bulk_size} PVCs") pvc_objs, _ = helpers.create_multiple_pvcs( sc_name=self.sc_obj.name, namespace=self.namespace, number_of_pvc=bulk_size, size=self.pvc_size, burst=True, ) for pvc_obj in pvc_objs: pvc_obj.reload() teardown_factory(pvc_obj) with ThreadPoolExecutor(max_workers=5) as executor: for pvc_obj in pvc_objs: executor.submit( helpers.wait_for_resource_state, pvc_obj, constants.STATUS_BOUND ) executor.submit(pvc_obj.reload) start_time = helpers.get_provision_time( self.interface, pvc_objs, status="start" ) end_time = helpers.get_provision_time(self.interface, pvc_objs, status="end") total_time = (end_time - start_time).total_seconds() log.info( f"{self.interface}: Bulk of {bulk_size} PVCs creation time is {total_time} seconds." 
) pvc_names_list = [] for pvc_obj in pvc_objs: pvc_names_list.append(pvc_obj.name) log.info(f"{self.interface} : Before pod attach") bulk_start_time = time.time() pod_data_list = list() pod_data_list.extend( scale_lib.attach_multiple_pvc_to_pod_dict( pvc_list=pvc_names_list, namespace=self.namespace, pvcs_per_pod=1, ) ) lcl = locals() tmp_path = pathlib.Path(ocsci_log_path()) obj_name = "obj1" # Create kube_job for pod creation lcl[f"pod_kube_{obj_name}"] = ObjectConfFile( name=f"pod_kube_{obj_name}", obj_dict_list=pod_data_list, project=defaults.ROOK_CLUSTER_NAMESPACE, tmp_path=tmp_path, ) lcl[f"pod_kube_{obj_name}"].create(namespace=self.namespace) log.info("Checking that pods are running") # Check all the PODs reached Running state pod_running_list = scale_lib.check_all_pod_reached_running_state_in_kube_job( kube_job_obj=lcl[f"pod_kube_{obj_name}"], namespace=self.namespace, no_of_pod=len(pod_data_list), timeout=180, ) for pod_name in pod_running_list: pod_obj = get_pod_obj(pod_name, self.namespace) teardown_factory(pod_obj) bulk_end_time = time.time() bulk_total_time = bulk_end_time - bulk_start_time log.info( f"Bulk attach time of {len(pod_running_list)} pods is {bulk_total_time} seconds" ) # Collecting environment information self.get_env_info() # Initialize the results doc file. full_log_path = get_full_test_logs_path(cname=self) self.results_path = get_full_test_logs_path(cname=self) full_log_path += f"-{self.sc}" full_results = self.init_full_results( ResultsAnalyse( self.uuid, self.crd_data, full_log_path, "pod_bulk_attachtime" ) ) full_results.add_key("storageclass", self.sc) full_results.add_key("pod_bulk_attach_time", bulk_total_time) full_results.add_key("pvc_size", self.pvc_size) full_results.add_key("bulk_size", bulk_size) # Getting the test end time test_end_time = PASTest.get_time() # Add the test time to the ES report full_results.add_key( "test_time", {"start": test_start_time, "end": test_end_time} ) # Write the test results into the ES server if full_results.es_write(): res_link = full_results.results_link() # write the ES link to the test results in the test log. log.info(f"The result can be found at : {res_link}") # Create text file with results of all subtest (4 - according to the parameters) self.write_result_to_file(res_link) def test_bulk_pod_attach_results(self): """ This is not a test - it is only check that previous test ran and finish as expected and reporting the full results (links in the ES) of previous tests (4) """ self.number_of_tests = 4 self.results_path = get_full_test_logs_path( cname=self, fname="test_bulk_pod_attach_performance" ) self.results_file = os.path.join(self.results_path, "all_results.txt") log.info(f"Check results in {self.results_file}") self.check_tests_results() self.push_to_dashboard(test_name="Bulk Pod Attach Time") def init_full_results(self, full_results): """ Initialize the full results object which will send to the ES server Args: full_results (obj): an empty ResultsAnalyse object Returns: ResultsAnalyse (obj): the input object filled with data """ for key in self.environment: full_results.add_key(key, self.environment[key]) full_results.add_key("index", full_results.new_index) return full_results
avg_line_length: 34.207469
max_line_length: 106
alphanum_fraction: 0.632703
licenses: [ "MIT" ]
repository_name: Sravikaz/ocs-ci
path: tests/e2e/performance/csi_tests/test_bulk_pod_attachtime_performance.py
size: 8,244
lang: Python
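As a rough consistency check on this row: size / avg_line_length = 8,244 / 34.207469 ≈ 241, so the file is about 241 lines long; that line count is an inference from the listed values, not a stored field.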
import datetime import logging import os import elastalert.elastalert import elastalert.utils.util import mock import pytest from elastalert import config from elastalert.ruletypes import AnyRule from elastalert.utils.time import dt_to_ts, ts_to_dt writeback_index = "wb" def pytest_addoption(parser): parser.addoption( "--runelasticsearch", action="store_true", default=False, help="run elasticsearch tests", ) def pytest_collection_modifyitems(config, items): if config.getoption("--runelasticsearch"): # --runelasticsearch given in cli: run elasticsearch tests, skip ordinary unit # tests skip_unit_tests = pytest.mark.skip( reason="not running when --runelasticsearch option is used to run" ) for item in items: if "elasticsearch" not in item.keywords: item.add_marker(skip_unit_tests) else: # skip elasticsearch tests skip_elasticsearch = pytest.mark.skip( reason="need --runelasticsearch option to run" ) for item in items: if "elasticsearch" in item.keywords: item.add_marker(skip_elasticsearch) @pytest.fixture def cls_monkeypatch(request, monkeypatch): request.cls.monkeypatch = monkeypatch @pytest.fixture(scope="function", autouse=True) def reset_loggers(): """Prevent logging handlers from capturing temporary file handles. For example, a test that uses the `capsys` fixture and calls `logging.exception()` will initialize logging with a default handler that captures `sys.stderr`. When the test ends, the file handles will be closed and `sys.stderr` will be returned to its original handle, but the logging will have a dangling reference to the temporary handle used in the `capsys` fixture. """ logger = logging.getLogger() for handler in logger.handlers: logger.removeHandler(handler) class mock_es_indices_client(object): def __init__(self): self.exists = mock.Mock(return_value=True) class mock_es_client(object): def __init__(self, host="es", port=14900): self.host = host self.port = port self.return_hits = [] self.search = mock.Mock() self.deprecated_search = mock.Mock() self.create = mock.Mock() self.index = mock.Mock() self.delete = mock.Mock() self.info = mock.Mock( return_value={"status": 200, "name": "foo", "version": {"number": "2.0"}} ) self.ping = mock.Mock(return_value=True) self.indices = mock_es_indices_client() self.es_version = mock.Mock(return_value="2.0") self.is_atleastfive = mock.Mock(return_value=False) self.is_atleastsix = mock.Mock(return_value=False) self.is_atleastsixtwo = mock.Mock(return_value=False) self.is_atleastsixsix = mock.Mock(return_value=False) self.is_atleastseven = mock.Mock(return_value=False) def writeback_index_side_effect(index, doc_type): if doc_type == "silence": return index + "_silence" elif doc_type == "past_elastalert": return index + "_past" elif doc_type == "elastalert_status": return index + "_status" elif doc_type == "elastalert_error": return index + "_error" return index self.resolve_writeback_index = mock.Mock( side_effect=writeback_index_side_effect ) def mock_ruletype(conf, es): rule = AnyRule(conf, es=es) rule.add_data = mock.Mock() rule.add_count_data = mock.Mock() rule.garbage_collect = mock.Mock() rule.add_terms_data = mock.Mock() rule.find_pending_aggregate_alert = mock.Mock() rule.find_pending_aggregate_alert.return_value = False rule.is_silenced = mock.Mock() rule.is_silenced.return_value = False rule.matches = [] rule.get_match_data = lambda x: x rule.get_match_str = lambda x: "some stuff happened" rule.garbage_collect = mock.Mock() return rule class mock_alert(object): def __init__(self): self.alert = mock.Mock() def get_info(self): return 
{"type": "mock"} @pytest.fixture def configured(monkeypatch): test_args = mock.Mock() test_args.config = "test_config" test_args.rule = None test_args.debug = False test_args.es_debug_trace = None test_args.silence = False test_args.timeout = 0 _conf = { "args": test_args, "debug": False, "rules_loader": "test", "rules_folder": "rules", "run_every": datetime.timedelta(minutes=10), "buffer_time": datetime.timedelta(minutes=5), "alert_time_limit": datetime.timedelta(hours=24), "es_client": config.ESClient( es_host="es", es_port=12345, es_password="", es_username="", es_conn_timeout=1234, es_url_prefix="es/test", es_send_get_body_as="GET", ), "writeback_index": "wb", "writeback_alias": "wb_a", "max_query_size": 10000, "old_query_limit": datetime.timedelta(weeks=1), "disable_rules_on_error": False, "scroll_keepalive": "30s", } monkeypatch.setattr(config, "_cfg", config.Config(**_conf)) @pytest.fixture def ea(): test_args = mock.Mock() test_args.config = "test_config" test_args.rule = None test_args.debug = False test_args.es_debug_trace = None test_args.silence = False test_args.timeout = datetime.timedelta(seconds=0) test_args.end = None _conf = { "args": test_args, "debug": False, "rules_loader": "test", "rules_folder": "rules", "run_every": datetime.timedelta(minutes=10), "buffer_time": datetime.timedelta(minutes=5), "alert_time_limit": datetime.timedelta(hours=24), "es_client": config.ESClient( es_host="es", es_port=12345, es_password="", es_username="", es_conn_timeout=1234, es_url_prefix="es/test", es_send_get_body_as="GET", ), "mail_settings": config.MailSettings(notify_email=[]), "writeback_index": "wb", "writeback_alias": "wb_a", "max_query_size": 10000, "old_query_limit": datetime.timedelta(weeks=1), "disable_rules_on_error": False, "scroll_keepalive": "30s", } conf = config.Config(**_conf) rules = { "testrule": { "name": "testrule", "es_host": "", "es_port": 14900, "index": "idx", "filter": [], "include": ["@timestamp"], "aggregation": datetime.timedelta(0), "realert": datetime.timedelta(0), "processed_hits": {}, "timestamp_field": "@timestamp", "match_enhancements": [], "rule_file": "blah.yaml", "max_query_size": 10000, "ts_to_dt": ts_to_dt, "dt_to_ts": dt_to_ts, "_source_enabled": True, "run_every": datetime.timedelta(seconds=15), } } elastalert.elastalert.elasticsearch_client = mock_es_client class mock_rule_loader(object): required_globals = frozenset([]) def __init__(self, conf): self.base_config = conf self.load_configuration = mock.Mock() def load(self, args): return rules def get_hashes(self, args): return {} def load_rule(self, str: str): return {} with mock.patch("elastalert.elastalert.BackgroundScheduler"): with mock.patch( "elastalert.elastalert.config.Config.load_config" ) as load_config: with mock.patch( "elastalert.elastalert.loader_mapping" ) as loader_mapping, mock.patch( "elastalert.elastalert.config.configure_logging" ): loader_mapping.get.return_value = mock_rule_loader load_config.return_value = conf ea = elastalert.elastalert.ElastAlerter(["--pin_rules"]) rules["testrule"]["alert"] = [mock_alert()] ea.rule_es = mock_es_client() ea.rule_es.is_atleastsixtwo.return_value = True ea.rule_es.is_atleastfive.return_value = True ea.rule_es.index.return_value = {"_id": "ABCD", "created": True} ea.rules["testrule"]["type"] = mock_ruletype(rules["testrule"], ea.rule_es) ea.testrule = ea.rules["testrule"]["type"] ea.conf = conf ea.writeback_es = mock_es_client() ea.writeback_es.is_atleastsixtwo.return_value = True ea.writeback_es.is_atleastfive.return_value = True 
ea.writeback_es.search.return_value = { "hits": {"total": {"value": "0"}, "hits": []} } ea.writeback_es.deprecated_search.return_value = {"hits": {"hits": []}} ea.writeback_es.index.return_value = {"_id": "ABCD", "created": True} ea.es = mock_es_client() ea.es.index.return_value = {"_id": "ABCD", "created": True} ea.thread_data.num_hits = 0 ea.thread_data.num_dupes = 0 return ea @pytest.fixture(scope="function") def environ(): """py.test fixture to get a fresh mutable environment.""" old_env = os.environ new_env = dict(list(old_env.items())) os.environ = new_env yield os.environ os.environ = old_env
avg_line_length: 32.125
max_line_length: 86
alphanum_fraction: 0.619624
licenses: [ "Apache-2.0" ]
repository_name: JasperJuergensen/elastalert
path: tests/conftest.py
size: 9,509
lang: Python
import time
import os

from flask import Flask, jsonify, make_response
from flask.ext.sqlalchemy import SQLAlchemy
from redis import Redis
from rq import Queue

from fetch import fetch_user_photos

app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
db = SQLAlchemy(app)
request_queue = Queue(connection=Redis())

from models import Profile


@app.route("/")
def index():
    return jsonify({
        "msg": "Welcome to PyPhotoAnalytics",
        "routes": ["/api", "/api/users", "/api/users/<username>"]
    })


@app.route("/api")
def api():
    return jsonify({"msg": "Welcome to PyPhotoAnalytics API"})


@app.route("/api/users/")
def get_users():
    return jsonify({"msg": "specify username /api/users/<username>"})


@app.route("/api/users/<username>")
def get_user_media(username):
    job = request_queue.enqueue(fetch_user_photos, username)
    time.sleep(7)
    result = job.result
    if result is None:
        return jsonify({"msg": "Still processing :("})
    elif result.status_code == 200:
        data = result.json()
        return jsonify(**data)
    else:
        return jsonify({"msg": "Oh gawd no"})


@app.errorhandler(404)
def not_found(error):
    return make_response(jsonify({"error": "Not Found"}), 404)


if __name__ == "__main__":
    app.run()
avg_line_length: 24.377358
max_line_length: 69
alphanum_fraction: 0.673375
licenses: [ "MIT" ]
repository_name: oosidat/pyphotoanalytics
path: app.py
size: 1,292
lang: Python
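Rows with this schema are typically consumed through the Hugging Face datasets library. A minimal sketch, assuming streaming access and a placeholder dataset identifier (not this dataset's real name):

from datasets import load_dataset

# "org/code-dataset" is a placeholder identifier; substitute the real one.
ds = load_dataset("org/code-dataset", split="train", streaming=True)

# Filter on the metadata columns shown above, e.g. keep Python files with short lines.
subset = ds.filter(lambda row: row["lang"] == "Python" and row["max_line_length"] <= 120)

for row in subset.take(3):
    print(row["repository_name"], row["path"], row["size"])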
"""A Couchbase CLI subcommand""" import getpass import inspect import ipaddress import json import os import platform import random import re import string import subprocess import sys import urllib.parse import tempfile import time from typing import Optional, List, Any, Dict from argparse import ArgumentError, ArgumentParser, HelpFormatter, Action, SUPPRESS from operator import itemgetter from cluster_manager import ClusterManager from pbar import TopologyProgressBar try: from cb_version import VERSION # pylint: disable=import-error except ImportError: VERSION = "0.0.0-0000-community" print(f'WARNING: Could not import cb_version, setting VERSION to {VERSION}') COUCHBASE_DEFAULT_PORT = 8091 BUCKET_PRIORITY_HIGH_INT = 8 BUCKET_PRIORITY_HIGH_STR = "high" BUCKET_PRIORITY_LOW_INT = 3 BUCKET_PRIORITY_LOW_STR = "low" BUCKET_TYPE_COUCHBASE = "membase" BUCKET_TYPE_MEMCACHED = "memcached" CB_BIN_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "bin")) CB_ETC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "etc", "couchbase")) CB_LIB_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "lib")) # On MacOS the config is store in the users home directory if platform.system() == "Darwin": CB_CFG_PATH = os.path.expanduser("~/Library/Application Support/Couchbase/var/lib/couchbase") else: CB_CFG_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "var", "lib", "couchbase")) CB_MAN_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "share")) if os.name == "nt": CB_MAN_PATH = os.path.join(CB_MAN_PATH, "html") else: CB_MAN_PATH = os.path.join(CB_MAN_PATH, "man", "man1") def remove_prefix(val: str, prefix: str) -> str: """This function removes a prefix from a string. Note this is a built-in function in Python 3.9 once we upgrade to it we should use it instead. """ return val[len(prefix):] if val.startswith(prefix) else val def rest_initialiser(cluster_init_check=False, version_check=False, enterprise_check=None): """rest_initialiser is a decorator that does common subcommand tasks. The decorator will always creates a cluster manager and assign it to the subcommand variable rest :param cluster_init_check: if true it will check if the cluster is initialized before executing the subcommand :param version_check: if true it will check if the cluster and CLI version match if they do not it prints a warning :param enterprise_check: if true it will check if the cluster is enterprise and fail if not. If it is false it does the check but it does not fail if not enterprise. If none it does not perform the check. 
The result of the check is stored on the instance parameter enterprise """ def inner(fn): def decorator(self, opts): self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify, opts.cacert, opts.debug) if cluster_init_check: check_cluster_initialized(self.rest) if version_check: check_versions(self.rest) if enterprise_check is not None: enterprise, errors = self.rest.is_enterprise() _exit_if_errors(errors) if enterprise_check and not enterprise: _exit_if_errors(['Command only available in enterprise edition']) self.enterprise = enterprise return fn(self, opts) return decorator return inner def check_cluster_initialized(rest): initialized, errors = rest.is_cluster_initialized() if errors: _exit_if_errors(errors) if not initialized: _exit_if_errors(["Cluster is not initialized, use cluster-init to initialize the cluster"]) def check_versions(rest): result, errors = rest.pools() if errors: return server_version = result['implementationVersion'] if server_version is None or VERSION is None: return major_couch = server_version[: server_version.index('.')] minor_couch = server_version[server_version.index('.') + 1: server_version.index('.', len(major_couch) + 1)] major_cli = VERSION[: VERSION.index('.')] minor_cli = VERSION[VERSION.index('.') + 1: VERSION.index('.', len(major_cli) + 1)] if major_cli != major_couch or minor_cli != minor_couch: _warning(f'couchbase-cli version {VERSION} does not match couchbase server version {server_version}') def index_storage_mode_to_param(value, default="plasma"): """Converts the index storage mode to what Couchbase understands""" if value == "default": return default if value == "memopt": return "memory_optimized" return value def process_services(services, enterprise): """Converts services to a format Couchbase understands""" sep = "," if services.find(sep) < 0: # backward compatible when using ";" as separator sep = ";" svc_set = set([w.strip() for w in services.split(sep)]) svc_candidate = ["data", "index", "query", "fts", "eventing", "analytics", "backup"] for svc in svc_set: if svc not in svc_candidate: return None, [f'`{svc}` is not a valid service'] if not enterprise and svc in ["eventing", "analytics", "backup"]: return None, [f'{svc} service is only available on Enterprise Edition'] if not enterprise: # Valid CE node service configuration ce_svc_30 = set(["data"]) ce_svc_40 = set(["data", "index", "query"]) ce_svc_45 = set(["data", "index", "query", "fts"]) if svc_set not in [ce_svc_30, ce_svc_40, ce_svc_45]: return None, [f"Invalid service configuration. 
Community Edition only supports nodes with the following" f" combinations of services: '{''.join(ce_svc_30)}', '{','.join(ce_svc_40)}' or " f"'{','.join(ce_svc_45)}'"] services = ",".join(svc_set) for old, new in [[";", ","], ["data", "kv"], ["query", "n1ql"], ["analytics", "cbas"]]: services = services.replace(old, new) return services, None def find_subcommands(): """Finds all subcommand classes""" clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass) subclasses = [cls for cls in clsmembers if issubclass(cls[1], (Subcommand, LocalSubcommand)) and cls[1] not in [Subcommand, LocalSubcommand]] subcommands = [] for subclass in subclasses: name = '-'.join([part.lower() for part in re.findall('[A-Z][a-z]*', subclass[0])]) subcommands.append((name, subclass[1])) return subcommands def _success(msg): print(f'SUCCESS: {msg}') def _deprecated(msg): print(f'DEPRECATED: {msg}') def _warning(msg): print(f'WARNING: {msg}') def _exit_if_errors(errors): if errors: for error in errors: # Some endpoint return errors prefixed with '_ -' this has to be stripped out. For more information see # MB-42801 print(f'ERROR: {remove_prefix(error, "_ -").lstrip(" ")}') sys.exit(1) def _exit_on_file_write_failure(fname, to_write): try: wfile = open(fname, 'w') wfile.write(to_write) wfile.close() except IOError as error: _exit_if_errors([error]) def _exit_on_file_read_failure(fname, to_report=None): try: rfile = open(fname, 'r') read_bytes = rfile.read() rfile.close() return read_bytes except IOError as error: if to_report is None: _exit_if_errors([f'{error.strerror} `{fname}`']) else: _exit_if_errors([to_report]) def apply_default_port(nodes): """ Adds the default port if the port is missing. @type nodes: string @param nodes: A comma seprated list of nodes @rtype: array of strings @return: The nodes with the port postfixed on each one """ nodes = nodes.split(',') def append_port(node): if re.match(r'.*:\d+$', node): return node return f'{node}:8091' return [append_port(x) for x in nodes] class CLIHelpFormatter(HelpFormatter): """Format help with indented section bodies""" def __init__(self, prog, indent_increment=2, max_help_position=30, width=None): HelpFormatter.__init__(self, prog, indent_increment, max_help_position, width) def add_argument(self, action): if action.help is not SUPPRESS: # find all invocations get_invocation = self._format_action_invocation invocations = [get_invocation(action)] for subaction in self._iter_indented_subactions(action): invocations.append(get_invocation(subaction)) # update the maximum item length invocation_length = max([len(s) for s in invocations]) action_length = invocation_length + self._current_indent + 2 self._action_max_length = max(self._action_max_length, action_length) # add the item to the list self._add_item(self._format_action, [action]) def _format_action_invocation(self, action): if not action.option_strings: metavar, = self._metavar_formatter(action, action.dest)(1) return metavar else: parts = [] if action.nargs == 0: parts.extend(action.option_strings) return ','.join(parts) else: default = action.dest args_string = self._format_args(action, default) for option_string in action.option_strings: parts.append(option_string) return ','.join(parts) + ' ' + args_string class CBDeprecatedAction(Action): """Indicates that a specific option is deprecated""" def __call__(self, parser, namespace, values, option_string=None): _deprecated('Specifying ' + '/'.join(self.option_strings) + ' is deprecated') if self.nargs == 0: setattr(namespace, self.dest, 
self.const) else: setattr(namespace, self.dest, values) class CBHostAction(Action): """Allows the handling of hostnames on the command line""" def __call__(self, parser, namespace, values, option_string=None): parsed = urllib.parse.urlparse(values) # If the netloc is empty then it means that there was no scheme added # to the URI and we are parsing it as a path. In this case no scheme # means HTTP so we can add that scheme to the hostname provided. if parsed.netloc == "": parsed = urllib.parse.urlparse("http://" + values) if parsed.scheme == "": parsed = urllib.parse.urlparse("http://" + values) if parsed.path != "" or parsed.params != "" or parsed.query != "" or parsed.fragment != "": raise ArgumentError(self, f"{values} is not an accepted hostname") if not parsed.hostname: raise ArgumentError(self, f"{values} is not an accepted hostname") hostname_regex = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*' + r'([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$') if not hostname_regex.match(parsed.hostname): try: ipaddress.ip_address(parsed.hostname) except ValueError as val_error: raise ArgumentError(self, f"{values} is not an accepted hostname") from val_error scheme = parsed.scheme port = None if scheme in ["http", "couchbase"]: if not parsed.port: port = 8091 if scheme == "couchbase": scheme = "http" elif scheme in ["https", "couchbases"]: if not parsed.port: port = 18091 if scheme == "couchbases": scheme = "https" else: raise ArgumentError(self, "%s is not an accepted scheme" % scheme) if parsed.port: setattr(namespace, self.dest, (scheme + "://" + parsed.netloc)) else: setattr(namespace, self.dest, (scheme + "://" + parsed.netloc + ":" + str(port))) class CBEnvAction(Action): """Allows the custom handling of environment variables for command line options""" def __init__(self, envvar, required=True, default=None, **kwargs): if not default and envvar and envvar in os.environ: default = os.environ[envvar] if required and default: required = False super(CBEnvAction, self).__init__(default=default, required=required, **kwargs) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) class CBNonEchoedAction(CBEnvAction): """Allows an argument to be specified by use of a non-echoed value passed through stdin, through an environment variable, or as a value to the argument""" def __init__(self, envvar, prompt_text="Enter password:", confirm_text=None, required=True, default=None, nargs='?', **kwargs): self.prompt_text = prompt_text self.confirm_text = confirm_text super(CBNonEchoedAction, self).__init__(envvar, required=required, default=default, nargs=nargs, **kwargs) def __call__(self, parser, namespace, values, option_string=None): if values is None: values = getpass.getpass(self.prompt_text) if self.confirm_text is not None: confirm = getpass.getpass(self.prompt_text) if values != confirm: raise ArgumentError(self, "Passwords entered do not match, please retry") super(CBNonEchoedAction, self).__call__(parser, namespace, values, option_string=None) class CBHelpAction(Action): """Allows the custom handling of the help command line argument""" # pylint: disable=redefined-builtin def __init__(self, option_strings, klass, dest=SUPPRESS, default=SUPPRESS, help=None): super(CBHelpAction, self).__init__(option_strings=option_strings, dest=dest, default=default, nargs=0, help=help) # pylint: disable=redefined-builtin self.klass = klass def __call__(self, parser, namespace, values, option_string=None): if option_string == "-h": 
parser.print_help() else: CBHelpAction._show_man_page(self.klass.get_man_page_name()) parser.exit() @staticmethod def _show_man_page(page): if os.name == "nt": try: subprocess.call(["rundll32.exe", "url.dll,FileProtocolHandler", os.path.join(CB_MAN_PATH, page)]) except OSError as e: _exit_if_errors(["Unable to open man page using your browser, %s" % e]) else: try: subprocess.call(["man", os.path.join(CB_MAN_PATH, page)]) except OSError: _exit_if_errors(["Unable to open man page using the 'man' command, ensure it is on your path or" + "install a manual reader"]) class CliParser(ArgumentParser): def __init__(self, *args, **kwargs): super(CliParser, self).__init__(*args, **kwargs) def error(self, message): self.exit(2, f'ERROR: {message}\n') class Command(object): """A Couchbase CLI Command""" def __init__(self): self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False) def parse(self, args): """Parses the subcommand""" if len(args) == 0: self.short_help() return self.parser.parse_args(args) def short_help(self, code=0): """Prints the short help message and exits""" self.parser.print_help() self.parser.exit(code) def execute(self, opts): """Executes the subcommand""" raise NotImplementedError @staticmethod def get_man_page_name(): """Returns the man page name""" raise NotImplementedError @staticmethod def get_description(): """Returns the command description""" raise NotImplementedError class CouchbaseCLI(Command): """A Couchbase CLI command""" def __init__(self): super(CouchbaseCLI, self).__init__() self.parser.prog = "couchbase-cli" subparser = self.parser.add_subparsers(title="Commands", metavar="") for (name, klass) in find_subcommands(): if klass.is_hidden(): subcommand = subparser.add_parser(name) else: subcommand = subparser.add_parser(name, help=klass.get_description()) subcommand.set_defaults(klass=klass) group = self.parser.add_argument_group("Options") group.add_argument("-h", "--help", action=CBHelpAction, klass=self, help="Prints the short or long help message") group.add_argument("--version", help="Get couchbase-cli version") def parse(self, args): if len(sys.argv) == 1: self.parser.print_help() self.parser.exit(1) if args[1] == "--version": print(VERSION) sys.exit(0) if not args[1] in ["-h", "--help", "--version"] and args[1].startswith("-"): _exit_if_errors([f"Unknown subcommand: '{args[1]}'. The first argument has to be a subcommand like" f" 'bucket-list' or 'rebalance', please see couchbase-cli -h for the full list of commands" f" and options"]) l1_args = self.parser.parse_args(args[1:2]) l2_args = l1_args.klass().parse(args[2:]) setattr(l2_args, 'klass', l1_args.klass) return l2_args def execute(self, opts): opts.klass().execute(opts) @staticmethod def get_man_page_name(): """Returns the man page name""" return "couchbase-cli" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "A Couchbase cluster administration utility" class Subcommand(Command): """ A Couchbase CLI Subcommand: This is for subcommand that interact with a remote Couchbase Server over the REST API. 
""" def __init__(self, deprecate_username=False, deprecate_password=False, cluster_default=None): super(Subcommand, self).__init__() # Filled by the decorators self.rest = None self.enterprise = None self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False) group = self.parser.add_argument_group("Cluster options") group.add_argument("-c", "--cluster", dest="cluster", required=(cluster_default is None), metavar="<cluster>", action=CBHostAction, default=cluster_default, help="The hostname of the Couchbase cluster") if deprecate_username: group.add_argument("-u", "--username", dest="username", action=CBDeprecatedAction, help=SUPPRESS) else: group.add_argument("-u", "--username", dest="username", required=True, action=CBEnvAction, envvar='CB_REST_USERNAME', metavar="<username>", help="The username for the Couchbase cluster") if deprecate_password: group.add_argument("-p", "--password", dest="password", action=CBDeprecatedAction, help=SUPPRESS) else: group.add_argument("-p", "--password", dest="password", required=True, action=CBNonEchoedAction, envvar='CB_REST_PASSWORD', metavar="<password>", help="The password for the Couchbase cluster") group.add_argument("-o", "--output", dest="output", default="standard", metavar="<output>", choices=["json", "standard"], help="The output type (json or standard)") group.add_argument("-d", "--debug", dest="debug", action="store_true", help="Run the command with extra logging") group.add_argument("-s", "--ssl", dest="ssl", const=True, default=False, nargs=0, action=CBDeprecatedAction, help="Use ssl when connecting to Couchbase (Deprecated)") group.add_argument("--no-ssl-verify", dest="ssl_verify", action="store_false", default=True, help="Skips SSL verification of certificates against the CA") group.add_argument("--cacert", dest="cacert", default=True, help="Verifies the cluster identity with this certificate") group.add_argument("-h", "--help", action=CBHelpAction, klass=self, help="Prints the short or long help message") def execute(self, opts): # pylint: disable=useless-super-delegation super(Subcommand, self).execute(opts) @staticmethod def get_man_page_name(): return Command.get_man_page_name() @staticmethod def get_description(): return Command.get_description() @staticmethod def is_hidden(): """Whether or not the subcommand should be hidden from the help message""" return False class LocalSubcommand(Command): """ A Couchbase CLI Localcommand: This is for subcommands that interact with the local Couchbase Server via the filesystem or a local socket. 
""" def __init__(self): super(LocalSubcommand, self).__init__() self.parser = CliParser(formatter_class=CLIHelpFormatter, add_help=False, allow_abbrev=False) group = self.parser.add_argument_group(title="Local command options", description="This command has to be execute on the locally running" + " Couchbase Server.") group.add_argument("-h", "--help", action=CBHelpAction, klass=self, help="Prints the short or long help message") group.add_argument("--config-path", dest="config_path", metavar="<path>", default=CB_CFG_PATH, help=SUPPRESS) def execute(self, opts): # pylint: disable=useless-super-delegation super(LocalSubcommand, self).execute(opts) @staticmethod def get_man_page_name(): return Command.get_man_page_name() @staticmethod def get_description(): return Command.get_description() @staticmethod def is_hidden(): """Whether or not the subcommand should be hidden from the help message""" return False class ClusterInit(Subcommand): """The cluster initialization subcommand""" def __init__(self): super(ClusterInit, self).__init__(True, True, "http://127.0.0.1:8091") self.parser.prog = "couchbase-cli cluster-init" group = self.parser.add_argument_group("Cluster initialization options") group.add_argument("--cluster-username", dest="username", required=True, metavar="<username>", help="The cluster administrator username") group.add_argument("--cluster-password", dest="password", required=True, metavar="<password>", help="The cluster administrator password") group.add_argument("--cluster-port", dest="port", type=(int), metavar="<port>", help="The cluster administration console port") group.add_argument("--cluster-ramsize", dest="data_mem_quota", type=(int), metavar="<quota>", help="The data service memory quota in mebibytes") group.add_argument("--cluster-index-ramsize", dest="index_mem_quota", type=(int), metavar="<quota>", help="The index service memory quota in mebibytes") group.add_argument("--cluster-fts-ramsize", dest="fts_mem_quota", type=(int), metavar="<quota>", help="The full-text service memory quota in mebibytes") group.add_argument("--cluster-eventing-ramsize", dest="eventing_mem_quota", type=(int), metavar="<quota>", help="The Eventing service memory quota in mebibytes") group.add_argument("--cluster-analytics-ramsize", dest="cbas_mem_quota", type=(int), metavar="<quota>", help="The analytics service memory quota in mebibytes") group.add_argument("--cluster-name", dest="name", metavar="<name>", help="The cluster name") group.add_argument("--index-storage-setting", dest="index_storage_mode", choices=["default", "memopt"], metavar="<mode>", help="The index storage backend (Defaults to \"default)\"") group.add_argument("--services", dest="services", default="data", metavar="<service_list>", help="The services to run on this server") group.add_argument("--update-notifications", dest="notifications", metavar="<1|0>", choices=["0", "1"], default="1", help="Enables/disable software update notifications") @rest_initialiser(enterprise_check=False) def execute(self, opts): # We need to ensure that creating the REST username/password is the # last REST API that is called because once that API succeeds the # cluster is initialized and cluster-init cannot be run again. 
initialized, errors = self.rest.is_cluster_initialized() _exit_if_errors(errors) if initialized: _exit_if_errors(["Cluster is already initialized, use setting-cluster to change settings"]) if not self.enterprise and opts.index_storage_mode == 'memopt': _exit_if_errors(["memopt option for --index-storage-setting can only be configured on enterprise edition"]) services, errors = process_services(opts.services, self.enterprise) _exit_if_errors(errors) if 'kv' not in services.split(','): _exit_if_errors(["Cannot set up first cluster node without the data service"]) if opts.data_mem_quota or opts.index_mem_quota or opts.fts_mem_quota or opts.cbas_mem_quota \ or opts.eventing_mem_quota or opts.name is not None: _, errors = self.rest.set_pools_default(opts.data_mem_quota, opts.index_mem_quota, opts.fts_mem_quota, opts.cbas_mem_quota, opts.eventing_mem_quota, opts.name) _exit_if_errors(errors) # Set the index storage mode if not opts.index_storage_mode and 'index' in services.split(','): opts.index_storage_mode = "default" default = "plasma" if not self.enterprise: default = "forestdb" if opts.index_storage_mode: param = index_storage_mode_to_param(opts.index_storage_mode, default) _, errors = self.rest.set_index_settings(param, None, None, None, None, None, None, None) _exit_if_errors(errors) # Setup services _, errors = self.rest.setup_services(services) _exit_if_errors(errors) # Enable notifications if opts.notifications == "1": _, errors = self.rest.enable_notifications(True) else: _, errors = self.rest.enable_notifications(False) _exit_if_errors(errors) # Setup Administrator credentials and Admin Console port _, errors = self.rest.set_admin_credentials(opts.username, opts.password, opts.port) _exit_if_errors(errors) _success("Cluster initialized") @staticmethod def get_man_page_name(): return "couchbase-cli-cluster-init" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Initialize a Couchbase cluster" class BucketCompact(Subcommand): """The bucket compact subcommand""" def __init__(self): super(BucketCompact, self).__init__() self.parser.prog = "couchbase-cli bucket-compact" group = self.parser.add_argument_group("Bucket compaction options") group.add_argument("--bucket", dest="bucket_name", metavar="<name>", help="The name of bucket to compact") group.add_argument("--data-only", dest="data_only", action="store_true", help="Only compact the data files") group.add_argument("--view-only", dest="view_only", action="store_true", help="Only compact the view files") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): bucket, errors = self.rest.get_bucket(opts.bucket_name) _exit_if_errors(errors) if bucket["bucketType"] != BUCKET_TYPE_COUCHBASE: _exit_if_errors(["Cannot compact memcached buckets"]) _, errors = self.rest.compact_bucket(opts.bucket_name, opts.data_only, opts.view_only) _exit_if_errors(errors) _success("Bucket compaction started") @staticmethod def get_man_page_name(): return "couchbase-cli-bucket-compact" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Compact database and view data" class BucketCreate(Subcommand): """The bucket create subcommand""" def __init__(self): super(BucketCreate, self).__init__() self.parser.prog = "couchbase-cli bucket-create" group = self.parser.add_argument_group("Bucket create options") group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True, help="The name of bucket to create") group.add_argument("--bucket-type", 
dest="type", metavar="<type>", required=True, choices=["couchbase", "ephemeral", "memcached"], help="The bucket type (couchbase, ephemeral, or memcached)") group.add_argument("--storage-backend", dest="storage", metavar="<storage>", choices=["couchstore", "magma"], help="Type of storage backend (only for couchbase buckets)") group.add_argument("--bucket-ramsize", dest="memory_quota", metavar="<quota>", type=(int), required=True, help="The amount of memory to allocate the bucket") group.add_argument("--bucket-replica", dest="replica_count", metavar="<num>", choices=["0", "1", "2", "3"], help="The replica count for the bucket") group.add_argument("--bucket-priority", dest="priority", metavar="<priority>", choices=[BUCKET_PRIORITY_LOW_STR, BUCKET_PRIORITY_HIGH_STR], help="The bucket disk io priority (low or high)") group.add_argument("--durability-min-level", dest="durability_min_level", metavar="<level>", choices=["none", "majority", "majorityAndPersistActive", "persistToMajority"], help="The bucket durability minimum level") group.add_argument("--bucket-eviction-policy", dest="eviction_policy", metavar="<policy>", choices=["valueOnly", "fullEviction", "noEviction", "nruEviction"], help="The bucket eviction policy") group.add_argument("--conflict-resolution", dest="conflict_resolution", default=None, choices=["sequence", "timestamp"], metavar="<type>", help="The XDCR conflict resolution type (timestamp or sequence)") group.add_argument("--max-ttl", dest="max_ttl", default=None, type=(int), metavar="<seconds>", help="Set the maximum TTL the bucket will accept. Couchbase server Enterprise Edition only.") group.add_argument("--compression-mode", dest="compression_mode", choices=["off", "passive", "active"], metavar="<mode>", help="Set the compression mode of the bucket") group.add_argument("--enable-flush", dest="enable_flush", metavar="<0|1>", choices=["0", "1"], help="Enable bucket flush on this bucket (0 or 1)") group.add_argument("--enable-index-replica", dest="replica_indexes", metavar="<0|1>", choices=["0", "1"], help="Enable replica indexes (0 or 1)") group.add_argument("--wait", dest="wait", action="store_true", help="Wait for bucket creation to complete") group.add_argument("--database-fragmentation-threshold-percentage", dest="db_frag_perc", metavar="<perc>", type=(int), help="Set Database Fragmentation level percent") group.add_argument("--database-fragmentation-threshold-size", dest="db_frag_size", metavar="<mebibytes>", type=(int), help="Set Database Fragmentation level") group.add_argument("--view-fragmentation-threshold-percentage", dest="view_frag_perc", metavar="<perc>", type=(int), help="Set View Fragmentation level percent") group.add_argument("--view-fragmentation-threshold-size", dest="view_frag_size", metavar="<mebibytes>", type=(int), help="Set View Fragmentation level size") group.add_argument("--from-hour", dest="from_hour", metavar="<quota>", type=(int), help="Set start time hour") group.add_argument("--from-minute", dest="from_min", metavar="<quota>", type=(int), help="Set start time minutes") group.add_argument("--to-hour", dest="to_hour", metavar="<quota>", type=(int), help="Set end time hour") group.add_argument("--to-minute", dest="to_min", metavar="<quota>", type=(int), help="Set end time minutes") group.add_argument("--abort-outside", dest="abort_outside", metavar="<0|1>", choices=["0", "1"], help="Allow Time period") group.add_argument("--parallel-db-view-compaction", dest="paralleldb_and_view_compact", metavar="<0|1>", choices=["0", "1"], help="Set parallel DB 
and View Compaction") group.add_argument("--purge-interval", dest="purge_interval", type=(float), metavar="<float>", help="Sets the frequency of the tombstone purge interval") @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False) def execute(self, opts): if opts.max_ttl and not self.enterprise: _exit_if_errors(["Maximum TTL can only be configured on enterprise edition"]) if opts.compression_mode and not self.enterprise: _exit_if_errors(["Compression mode can only be configured on enterprise edition"]) if opts.type == "memcached": _deprecated("Memcached buckets are deprecated, please use ephemeral buckets instead") if opts.replica_count is not None: _exit_if_errors(["--bucket-replica cannot be specified for a memcached bucket"]) if opts.conflict_resolution is not None: _exit_if_errors(["--conflict-resolution cannot be specified for a memcached bucket"]) if opts.replica_indexes is not None: _exit_if_errors(["--enable-index-replica cannot be specified for a memcached bucket"]) if opts.priority is not None: _exit_if_errors(["--bucket-priority cannot be specified for a memcached bucket"]) if opts.eviction_policy is not None: _exit_if_errors(["--bucket-eviction-policy cannot be specified for a memcached bucket"]) if opts.max_ttl is not None: _exit_if_errors(["--max-ttl cannot be specified for a memcached bucket"]) if opts.compression_mode is not None: _exit_if_errors(["--compression-mode cannot be specified for a memcached bucket"]) if opts.durability_min_level is not None: _exit_if_errors(["--durability-min-level cannot be specified for a memcached bucket"]) elif opts.type == "ephemeral" and opts.eviction_policy in ["valueOnly", "fullEviction"]: _exit_if_errors(["--bucket-eviction-policy must either be noEviction or nruEviction"]) elif opts.type == "couchbase" and opts.eviction_policy in ["noEviction", "nruEviction"]: _exit_if_errors(["--bucket-eviction-policy must either be valueOnly or fullEviction"]) if ((opts.type == "memcached" or opts.type == "ephemeral") and (opts.db_frag_perc is not None or opts.db_frag_size is not None or opts.view_frag_perc is not None or opts.view_frag_size is not None or opts.from_hour is not None or opts.from_min is not None or opts.to_hour is not None or opts.to_min is not None or opts.abort_outside is not None or opts.paralleldb_and_view_compact is not None)): _warning(f'ignoring compaction settings as bucket type {opts.type} does not accept it') storage_type = "couchstore" if opts.storage is not None: if opts.type != "couchbase": _exit_if_errors(["--storage-backend is only valid for couchbase buckets"]) if opts.storage == "magma": storage_type = "magma" priority = None if opts.priority is not None: if opts.priority == BUCKET_PRIORITY_HIGH_STR: priority = BUCKET_PRIORITY_HIGH_INT elif opts.priority == BUCKET_PRIORITY_LOW_STR: priority = BUCKET_PRIORITY_LOW_INT conflict_resolution_type = None if opts.conflict_resolution is not None: if opts.conflict_resolution == "sequence": conflict_resolution_type = "seqno" elif opts.conflict_resolution == "timestamp": conflict_resolution_type = "lww" _, errors = self.rest.create_bucket(opts.bucket_name, opts.type, storage_type, opts.memory_quota, opts.durability_min_level, opts.eviction_policy, opts.replica_count, opts.replica_indexes, priority, conflict_resolution_type, opts.enable_flush, opts.max_ttl, opts.compression_mode, opts.wait, opts.db_frag_perc, opts.db_frag_size, opts.view_frag_perc, opts.view_frag_size, opts.from_hour, opts.from_min, opts.to_hour, opts.to_min, opts.abort_outside, 
opts.paralleldb_and_view_compact, opts.purge_interval) _exit_if_errors(errors) _success("Bucket created") @staticmethod def get_man_page_name(): return "couchbase-cli-bucket-create" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Add a new bucket to the cluster" class BucketDelete(Subcommand): """The bucket delete subcommand""" def __init__(self): super(BucketDelete, self).__init__() self.parser.prog = "couchbase-cli bucket-delete" group = self.parser.add_argument_group("Bucket delete options") group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True, help="The name of bucket to delete") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): _, errors = self.rest.get_bucket(opts.bucket_name) _exit_if_errors(errors) _, errors = self.rest.delete_bucket(opts.bucket_name) _exit_if_errors(errors) _success("Bucket deleted") @staticmethod def get_man_page_name(): return "couchbase-cli-bucket-delete" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Delete an existing bucket" class BucketEdit(Subcommand): """The bucket edit subcommand""" def __init__(self): super(BucketEdit, self).__init__() self.parser.prog = "couchbase-cli bucket-edit" group = self.parser.add_argument_group("Bucket edit options") group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True, help="The name of bucket to create") group.add_argument("--bucket-ramsize", dest="memory_quota", metavar="<quota>", type=(int), help="The amount of memory to allocate the bucket") group.add_argument("--bucket-replica", dest="replica_count", metavar="<num>", choices=["0", "1", "2", "3"], help="The replica count for the bucket") group.add_argument("--bucket-priority", dest="priority", metavar="<priority>", choices=["low", "high"], help="The bucket disk io priority (low or high)") group.add_argument("--durability-min-level", dest="durability_min_level", metavar="<level>", choices=["none", "majority", "majorityAndPersistActive", "persistToMajority"], help="The bucket durability minimum level") group.add_argument("--bucket-eviction-policy", dest="eviction_policy", metavar="<policy>", type=(str), help="The bucket eviction policy (valueOnly or fullEviction)") group.add_argument("--max-ttl", dest="max_ttl", default=None, type=(int), metavar="<seconds>", help="Set the maximum TTL the bucket will accept") group.add_argument("--compression-mode", dest="compression_mode", choices=["off", "passive", "active"], metavar="<mode>", help="Set the compression mode of the bucket") group.add_argument("--enable-flush", dest="enable_flush", metavar="<0|1>", choices=["0", "1"], help="Enable bucket flush on this bucket (0 or 1)") group.add_argument("--remove-bucket-port", dest="remove_port", metavar="<0|1>", choices=["0", "1"], help="Removes the bucket-port setting") group.add_argument("--database-fragmentation-threshold-percentage", dest="db_frag_perc", metavar="<perc>", type=(int), help="Set Database Fragmentation level percent") group.add_argument("--database-fragmentation-threshold-size", dest="db_frag_size", metavar="<mebibytes>", type=(int), help="Set Database Fragmentation level") group.add_argument("--view-fragmentation-threshold-percentage", dest="view_frag_perc", metavar="<perc>", type=(int), help="Set View Fragmentation level percent") group.add_argument("--view-fragmentation-threshold-size", dest="view_frag_size", metavar="<mebibytes>", type=(int), help="Set View Fragmentation level size") 
group.add_argument("--from-hour", dest="from_hour", metavar="<hour>", type=(int), help="Set start time hour") group.add_argument("--from-minute", dest="from_min", metavar="<min>", type=(int), help="Set start time minutes") group.add_argument("--to-hour", dest="to_hour", metavar="<hour>", type=(int), help="Set end time hour") group.add_argument("--to-minute", dest="to_min", metavar="<min>", type=(int), help="Set end time minutes") group.add_argument("--abort-outside", dest="abort_outside", metavar="<0|1>", choices=["0", "1"], help="Allow Time period") group.add_argument("--parallel-db-view-compaction", dest="paralleldb_and_view_compact", metavar="<0|1>", choices=["0", "1"], help="Set parallel DB and View Compaction") group.add_argument("--purge-interval", dest="purge_interval", type=(float), metavar="<num>", help="Set the bucket metadata purge interval") @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False) def execute(self, opts): if opts.max_ttl and not self.enterprise: _exit_if_errors(["Maximum TTL can only be configured on enterprise edition"]) if opts.compression_mode and not self.enterprise: _exit_if_errors(["Compression mode can only be configured on enterprise edition"]) # Note that we accept 'noEviction' and 'nruEviction' as valid values even though they are undocumented; this is # so that users attempting to modify the eviction policy of an ephemeral bucket will receive a meaningful # message from 'ns_server'. See MB-39036 for more information. if (opts.eviction_policy is not None and opts.eviction_policy not in ["valueOnly", "fullEviction", "noEviction", "nruEviction"]): _exit_if_errors([f"argument --bucket-eviction-policy: invalid choice: '{opts.eviction_policy}'"+ " (choose from 'valueOnly', 'fullEviction')"]) bucket, errors = self.rest.get_bucket(opts.bucket_name) _exit_if_errors(errors) if "bucketType" in bucket and bucket["bucketType"] == "memcached": _deprecated("Memcached buckets are deprecated, please use ephemeral buckets instead") if opts.memory_quota is not None: _exit_if_errors(["--bucket-ramsize cannot be specified for a memcached bucket"]) if opts.replica_count is not None: _exit_if_errors(["--bucket-replica cannot be specified for a memcached bucket"]) if opts.priority is not None: _exit_if_errors(["--bucket-priority cannot be specified for a memcached bucket"]) if opts.eviction_policy is not None: _exit_if_errors(["--bucket-eviction-policy cannot be specified for a memcached bucket"]) if opts.max_ttl is not None: _exit_if_errors(["--max-ttl cannot be specified for a memcached bucket"]) if opts.compression_mode is not None: _exit_if_errors(["--compression-mode cannot be specified for a memcached bucket"]) if opts.durability_min_level is not None: _exit_if_errors(["--durability-min-level cannot be specified for a memcached bucket"]) if (("bucketType" in bucket and (bucket["bucketType"] == "memcached" or bucket["bucketType"] == "ephemeral")) and (opts.db_frag_perc is not None or opts.db_frag_size is not None or opts.view_frag_perc is not None or opts.view_frag_size is not None or opts.from_hour is not None or opts.from_min is not None or opts.to_hour is not None or opts.to_min is not None or opts.abort_outside is not None or opts.paralleldb_and_view_compact is not None)): _exit_if_errors([f'compaction settings can not be specified for a {bucket["bucketType"]} bucket']) priority = None if opts.priority is not None: if opts.priority == BUCKET_PRIORITY_HIGH_STR: priority = BUCKET_PRIORITY_HIGH_INT elif opts.priority == 
BUCKET_PRIORITY_LOW_STR: priority = BUCKET_PRIORITY_LOW_INT if opts.remove_port: if opts.remove_port == '1': opts.remove_port = True else: opts.remove_port = False _, errors = self.rest.edit_bucket(opts.bucket_name, opts.memory_quota, opts.durability_min_level, opts.eviction_policy, opts.replica_count, priority, opts.enable_flush, opts.max_ttl, opts.compression_mode, opts.remove_port, opts.db_frag_perc, opts.db_frag_size, opts.view_frag_perc, opts.view_frag_size, opts.from_hour, opts.from_min, opts.to_hour, opts.to_min, opts.abort_outside, opts.paralleldb_and_view_compact, opts.purge_interval, 'bucketType' in bucket and bucket['bucketType'] == 'membase') _exit_if_errors(errors) _success("Bucket edited") @staticmethod def get_man_page_name(): return "couchbase-cli-bucket-edit" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify settings for an existing bucket" class BucketFlush(Subcommand): """The bucket edit subcommand""" def __init__(self): super(BucketFlush, self).__init__() self.parser.prog = "couchbase-cli bucket-flush" group = self.parser.add_argument_group("Bucket flush options") group.add_argument("--bucket", dest="bucket_name", metavar="<name>", required=True, help="The name of bucket to delete") group.add_argument("--force", dest="force", action="store_true", help="Execute the command without asking to confirm") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): _, errors = self.rest.get_bucket(opts.bucket_name) _exit_if_errors(errors) if not opts.force: question = "Running this command will totally PURGE database data from disk. " + \ "Do you really want to do it? (Yes/No)" confirm = input(question) if confirm not in ('y', 'Y', 'yes', 'Yes'): return _, errors = self.rest.flush_bucket(opts.bucket_name) _exit_if_errors(errors) _success("Bucket flushed") @staticmethod def get_man_page_name(): return "couchbase-cli-bucket-flush" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Flush all data from disk for a given bucket" class BucketList(Subcommand): """The bucket list subcommand""" def __init__(self): super(BucketList, self).__init__() self.parser.prog = "couchbase-cli bucket-list" @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): result, errors = self.rest.list_buckets(extended=True) _exit_if_errors(errors) if opts.output == 'json': print(json.dumps(result)) else: for bucket in result: print(f'{bucket["name"]}') print(f' bucketType: {bucket["bucketType"]}') print(f' numReplicas: {bucket["replicaNumber"]}') print(f' ramQuota: {bucket["quota"]["ram"]}') print(f' ramUsed: {bucket["basicStats"]["memUsed"]}') @staticmethod def get_man_page_name(): return "couchbase-cli-bucket-list" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "List all buckets in a cluster" class CollectLogsStart(Subcommand): """The collect-logs-start subcommand""" def __init__(self): super(CollectLogsStart, self).__init__() self.parser.prog = "couchbase-cli collect-logs-start" group = self.parser.add_argument_group("Collect logs start options") group.add_argument("--all-nodes", dest="all_nodes", action="store_true", default=False, help="Collect logs for all nodes") group.add_argument("--nodes", dest="nodes", metavar="<node_list>", help="A comma separated list of nodes to collect logs from") group.add_argument("--redaction-level", dest="redaction_level", metavar="<none|partial>", choices=["none", "partial"], help="Level of log redaction 
to apply") group.add_argument("--salt", dest="salt", metavar="<string>", help="The salt to use to redact the log") group.add_argument("--output-directory", dest="output_dir", metavar="<directory>", help="Output directory to place the generated logs file") group.add_argument("--temporary-directory", dest="tmp_dir", metavar="<directory>", help="Temporary directory to use when generating the logs") group.add_argument("--upload", dest="upload", action="store_true", default=False, help="Logs should be uploaded for Couchbase support") group.add_argument("--upload-host", dest="upload_host", metavar="<host>", help="The host to upload logs to") group.add_argument("--upload-proxy", dest="upload_proxy", metavar="<proxy>", help="The proxy to used to upload the logs via") group.add_argument("--customer", dest="upload_customer", metavar="<name>", help="The name of the customer uploading logs") group.add_argument("--ticket", dest="upload_ticket", metavar="<num>", help="The ticket number the logs correspond to") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if not opts.nodes and not opts.all_nodes: _exit_if_errors(["Must specify either --all-nodes or --nodes"]) if opts.nodes and opts.all_nodes: _exit_if_errors(["Cannot specify both --all-nodes and --nodes"]) if opts.salt and opts.redaction_level != "partial": _exit_if_errors(["--redaction-level has to be set to 'partial' when --salt is specified"]) servers = opts.nodes if opts.all_nodes: servers = "*" if opts.upload: if not opts.upload_host: _exit_if_errors(["--upload-host is required when --upload is specified"]) if not opts.upload_customer: _exit_if_errors(["--upload-customer is required when --upload is specified"]) else: if opts.upload_host: _warning("--upload-host has no effect with specifying --upload") if opts.upload_customer: _warning("--upload-customer has no effect with specifying --upload") if opts.upload_ticket: _warning("--upload_ticket has no effect with specifying --upload") if opts.upload_proxy: _warning("--upload_proxy has no effect with specifying --upload") _, errors = self.rest.collect_logs_start(servers, opts.redaction_level, opts.salt, opts.output_dir, opts.tmp_dir, opts.upload, opts.upload_host, opts.upload_proxy, opts.upload_customer, opts.upload_ticket) _exit_if_errors(errors) _success("Log collection started") @staticmethod def get_man_page_name(): return "couchbase-cli-collect-logs-start" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Start cluster log collection" class CollectLogsStatus(Subcommand): """The collect-logs-status subcommand""" def __init__(self): super(CollectLogsStatus, self).__init__() self.parser.prog = "couchbase-cli collect-logs-status" @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): tasks, errors = self.rest.get_tasks() _exit_if_errors(errors) found = False for task in tasks: if isinstance(task, dict) and 'type' in task and task['type'] == 'clusterLogsCollection': found = True self._print_task(task) if not found: print("No log collection tasks were found") def _print_task(self, task): print(f'Status: {task["status"]}') if 'perNode' in task: print("Details:") for node, node_status in task["perNode"].items(): print('\tNode:', node) print('\tStatus:', node_status['status']) for field in ["path", "statusCode", "url", "uploadStatusCode", "uploadOutput"]: if field in node_status: print('\t', field, ":", node_status[field]) print() @staticmethod def get_man_page_name(): return 
"couchbase-cli-collect-logs-status" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "View the status of cluster log collection" class CollectLogsStop(Subcommand): """The collect-logs-stop subcommand""" def __init__(self): super(CollectLogsStop, self).__init__() self.parser.prog = "couchbase-cli collect-logs-stop" @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): _, errors = self.rest.collect_logs_stop() _exit_if_errors(errors) _success("Log collection stopped") @staticmethod def get_man_page_name(): return "couchbase-cli-collect-logs-stop" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Stop cluster log collection" class Failover(Subcommand): """The failover subcommand""" def __init__(self): super(Failover, self).__init__() self.parser.prog = "couchbase-cli failover" group = self.parser.add_argument_group("Failover options") group.add_argument("--server-failover", dest="servers_to_failover", metavar="<server_list>", required=True, help="A list of servers to fail over") group.add_argument("--hard", dest="hard", action="store_true", help="Hard failover the server") group.add_argument("--force", dest="force", action="store_true", help="Force a hard failover") group.add_argument("--no-progress-bar", dest="no_bar", action="store_true", default=False, help="Disables the progress bar") group.add_argument("--no-wait", dest="wait", action="store_false", default=True, help="Don't wait for rebalance completion") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if opts.force and not opts.hard: _exit_if_errors(["--hard is required with --force flag"]) opts.servers_to_failover = apply_default_port(opts.servers_to_failover) _, errors = self.rest.failover(opts.servers_to_failover, opts.hard, opts.force) _exit_if_errors(errors) if not opts.hard: time.sleep(1) if opts.wait: bar = TopologyProgressBar(self.rest, 'Gracefully failing over', opts.no_bar) errors = bar.show() _exit_if_errors(errors) _success("Server failed over") else: _success("Server failed over started") else: _success("Server failed over") @staticmethod def get_man_page_name(): return "couchbase-cli-failover" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Failover one or more servers" class GroupManage(Subcommand): """The group manage subcommand""" def __init__(self): super(GroupManage, self).__init__() self.parser.prog = "couchbase-cli group-manage" group = self.parser.add_argument_group("Group manage options") group.add_argument("--create", dest="create", action="store_true", default=None, help="Create a new server group") group.add_argument("--delete", dest="delete", action="store_true", default=None, help="Delete a server group") group.add_argument("--list", dest="list", action="store_true", default=None, help="List all server groups") group.add_argument("--rename", dest="rename", help="Rename a server group. 
It takes the new name of the group.") group.add_argument("--group-name", dest="name", metavar="<name>", help="The name of the server group") group.add_argument("--move-servers", dest="move_servers", metavar="<server_list>", help="A list of servers to move between groups") group.add_argument("--from-group", dest="from_group", metavar="<group>", help="The group to move servers from") group.add_argument("--to-group", dest="to_group", metavar="<group>", help="The group to move servers to") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): cmds = [opts.create, opts.delete, opts.list, opts.rename, opts.move_servers] if sum(cmd is not None for cmd in cmds) == 0: _exit_if_errors(["Must specify one of the following: --create, " + "--delete, --list, --move-servers, or --rename"]) elif sum(cmd is not None for cmd in cmds) != 1: _exit_if_errors(["Only one of the following may be specified: --create" + ", --delete, --list, --move-servers, or --rename"]) if opts.create: self._create(opts) elif opts.delete: self._delete(opts) elif opts.list: self._list(opts) elif opts.rename: self._rename(opts) elif opts.move_servers is not None: self._move(opts) def _create(self, opts): if opts.name is None: _exit_if_errors(["--group-name is required with --create flag"]) _, errors = self.rest.create_server_group(opts.name) _exit_if_errors(errors) _success("Server group created") def _delete(self, opts): if opts.name is None: _exit_if_errors(["--group-name is required with --delete flag"]) _, errors = self.rest.delete_server_group(opts.name) _exit_if_errors(errors) _success("Server group deleted") def _list(self, opts): groups, errors = self.rest.get_server_groups() _exit_if_errors(errors) found = False for group in groups["groups"]: if opts.name is None or opts.name == group['name']: found = True print(group['name']) for node in group['nodes']: print(f' server: {node["hostname"]}') if not found and opts.name: _exit_if_errors([f'Invalid group name: {opts.name}']) def _move(self, opts): if opts.from_group is None: _exit_if_errors(["--from-group is required with --move-servers"]) if opts.to_group is None: _exit_if_errors(["--to-group is required with --move-servers"]) servers = apply_default_port(opts.move_servers) _, errors = self.rest.move_servers_between_groups(servers, opts.from_group, opts.to_group) _exit_if_errors(errors) _success("Servers moved between groups") def _rename(self, opts): if opts.name is None: _exit_if_errors(["--group-name is required with --rename option"]) _, errors = self.rest.rename_server_group(opts.name, opts.rename) _exit_if_errors(errors) _success("Server group renamed") @staticmethod def get_man_page_name(): return "couchbase-cli-group-manage" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Manage server groups" class HostList(Subcommand): """The host list subcommand""" def __init__(self): super(HostList, self).__init__() self.parser.prog = "couchbase-cli host-list" @rest_initialiser(version_check=True) def execute(self, opts): result, errors = self.rest.pools('default') _exit_if_errors(errors) if opts.output == 'json': nodes_out = {'nodes': []} for node in result['nodes']: nodes_out['nodes'].append(node['configuredHostname']) print(json.dumps(nodes_out)) else: for node in result['nodes']: print(node['configuredHostname']) @staticmethod def get_man_page_name(): return "couchbase-cli-host-list" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "List all hosts in a cluster" class 
ResetCipherSuites(LocalSubcommand): """The reset cipher suites subcommand """ def __init__(self): super(ResetCipherSuites, self).__init__() self.parser.prog = "couchbase-cli reset-cipher-suites" group = self.parser.add_argument_group("Reset Cipher Suites") group.add_argument("--force", action='store_true', default=False, help="Force resetting of the cipher suites") group.add_argument("-P", "--port", metavar="<port>", default="8091", help="The REST API port, defaults to 8091") def execute(self, opts): token = _exit_on_file_read_failure(os.path.join(opts.config_path, "localtoken")).rstrip() rest = ClusterManager("http://127.0.0.1:" + opts.port, "@localtoken", token) check_cluster_initialized(rest) check_versions(rest) if not opts.force: confirm = str(input("Are you sure that the cipher should be reset?: Y/[N]")) if confirm != "Y": _success("Cipher suites have not been reset to default") _, errors = rest.reset_cipher_suites() _exit_if_errors(errors) _success("Cipher suites have been reset to the default") @staticmethod def get_man_page_name(): return "couchbase-cli-reset-cipher-suites" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Rests cipher suites to the default" class MasterPassword(LocalSubcommand): """The master password subcommand""" def __init__(self): super(MasterPassword, self).__init__() self.parser.prog = "couchbase-cli master-password" group = self.parser.add_argument_group("Master password options") group.add_argument("--send-password", dest="send_password", metavar="<password>", required=False, action=CBNonEchoedAction, envvar=None, prompt_text="Enter master password:", help="Sends the master password to start the server") def execute(self, opts): if opts.send_password is not None: path = [CB_BIN_PATH, os.environ['PATH']] if os.name == 'posix': os.environ['PATH'] = ':'.join(path) else: os.environ['PATH'] = ';'.join(path) cookiefile = os.path.join(opts.config_path, "couchbase-server.babysitter.cookie") if not os.path.isfile(cookiefile): _exit_if_errors(["The node is down"]) cookie = _exit_on_file_read_failure(cookiefile, "Insufficient privileges to send master password - Please" " execute this command as a operating system user who has" " file system read permission on the Couchbase Server " " configuration").rstrip() nodefile = os.path.join(opts.config_path, "couchbase-server.babysitter.node") node = _exit_on_file_read_failure(nodefile).rstrip() self.prompt_for_master_pwd(node, cookie, opts.send_password, opts.config_path) else: _exit_if_errors(["No parameters set"]) def prompt_for_master_pwd(self, node, cookie, password, cb_cfg_path): ns_server_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_server", "ebin") babystr_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_babysitter", "ebin") inetrc_file = os.path.join(CB_ETC_PATH, "hosts.cfg") dist_cfg_file = os.path.join(cb_cfg_path, "config", "dist_cfg") if password == '': password = getpass.getpass("\nEnter master password:") name = '[email protected]' args = ['-pa', ns_server_ebin_path, babystr_ebin_path, '-noinput', '-name', name, '-proto_dist', 'cb', '-epmd_module', 'cb_epmd', '-kernel', 'inetrc', f'"{inetrc_file}"', 'dist_config_file', f'"{dist_cfg_file}"', '-setcookie', cookie, '-run', 'encryption_service', 'remote_set_password', node, password] rc, out, err = self.run_process("erl", args) if rc == 0: print("SUCCESS: Password accepted. 
Node started booting.") elif rc == 101: print("Incorrect password.") self.prompt_for_master_pwd(node, cookie, '', cb_cfg_path) elif rc == 102: _exit_if_errors(["Password was already supplied"]) elif rc == 103: _exit_if_errors(["The node is down"]) elif rc == 104: _exit_if_errors(["Incorrect password. Node shuts down."]) else: _exit_if_errors([f'Unknown error: {rc} {out}, {err}']) def run_process(self, name, args): try: if os.name == "nt": name = name + ".exe" args.insert(0, name) p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = p.stdout.read() error = p.stderr.read() p.wait() rc = p.returncode return rc, output, error except OSError: _exit_if_errors([f'Could not locate the {name} executable']) @staticmethod def get_man_page_name(): return "couchbase-cli-master-password" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Unlocking the master password" class NodeInit(Subcommand): """The node initialization subcommand""" def __init__(self): super(NodeInit, self).__init__() self.parser.prog = "couchbase-cli node-init" group = self.parser.add_argument_group("Node initialization options") group.add_argument("--node-init-data-path", dest="data_path", metavar="<path>", help="The path to store database files") group.add_argument("--node-init-index-path", dest="index_path", metavar="<path>", help="The path to store index files") group.add_argument("--node-init-analytics-path", dest="analytics_path", metavar="<path>", action="append", help="The path to store analytics files (supply one parameter for each path desired)") group.add_argument("--node-init-eventing-path", dest="eventing_path", metavar="<path>", help="The path to store eventing files") group.add_argument("--node-init-java-home", dest="java_home", metavar="<path>", help="The path of the Java Runtime Environment (JRE) to use on this server") group.add_argument("--node-init-hostname", dest="hostname", metavar="<hostname>", help="Sets the hostname for this server") group.add_argument("--ipv6", dest="ipv6", action="store_true", default=False, help="Configure the node to communicate via ipv6") group.add_argument("--ipv4", dest="ipv4", action="store_true", default=False, help="Configure the node to communicate via ipv4") @rest_initialiser() def execute(self, opts): # Cluster does not need to be initialized for this command if (opts.data_path is None and opts.index_path is None and opts.analytics_path is None and opts.eventing_path is None and opts.java_home is None and opts.hostname is None and opts.ipv6 is None and opts.ipv4 is None): _exit_if_errors(["No node initialization parameters specified"]) if opts.ipv4 and opts.ipv6: _exit_if_errors(["Use either --ipv4 or --ipv6"]) if opts.ipv4: afamily = 'ipv4' elif opts.ipv6: afamily = 'ipv6' else: afamily = None _, errors = self.rest.node_init(hostname=opts.hostname, afamily=afamily, data_path=opts.data_path, index_path=opts.index_path, cbas_path=opts.analytics_path, eventing_path=opts.eventing_path, java_home=opts.java_home) _exit_if_errors(errors) _success("Node initialized") @staticmethod def get_man_page_name(): return "couchbase-cli-node-init" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Set node specific settings" class Rebalance(Subcommand): """The rebalance subcommand""" def __init__(self): super(Rebalance, self).__init__() self.parser.prog = "couchbase-cli rebalance" group = self.parser.add_argument_group("Rebalance options") group.add_argument("--server-remove", dest="server_remove", 
metavar="<server_list>", help="A list of servers to remove from the cluster") group.add_argument("--no-progress-bar", dest="no_bar", action="store_true", default=False, help="Disables the progress bar") group.add_argument("--no-wait", dest="wait", action="store_false", default=True, help="Don't wait for rebalance completion") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): eject_nodes = [] if opts.server_remove: eject_nodes = apply_default_port(opts.server_remove) _, errors = self.rest.rebalance(eject_nodes) _exit_if_errors(errors) time.sleep(1) if opts.wait: bar = TopologyProgressBar(self.rest, 'Rebalancing', opts.no_bar) errors = bar.show() _exit_if_errors(errors) _success("Rebalance complete") else: _success("Rebalance started") @staticmethod def get_man_page_name(): return "couchbase-cli-rebalance" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Start a cluster rebalancing" class RebalanceStatus(Subcommand): """The rebalance status subcommand""" def __init__(self): super(RebalanceStatus, self).__init__() self.parser.prog = "couchbase-cli rebalance-status" @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): status, errors = self.rest.rebalance_status() _exit_if_errors(errors) print(json.dumps(status, indent=2)) @staticmethod def get_man_page_name(): return "couchbase-cli-rebalance-status" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Show rebalance status" class RebalanceStop(Subcommand): """The rebalance stop subcommand""" def __init__(self): super(RebalanceStop, self).__init__() self.parser.prog = "couchbase-cli rebalance-stop" @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): _, errors = self.rest.stop_rebalance() _exit_if_errors(errors) _success("Rebalance stopped") @staticmethod def get_man_page_name(): return "couchbase-cli-rebalance-stop" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Stop a rebalance" class Recovery(Subcommand): """The recovery command""" def __init__(self): super(Recovery, self).__init__() self.parser.prog = "couchbase-cli recovery" group = self.parser.add_argument_group("Recovery options") group.add_argument("--server-recovery", dest="servers", metavar="<server_list>", required=True, help="The list of servers to recover") group.add_argument("--recovery-type", dest="recovery_type", metavar="type", choices=["delta", "full"], default="delta", help="The recovery type (delta or full)") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): servers = apply_default_port(opts.servers) for server in servers: _, errors = self.rest.recovery(server, opts.recovery_type) _exit_if_errors(errors) _success("Servers recovered") @staticmethod def get_man_page_name(): return "couchbase-cli-recovery" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Recover one or more servers" class ResetAdminPassword(LocalSubcommand): """The reset admin password command""" def __init__(self): super(ResetAdminPassword, self).__init__() self.parser.prog = "couchbase-cli reset-admin-password" group = self.parser.add_argument_group("Reset password options") group.add_argument("--new-password", dest="new_password", metavar="<password>", required=False, action=CBNonEchoedAction, envvar=None, prompt_text="Enter new administrator password:", confirm_text="Confirm new administrator password:", help="The new 
administrator password") group.add_argument("--regenerate", dest="regenerate", action="store_true", help="Generates a random administrator password") group.add_argument("-P", "--port", metavar="<port>", default="8091", help="The REST API port, defaults to 8091") def execute(self, opts): token = _exit_on_file_read_failure(os.path.join(opts.config_path, "localtoken")).rstrip() rest = ClusterManager("http://127.0.0.1:" + opts.port, "@localtoken", token) check_cluster_initialized(rest) check_versions(rest) if opts.new_password is not None and opts.regenerate: _exit_if_errors(["Cannot specify both --new-password and --regenerate at the same time"]) elif opts.new_password is not None: _, errors = rest.set_admin_password(opts.new_password) _exit_if_errors(errors) _success("Administrator password changed") elif opts.regenerate: result, errors = rest.regenerate_admin_password() _exit_if_errors(errors) print(result["password"]) else: _exit_if_errors(["No parameters specified"]) @staticmethod def get_man_page_name(): return "couchbase-cli-reset-admin-password" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Resets the administrator password" class ServerAdd(Subcommand): """The server add command""" def __init__(self): super(ServerAdd, self).__init__() self.parser.prog = "couchbase-cli server-add" group = self.parser.add_argument_group("Server add options") group.add_argument("--server-add", dest="servers", metavar="<server_list>", required=True, help="The list of servers to add") group.add_argument("--server-add-username", dest="server_username", metavar="<username>", required=True, help="The username for the server to add") group.add_argument("--server-add-password", dest="server_password", metavar="<password>", required=True, help="The password for the server to add") group.add_argument("--group-name", dest="group_name", metavar="<name>", help="The server group to add this server into") group.add_argument("--services", dest="services", default="data", metavar="<services>", help="The services this server will run") group.add_argument("--index-storage-setting", dest="index_storage_mode", metavar="<mode>", choices=["default", "memopt"], help="The index storage mode") @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False) def execute(self, opts): if not self.enterprise and opts.index_storage_mode == 'memopt': _exit_if_errors(["memopt option for --index-storage-setting can only be configured on enterprise edition"]) opts.services, errors = process_services(opts.services, self.enterprise) _exit_if_errors(errors) settings, errors = self.rest.index_settings() _exit_if_errors(errors) if opts.index_storage_mode is None and settings['storageMode'] == "" and "index" in opts.services: opts.index_storage_mode = "default" # For supporting the default index backend changing from forestdb to plasma in Couchbase 5.0 default = "plasma" if opts.index_storage_mode == "default" and settings['storageMode'] == "forestdb" or not self.enterprise: default = "forestdb" if opts.index_storage_mode: param = index_storage_mode_to_param(opts.index_storage_mode, default) _, errors = self.rest.set_index_settings(param, None, None, None, None, None, None, None) _exit_if_errors(errors) servers = opts.servers.split(',') for server in servers: _, errors = self.rest.add_server(server, opts.group_name, opts.server_username, opts.server_password, opts.services) _exit_if_errors(errors) _success("Server added") @staticmethod def get_man_page_name(): return 
"couchbase-cli-server-add" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Add servers to the cluster" class ServerEshell(Subcommand): """The server eshell subcommand""" def __init__(self): super(ServerEshell, self).__init__() self.parser.prog = "couchbase-cli server-eshell" group = self.parser.add_argument_group("Server eshell options") group.add_argument("--vm", dest="vm", default="ns_server", metavar="<name>", help="The vm to connect to") group.add_argument("--erl-path", dest="erl_path", metavar="<path>", default=CB_BIN_PATH, help="Override the path to the erl executable") @rest_initialiser(version_check=True) def execute(self, opts): # Cluster does not need to be initialized for this command result, errors = self.rest.node_info() _exit_if_errors(errors) node = result['otpNode'] cookie = result['otpCookie'] if opts.vm != 'ns_server': cookie, errors = self.rest.get_babysitter_cookie() _exit_if_errors(errors) [short, _] = node.split('@') if opts.vm == 'babysitter': node = f'babysitter_of_{short}@cb.local' elif opts.vm == 'couchdb': node = f'couchdb_{short}@cb.local' else: _exit_if_errors([f'Unknown vm type `{opts.vm}`']) rand_chars = ''.join(random.choice(string.ascii_letters) for _ in range(20)) name = f'ctl-{rand_chars}@127.0.0.1' cb_erl = os.path.join(opts.erl_path, 'erl') if os.path.isfile(cb_erl): path = cb_erl else: _warning("Cannot locate Couchbase erlang. Attempting to use non-Couchbase erlang") path = 'erl' inetrc_file = os.path.join(CB_ETC_PATH, 'hosts.cfg') if os.path.isfile(inetrc_file): inetrc_opt = ['-kernel', 'inetrc', f'"{inetrc_file}"'] else: inetrc_opt = [] ns_server_ebin_path = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_server", "ebin") with tempfile.NamedTemporaryFile() as temp: temp.write(f'[{{preferred_local_proto,{result["addressFamily"]}_tcp_dist}}].'.encode()) temp.flush() temp_name = temp.name args = [path, '-name', name, '-setcookie', cookie, '-hidden', '-remsh', node, '-proto_dist', 'cb', '-epmd_module', 'cb_epmd', '-pa', ns_server_ebin_path, '-kernel', 'dist_config_file', f'"{temp_name}"'] + inetrc_opt if opts.debug: print(f'Running {" ".join(args)}') try: subprocess.call(args) except OSError: _exit_if_errors(["Unable to find the erl executable"]) @staticmethod def get_man_page_name(): return "couchbase-cli-server-eshell" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Opens a shell to the Couchbase cluster manager" @staticmethod def is_hidden(): # Internal command not recommended for production use return True class ServerInfo(Subcommand): """The server info subcommand""" def __init__(self): super(ServerInfo, self).__init__() self.parser.prog = "couchbase-cli server-info" @rest_initialiser(version_check=True) def execute(self, opts): # Cluster does not need to be initialized for this command result, errors = self.rest.node_info() _exit_if_errors(errors) print(json.dumps(result, sort_keys=True, indent=2)) @staticmethod def get_man_page_name(): return "couchbase-cli-server-info" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Show details of a node in the cluster" class ServerList(Subcommand): """The server list subcommand""" def __init__(self): super(ServerList, self).__init__() self.parser.prog = "couchbase-cli server-list" @rest_initialiser(version_check=True) def execute(self, opts): result, errors = self.rest.pools('default') _exit_if_errors(errors) for node in result['nodes']: if node.get('otpNode') is None: raise Exception("could 
not access node") print(node['otpNode'], node['hostname'], node['status'], node['clusterMembership']) @staticmethod def get_man_page_name(): return "couchbase-cli-server-list" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "List all nodes in a cluster" class ServerReadd(Subcommand): """The server readd subcommand (Deprecated)""" def __init__(self): super(ServerReadd, self).__init__() self.parser.prog = "couchbase-cli server-readd" group = self.parser.add_argument_group("Server re-add options") group.add_argument("--server-add", dest="servers", metavar="<server_list>", required=True, help="The list of servers to recover") # The parameters are unused, but kept for backwards compatibility group.add_argument("--server-username", dest="server_username", metavar="<username>", help="The admin username for the server") group.add_argument("--server-password", dest="server_password", metavar="<password>", help="The admin password for the server") group.add_argument("--group-name", dest="name", metavar="<name>", help="The name of the server group") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): _deprecated("Please use the recovery command instead") servers = apply_default_port(opts.servers) for server in servers: _, errors = self.rest.readd_server(server) _exit_if_errors(errors) _success("Servers recovered") @staticmethod def get_man_page_name(): return "couchbase-cli-server-readd" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Add failed server back to the cluster" @staticmethod def is_hidden(): # Deprecated command in 4.6, hidden in 5.0, pending removal return True class SettingAlert(Subcommand): """The setting alert subcommand""" def __init__(self): super(SettingAlert, self).__init__() self.parser.prog = "couchbase-cli setting-alert" group = self.parser.add_argument_group("Alert settings") group.add_argument("--enable-email-alert", dest="enabled", metavar="<1|0>", required=True, choices=["0", "1"], help="Enable/disable email alerts") group.add_argument("--email-recipients", dest="email_recipients", metavar="<email_list>", help="A comma separated list of email addresses") group.add_argument("--email-sender", dest="email_sender", metavar="<email_addr>", help="The sender email address") group.add_argument("--email-user", dest="email_username", metavar="<username>", default="", help="The email server username") group.add_argument("--email-password", dest="email_password", metavar="<password>", default="", help="The email server password") group.add_argument("--email-host", dest="email_host", metavar="<host>", help="The email server host") group.add_argument("--email-port", dest="email_port", metavar="<port>", help="The email server port") group.add_argument("--enable-email-encrypt", dest="email_encrypt", metavar="<1|0>", choices=["0", "1"], help="Enable SSL encryption for emails") group.add_argument("--alert-auto-failover-node", dest="alert_af_node", action="store_true", help="Alert when a node is auto-failed over") group.add_argument("--alert-auto-failover-max-reached", dest="alert_af_max_reached", action="store_true", help="Alert when the max number of auto-failover nodes was reached") group.add_argument("--alert-auto-failover-node-down", dest="alert_af_node_down", action="store_true", help="Alert when a node wasn't auto-failed over because other nodes " + "were down") group.add_argument("--alert-auto-failover-cluster-small", dest="alert_af_small", action="store_true", help="Alert when a 
node wasn't auto-failed over because cluster was" + " too small") group.add_argument("--alert-auto-failover-disable", dest="alert_af_disable", action="store_true", help="Alert when a node wasn't auto-failed over because auto-failover" + " is disabled") group.add_argument("--alert-ip-changed", dest="alert_ip_changed", action="store_true", help="Alert when a nodes IP address changed") group.add_argument("--alert-disk-space", dest="alert_disk_space", action="store_true", help="Alert when disk usage on a node reaches 90%%") group.add_argument("--alert-meta-overhead", dest="alert_meta_overhead", action="store_true", help="Alert when metadata overhead is more than 50%%") group.add_argument("--alert-meta-oom", dest="alert_meta_oom", action="store_true", help="Alert when all bucket memory is used for metadata") group.add_argument("--alert-write-failed", dest="alert_write_failed", action="store_true", help="Alert when writing data to disk has failed") group.add_argument("--alert-audit-msg-dropped", dest="alert_audit_dropped", action="store_true", help="Alert when writing event to audit log failed") group.add_argument("--alert-indexer-max-ram", dest="alert_indexer_max_ram", action="store_true", help="Alert when indexer is using all of its allocated memory") group.add_argument("--alert-timestamp-drift-exceeded", dest="alert_cas_drift", action="store_true", help="Alert when clocks on two servers are more than five seconds" + "apart") group.add_argument("--alert-communication-issue", dest="alert_communication_issue", action="store_true", help="Alert when nodes are experiencing communication issues") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if opts.enabled == "1": if opts.email_recipients is None: _exit_if_errors(["--email-recipient must be set when email alerts are enabled"]) if opts.email_sender is None: _exit_if_errors(["--email-sender must be set when email alerts are enabled"]) if opts.email_host is None: _exit_if_errors(["--email-host must be set when email alerts are enabled"]) if opts.email_port is None: _exit_if_errors(["--email-port must be set when email alerts are enabled"]) alerts = list() if opts.alert_af_node: alerts.append('auto_failover_node') if opts.alert_af_max_reached: alerts.append('auto_failover_maximum_reached') if opts.alert_af_node_down: alerts.append('auto_failover_other_nodes_down') if opts.alert_af_small: alerts.append('auto_failover_cluster_too_small') if opts.alert_af_disable: alerts.append('auto_failover_disabled') if opts.alert_ip_changed: alerts.append('ip') if opts.alert_disk_space: alerts.append('disk') if opts.alert_meta_overhead: alerts.append('overhead') if opts.alert_meta_oom: alerts.append('ep_oom_errors') if opts.alert_write_failed: alerts.append('ep_item_commit_failed') if opts.alert_audit_dropped: alerts.append('audit_dropped_events') if opts.alert_indexer_max_ram: alerts.append('indexer_ram_max_usage') if opts.alert_cas_drift: alerts.append('ep_clock_cas_drift_threshold_exceeded') if opts.alert_communication_issue: alerts.append('communication_issue') enabled = "true" if opts.enabled == "0": enabled = "false" email_encrypt = "false" if opts.email_encrypt == "1": email_encrypt = "true" _, errors = self.rest.set_alert_settings(enabled, opts.email_recipients, opts.email_sender, opts.email_username, opts.email_password, opts.email_host, opts.email_port, email_encrypt, ",".join(alerts)) _exit_if_errors(errors) _success("Email alert settings modified") @staticmethod def get_man_page_name(): return 
"couchbase-cli-setting-alert" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify email alert settings" class SettingAudit(Subcommand): """The settings audit subcommand""" def __init__(self): super(SettingAudit, self).__init__() self.parser.prog = "couchbase-cli setting-audit" self.parser.description = "Available only in Couchbase Server Enterprise Edition" group = self.parser.add_argument_group("Audit settings") group.add_argument("--list-filterable-events", dest="list_events", action="store_true", help="Retrieve a list of filterable event IDs and the descriptions") group.add_argument("--get-settings", dest="get_settings", action="store_true", help="Retrieve current audit settings") group.add_argument("--set", dest="set_settings", action="store_true", help="Set current audit settings") group.add_argument("--audit-enabled", dest="enabled", metavar="<1|0>", choices=["0", "1"], help="Enable/disable auditing") group.add_argument("--audit-log-path", dest="log_path", metavar="<path>", help="The audit log path") group.add_argument("--audit-log-rotate-interval", dest="rotate_interval", type=(int), metavar="<seconds>", help="The audit log rotate interval") group.add_argument("--audit-log-rotate-size", dest="rotate_size", type=(int), metavar="<bytes>", help="The audit log rotate size") group.add_argument("--disabled-users", dest="disabled_users", default=None, help="A comma-separated list of users to ignore events from") group.add_argument("--disable-events", dest="disable_events", default=None, help="A comma-separated list of audit-event IDs to not audit") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): flags = sum([opts.list_events, opts.get_settings, opts.set_settings]) if flags != 1: _exit_if_errors(["One of the following is required: --list-filterable-events, --get-settings or --set"]) if opts.list_events: descriptors, errors = self.rest.get_id_descriptors() _exit_if_errors(errors) if opts.output == 'json': print(json.dumps(descriptors, indent=4)) return self.format_descriptors_in_table(descriptors) elif opts.get_settings: audit_settings, errors = self.rest.get_audit_settings() _exit_if_errors(errors) if opts.output == 'json': print(json.dumps(audit_settings, indent=4)) return descriptors, errors = self.rest.get_id_descriptors() _exit_if_errors(errors) self.format_audit_settings(audit_settings, descriptors) elif opts.set_settings: if not (opts.enabled or opts.log_path or opts.rotate_interval or opts.rotate_size or opts.disable_events is not None or opts.disabled_users is not None): _exit_if_errors(["At least one of [--audit-enabled, --audit-log-path, --audit-log-rotate-interval," " --audit-log-rotate-size, --disabled-users, --disable-events] is required with" " --set"]) if opts.enabled == "1": opts.enabled = "true" elif opts.enabled == "0": opts.enabled = "false" _, errors = self.rest.set_audit_settings(opts.enabled, opts.log_path, opts.rotate_interval, opts.rotate_size, opts.disable_events, opts.disabled_users) _exit_if_errors(errors) _success("Audit settings modified") @staticmethod def format_audit_settings(audit_settings, json_descriptors): print(f'Audit enabled: {audit_settings["auditdEnabled"]}') print(f'UUID: {audit_settings["uid"]}') print(f'Log path: {audit_settings["logPath"] if "logPath" in audit_settings else "N/A"}') print(f'Rotate interval: {audit_settings["rotateInterval"]}') print(f'Rotate size: {audit_settings["rotateSize"]}') print(f'Disabled users: {audit_settings["disabledUsers"]}') if not 
audit_settings["auditdEnabled"]: return # change id lists to maps to make lookup o(1) disable_map = {eventID for eventID in audit_settings['disabled']} json_descriptors.sort(key=itemgetter('module', 'id')) all_descriptors_sets = {events["id"] for events in json_descriptors} padding_name = 12 for descriptor in json_descriptors: if len(descriptor['name']) > padding_name: padding_name = len(descriptor['name']) padding_name += 2 header = f'{"ID":<6}| {"Module":<15}| {"Name":<{padding_name}}| Enabled' print(header) print('-' * len(header)) for descriptor in json_descriptors: print(f'{descriptor["id"]:<6}| {descriptor["module"]:<15}| {descriptor["name"]:<{padding_name}}| ' f'{"False" if descriptor["id"] in disable_map else "True"}') not_recognized = disable_map - all_descriptors_sets for unrecognized in not_recognized: print(f'{unrecognized:<6}| {"unknown":<15}| {"unknown":<{padding_name}}| False') @staticmethod def format_descriptors_in_table(json_descriptors): sorted_descriptors = sorted(json_descriptors, key=itemgetter('module', 'id')) padding_name = 15 for descriptor in sorted_descriptors: if len(descriptor['name']) > padding_name: padding_name = len(descriptor['name']) padding_name += 2 header = f'{"ID":<6}| {"Module":<15}| {"Name":<{padding_name}}| Description' print(header) print('-' * len(header)) for descriptor in sorted_descriptors: print(f'{descriptor["id"]:<6}| {descriptor["module"]:<15}| {descriptor["name"]:<{padding_name}}| ' f'{descriptor["description"]}') @staticmethod def get_man_page_name(): return "couchbase-cli-setting-audit" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify audit settings" class SettingAutofailover(Subcommand): """The settings auto-failover subcommand""" def __init__(self): super(SettingAutofailover, self).__init__() self.parser.prog = "couchbase-cli setting-autofailover" group = self.parser.add_argument_group("Auto-failover settings") group.add_argument("--enable-auto-failover", dest="enabled", metavar="<1|0>", choices=["0", "1"], help="Enable/disable auto-failover") group.add_argument("--auto-failover-timeout", dest="timeout", metavar="<seconds>", type=(int), help="The auto-failover timeout") group.add_argument("--enable-failover-of-server-groups", dest="enable_failover_of_server_groups", metavar="<1|0>", choices=["0", "1"], help="Enable/disable auto-failover of server Groups") group.add_argument("--max-failovers", dest="max_failovers", metavar="<1|2|3>", choices=["1", "2", "3"], help="Maximum number of times an auto-failover event can happen") group.add_argument("--enable-failover-on-data-disk-issues", dest="enable_failover_on_data_disk_issues", metavar="<1|0>", choices=["0", "1"], help="Enable/disable auto-failover when the Data Service reports disk issues. " + "Couchbase Server Enterprise Edition only.") group.add_argument("--failover-data-disk-period", dest="failover_on_data_disk_period", metavar="<seconds>", type=(int), help="The amount of time the Data Serivce disk failures has to be happening for to trigger" " an auto-failover") group.add_argument("--can-abort-rebalance", metavar="<1|0>", choices=["1", "0"], dest="can_abort_rebalance", help="Enables auto-failover to abort rebalance and perform the failover. 
(EE only)") @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False) def execute(self, opts): if opts.enabled == "1": opts.enabled = "true" elif opts.enabled == "0": opts.enabled = "false" if opts.enable_failover_on_data_disk_issues == "1": opts.enable_failover_on_data_disk_issues = "true" elif opts.enable_failover_on_data_disk_issues == "0": opts.enable_failover_on_data_disk_issues = "false" if opts.enable_failover_of_server_groups == "1": opts.enable_failover_of_server_groups = "true" elif opts.enable_failover_of_server_groups == "0": opts.enable_failover_of_server_groups = "false" if not self.enterprise: if opts.enable_failover_of_server_groups: _exit_if_errors(["--enable-failover-of-server-groups can only be configured on enterprise edition"]) if opts.enable_failover_on_data_disk_issues or opts.failover_on_data_disk_period: _exit_if_errors(["Auto failover on Data Service disk issues can only be configured on enterprise" + " edition"]) if opts.max_failovers: _exit_if_errors(["--max-count can only be configured on enterprise edition"]) if opts.can_abort_rebalance: _exit_if_errors(["--can-abort-rebalance can only be configured on enterprise edition"]) if not any([opts.enabled, opts.timeout, opts.enable_failover_on_data_disk_issues, opts.failover_on_data_disk_period, opts.enable_failover_of_server_groups, opts.max_failovers]): _exit_if_errors(["No settings specified to be changed"]) if ((opts.enable_failover_on_data_disk_issues is None or opts.enable_failover_on_data_disk_issues == "false") and opts.failover_on_data_disk_period): _exit_if_errors(["--enable-failover-on-data-disk-issues must be set to 1 when auto-failover Data" " Service disk period has been set"]) if opts.enable_failover_on_data_disk_issues and opts.failover_on_data_disk_period is None: _exit_if_errors(["--failover-data-disk-period must be set when auto-failover on Data Service disk" " is enabled"]) if opts.enabled == "false" or opts.enabled is None: if opts.enable_failover_on_data_disk_issues or opts.failover_on_data_disk_period: _exit_if_errors(["--enable-auto-failover must be set to 1 when auto-failover on Data Service disk issues" " settings are being configured"]) if opts.enable_failover_of_server_groups: _exit_if_errors(["--enable-auto-failover must be set to 1 when enabling auto-failover of Server Groups"]) if opts.timeout: _warning("Timeout specified will not take affect because auto-failover is being disabled") if opts.can_abort_rebalance == '1': opts.can_abort_rebalance = 'true' elif opts.can_abort_rebalance == '0': opts.can_abort_rebalance = 'false' _, errors = self.rest.set_autofailover_settings(opts.enabled, opts.timeout, opts.enable_failover_of_server_groups, opts.max_failovers, opts.enable_failover_on_data_disk_issues, opts.failover_on_data_disk_period, opts.can_abort_rebalance) _exit_if_errors(errors) _success("Auto-failover settings modified") @staticmethod def get_man_page_name(): return "couchbase-cli-setting-autofailover" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify auto failover settings" class SettingAutoreprovision(Subcommand): """The settings auto-reprovision subcommand""" def __init__(self): super(SettingAutoreprovision, self).__init__() self.parser.prog = "couchbase-cli setting-autoreprovision" group = self.parser.add_argument_group("Auto-reprovision settings") group.add_argument("--enabled", dest="enabled", metavar="<1|0>", required=True, choices=["0", "1"], help="Enable/disable auto-reprovision") 
group.add_argument("--max-nodes", dest="max_nodes", metavar="<num>", type=(int), help="The numbers of server that can be auto-reprovisioned before a rebalance") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if opts.enabled == "1": opts.enabled = "true" elif opts.enabled == "0": opts.enabled = "false" if opts.enabled == "true" and opts.max_nodes is None: _exit_if_errors(["--max-nodes must be specified if auto-reprovision is enabled"]) if not (opts.enabled or opts.max_nodes): _exit_if_errors(["No settings specified to be changed"]) if (opts.enabled is None or opts.enabled == "false") and opts.max_nodes: _warning("--max-servers will not take affect because auto-reprovision is being disabled") _, errors = self.rest.set_autoreprovision_settings(opts.enabled, opts.max_nodes) _exit_if_errors(errors) _success("Auto-reprovision settings modified") @staticmethod def get_man_page_name(): return "couchbase-cli-setting-autoreprovision" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify auto-reprovision settings" class SettingCluster(Subcommand): """The settings cluster subcommand""" def __init__(self): super(SettingCluster, self).__init__() self.parser.prog = "couchbase-cli setting-cluster" group = self.parser.add_argument_group("Cluster settings") group.add_argument("--cluster-username", dest="new_username", metavar="<username>", help="The cluster administrator username") group.add_argument("--cluster-password", dest="new_password", metavar="<password>", help="Only compact the data files") group.add_argument("--cluster-port", dest="port", type=(int), metavar="<port>", help="The cluster administration console port") group.add_argument("--cluster-ramsize", dest="data_mem_quota", metavar="<quota>", type=(int), help="The data service memory quota in mebibytes") group.add_argument("--cluster-index-ramsize", dest="index_mem_quota", metavar="<quota>", type=(int), help="The index service memory quota in mebibytes") group.add_argument("--cluster-fts-ramsize", dest="fts_mem_quota", metavar="<quota>", type=(int), help="The full-text service memory quota in mebibytes") group.add_argument("--cluster-eventing-ramsize", dest="eventing_mem_quota", metavar="<quota>", type=(int), help="The Eventing service memory quota in mebibytes") group.add_argument("--cluster-analytics-ramsize", dest="cbas_mem_quota", metavar="<quota>", type=(int), help="The analytics service memory quota in mebibytes") group.add_argument("--cluster-name", dest="name", metavar="<name>", help="The cluster name") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if (opts.data_mem_quota or opts.index_mem_quota or opts.fts_mem_quota or opts.cbas_mem_quota or opts.eventing_mem_quota or opts.name): _, errors = self.rest.set_pools_default(opts.data_mem_quota, opts.index_mem_quota, opts.fts_mem_quota, opts.cbas_mem_quota, opts.eventing_mem_quota, opts.name) _exit_if_errors(errors) if opts.new_username or opts.new_password or opts.port: username = opts.username if opts.new_username: username = opts.new_username password = opts.password if opts.new_password: password = opts.new_password _, errors = self.rest.set_admin_credentials(username, password, opts.port) _exit_if_errors(errors) _success("Cluster settings modified") @staticmethod def get_man_page_name(): return "couchbase-cli-setting-cluster" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify cluster settings" class ClusterEdit(SettingCluster): 
"""The cluster edit subcommand (Deprecated)""" def __init__(self): super(ClusterEdit, self).__init__() self.parser.prog = "couchbase-cli cluster-edit" def execute(self, opts): _deprecated("Please use the setting-cluster command instead") super(ClusterEdit, self).execute(opts) @staticmethod def get_man_page_name(): return "couchbase-cli-cluster-edit" + ".1" if os.name != "nt" else ".html" @staticmethod def is_hidden(): # Deprecated command in 4.6, hidden in 5.0, pending removal return True class SettingCompaction(Subcommand): """The setting compaction subcommand""" def __init__(self): super(SettingCompaction, self).__init__() self.parser.prog = "couchbase-cli setting-compaction" group = self.parser.add_argument_group("Compaction settings") group.add_argument("--compaction-db-percentage", dest="db_perc", metavar="<perc>", type=(int), help="Compacts the db once the fragmentation reaches this percentage") group.add_argument("--compaction-db-size", dest="db_size", metavar="<mebibytes>", type=(int), help="Compacts db once the fragmentation reaches this size (MiB)") group.add_argument("--compaction-view-percentage", dest="view_perc", metavar="<perc>", type=(int), help="Compacts the view once the fragmentation reaches this percentage") group.add_argument("--compaction-view-size", dest="view_size", metavar="<mebibytes>", type=(int), help="Compacts view once the fragmentation reaches this size (MiB)") group.add_argument("--compaction-period-from", dest="from_period", metavar="<HH:MM>", help="Only run compaction after this time") group.add_argument("--compaction-period-to", dest="to_period", metavar="<HH:MM>", help="Only run compaction before this time") group.add_argument("--enable-compaction-abort", dest="enable_abort", metavar="<1|0>", choices=["0", "1"], help="Allow compactions to be aborted") group.add_argument("--enable-compaction-parallel", dest="enable_parallel", metavar="<1|0>", choices=["0", "1"], help="Allow parallel compactions") group.add_argument("--metadata-purge-interval", dest="purge_interval", metavar="<float>", type=(float), help="The metadata purge interval") group.add_argument("--gsi-compaction-mode", dest="gsi_mode", choices=["append", "circular"], help="Sets the gsi compaction mode (append or circular)") group.add_argument("--compaction-gsi-percentage", dest="gsi_perc", type=(int), metavar="<perc>", help="Starts compaction once gsi file fragmentation has reached this percentage" + "(Append mode only)") group.add_argument("--compaction-gsi-interval", dest="gsi_interval", metavar="<days>", help="A comma separated list of days compaction can run (Circular mode only)") group.add_argument("--compaction-gsi-period-from", dest="gsi_from_period", metavar="<HH:MM>", help="Allow gsi compaction to run after this time (Circular mode only)") group.add_argument("--compaction-gsi-period-to", dest="gsi_to_period", metavar="<HH:MM>", help="Allow gsi compaction to run before this time (Circular mode only)") group.add_argument("--enable-gsi-compaction-abort", dest="enable_gsi_abort", metavar="<1|0>", choices=["0", "1"], help="Abort gsi compaction if when run outside of the accepted interaval" + "(Circular mode only)") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if opts.db_perc is not None and (opts.db_perc < 2 or opts.db_perc > 100): _exit_if_errors(["--compaction-db-percentage must be between 2 and 100"]) if opts.view_perc is not None and (opts.view_perc < 2 or opts.view_perc > 100): _exit_if_errors(["--compaction-view-percentage must be between 2 and 
100"]) if opts.db_size is not None: if int(opts.db_size) < 1: _exit_if_errors(["--compaction-db-size must be between greater than 1 or infinity"]) opts.db_size = int(opts.db_size) * 1024**2 if opts.view_size is not None: if int(opts.view_size) < 1: _exit_if_errors(["--compaction-view-size must be between greater than 1 or infinity"]) opts.view_size = int(opts.view_size) * 1024**2 if opts.from_period and not (opts.to_period and opts.enable_abort): errors = [] if opts.to_period is None: errors.append("--compaction-period-to is required when using --compaction-period-from") if opts.enable_abort is None: errors.append("--enable-compaction-abort is required when using --compaction-period-from") _exit_if_errors(errors) if opts.to_period and not (opts.from_period and opts.enable_abort): errors = [] if opts.from_period is None: errors.append("--compaction-period-from is required when using --compaction-period-to") if opts.enable_abort is None: errors.append("--enable-compaction-abort is required when using --compaction-period-to") _exit_if_errors(errors) if opts.enable_abort and not (opts.from_period and opts.to_period): errors = [] if opts.from_period is None: errors.append("--compaction-period-from is required when using --enable-compaction-abort") if opts.to_period is None: errors.append("--compaction-period-to is required when using --enable-compaction-abort") _exit_if_errors(errors) from_hour, from_min = self._handle_timevalue(opts.from_period, "--compaction-period-from") to_hour, to_min = self._handle_timevalue(opts.to_period, "--compaction-period-to") if opts.enable_abort == "1": opts.enable_abort = "true" elif opts.enable_abort == "0": opts.enable_abort = "false" if opts.enable_parallel == "1": opts.enable_parallel = "true" else: opts.enable_parallel = "false" if opts.purge_interval is not None and (opts.purge_interval < 0.04 or opts.purge_interval > 60.0): _exit_if_errors(["--metadata-purge-interval must be between 0.04 and 60.0"]) g_from_hour = None g_from_min = None g_to_hour = None g_to_min = None if opts.gsi_mode == "append": opts.gsi_mode = "full" if opts.gsi_perc is None: _exit_if_errors(['--compaction-gsi-percentage must be specified when --gsi-compaction-mode is set ' 'to append']) elif opts.gsi_mode == "circular": if opts.gsi_from_period is not None and opts.gsi_to_period is None: _exit_if_errors(["--compaction-gsi-period-to is required with --compaction-gsi-period-from"]) if opts.gsi_to_period is not None and opts.gsi_from_period is None: _exit_if_errors(["--compaction-gsi-period-from is required with --compaction-gsi-period-to"]) g_from_hour, g_from_min = self._handle_timevalue(opts.gsi_from_period, "--compaction-gsi-period-from") g_to_hour, g_to_min = self._handle_timevalue(opts.gsi_to_period, "--compaction-gsi-period-to") if opts.enable_gsi_abort == "1": opts.enable_gsi_abort = "true" else: opts.enable_gsi_abort = "false" _, errors = self.rest.set_compaction_settings(opts.db_perc, opts.db_size, opts.view_perc, opts.view_size, from_hour, from_min, to_hour, to_min, opts.enable_abort, opts.enable_parallel, opts.purge_interval, opts.gsi_mode, opts.gsi_perc, opts.gsi_interval, g_from_hour, g_from_min, g_to_hour, g_to_min, opts.enable_gsi_abort) _exit_if_errors(errors) _success("Compaction settings modified") def _handle_timevalue(self, opt_value, opt_name): hour = None minute = None if opt_value: if opt_value.find(':') == -1: _exit_if_errors([f'Invalid value for {opt_name}, must be in form XX:XX']) hour, minute = opt_value.split(':', 1) try: hour = int(hour) except ValueError: 
_exit_if_errors([f'Invalid hour value for {opt_name}, must be an integer']) if hour not in range(24): _exit_if_errors([f'Invalid hour value for {opt_name}, must be 0-23']) try: minute = int(minute) except ValueError: _exit_if_errors([f'Invalid minute value for {opt_name}, must be an integer']) if minute not in range(60): _exit_if_errors([f'Invalid minute value for {opt_name}, must be 0-59']) return hour, minute @staticmethod def get_man_page_name(): return "couchbase-cli-setting-compaction" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify auto-compaction settings" class SettingIndex(Subcommand): """The setting index subcommand""" def __init__(self): super(SettingIndex, self).__init__() self.parser.prog = "couchbase-cli setting-index" group = self.parser.add_argument_group("Index settings") group.add_argument("--index-max-rollback-points", dest="max_rollback", metavar="<num>", type=(int), help="Max rollback points") group.add_argument("--index-stable-snapshot-interval", dest="stable_snap", type=(int), metavar="<seconds>", help="Stable snapshot interval in seconds") group.add_argument("--index-memory-snapshot-interval", dest="mem_snap", metavar="<ms>", type=(int), help="Stable snapshot interval in milliseconds") group.add_argument("--index-storage-setting", dest="storage_mode", metavar="<mode>", choices=["default", "memopt"], help="The index storage backend") group.add_argument("--index-threads", dest="threads", metavar="<num>", type=(int), help="The number of indexer threads") group.add_argument("--index-log-level", dest="log_level", metavar="<level>", choices=["debug", "silent", "fatal", "error", "warn", "info", "verbose", "timing", "trace"], help="The indexer log level") group.add_argument('--replicas', metavar='<num>', type=int, help='Number of index replicas') group.add_argument('--optimize-placement', metavar='<1|0>', type=str, help='Optimize index placement on a rebalance.') @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False) def execute(self, opts): if (opts.max_rollback is None and opts.stable_snap is None and opts.mem_snap is None and opts.storage_mode is None and opts.threads is None and opts.log_level is None and opts.replicas is None and opts.optimize_placement is None): _exit_if_errors(["No settings specified to be changed"]) settings, errors = self.rest.index_settings() _exit_if_errors(errors) # For supporting the default index backend changing from forestdb to plasma in Couchbase 5.0 default = "plasma" if opts.storage_mode == "default" and settings['storageMode'] == "forestdb" or not self.enterprise: default = "forestdb" opts.storage_mode = index_storage_mode_to_param(opts.storage_mode, default) _, errors = self.rest.set_index_settings(opts.storage_mode, opts.max_rollback, opts.stable_snap, opts.mem_snap, opts.threads, opts.log_level, opts.replicas, opts.optimize_placement) _exit_if_errors(errors) _success("Indexer settings modified") @staticmethod def get_man_page_name(): return "couchbase-cli-setting-index" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify index settings" class SettingSaslauthd(Subcommand): """The setting sasl subcommand""" def __init__(self): super(SettingSaslauthd, self).__init__() self.parser.prog = "couchbase-cli setting-saslauthd" group = self.parser.add_argument_group("saslauthd settings") group.add_argument("--enabled", dest="enabled", metavar="<1|0>", required=True, choices=["0", "1"], help="Enable/disable saslauthd") 
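        # Note: the --admins and --roadmins values below are comma separated on
        # the command line; execute() rewrites each list to newline separated
        # values before passing it to self.rest.sasl_settings().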
group.add_argument("--admins", dest="admins", metavar="<user_list>", help="A comma separated list of full admins") group.add_argument("--roadmins", dest="roadmins", metavar="<user_list>", help="A comma separated list of read only admins") group.add_argument("--default", dest="default", default="none", choices=["admins", "roadmins", "none"], metavar="<default>", help="Default roles for saslauthd users") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): admins = "" if opts.admins: admins = opts.admins.replace(",", "\n") ro_admins = "" if opts.roadmins: ro_admins = opts.roadmins.replace(",", "\n") errors = None if opts.enabled == '1': if opts.default == 'admins': if ro_admins: _warning("--ro-admins option ignored since default is read only admins") _, errors = self.rest.sasl_settings('true', ro_admins, None) elif opts.default == 'roadmins': if admins: _warning("--admins option ignored since default is admins") _, errors = self.rest.sasl_settings('true', None, admins) else: _, errors = self.rest.sasl_settings('true', ro_admins, admins) else: if admins: _warning("--admins option ignored since saslauthd is being disabled") if ro_admins: _warning("--roadmins option ignored since saslauthd is being disabled") _, errors = self.rest.sasl_settings('false', "", "") _exit_if_errors(errors) _success("saslauthd settings modified") @staticmethod def get_man_page_name(): return "couchbase-cli-setting-saslauthd" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify saslauthd settings" class SettingLdap(Subcommand): """The setting Ldap subcommand""" def __init__(self): super(SettingLdap, self).__init__() self.parser.prog = "couchbase-cli setting-ldap" group = self.parser.add_argument_group("LDAP settings") group.add_argument("--get", dest="get", default=False, action="store_true", help='When the get flag is provided it will retrieve the current ldap settings') group.add_argument("--authentication-enabled", dest="authentication_enabled", metavar="<1|0>", choices=["1", "0"], help="Enable LDAP authentication, otherwise it defaults to disable") group.add_argument("--authorization-enabled", dest="authorization_enabled", metavar="<1|0>", choices=["1", "0"], help="Enable LDAP authorization, otherwise defaults to false") group.add_argument("--hosts", dest="hosts", metavar="<host_list>", help="Coma separated list of LDAP servers") group.add_argument("--port", dest="port", metavar="<port>", help="LDAP port", type=int) group.add_argument("--encryption", dest="encryption", metavar="<tls|startTLS|none>", choices=["tls", "startTLS", "none"], help="Encryption used") group.add_argument("--server-cert-validation", dest="server_cert_val", metavar="<1|0>", choices=["0", "1"], help="Enable or disable certificate validation when connecting to LDAP server") group.add_argument("--ldap-cacert", dest="cacert_ldap", metavar="<path>", help="CA certificate to be used for LDAP server certificate validation, required if" + " certificate validation is not disabled") group.add_argument("--user-dn-query", metavar="<query>", dest="user_dn_query", help="LDAP query to get user's DN. Must contains at least one instance of %%u") group.add_argument("--user-dn-template", metavar="<template>", dest="user_dn_template", help="Template to construct user's DN. 
Must contain at least one instance of %%u") group.add_argument("--client-cert", metavar="<path>", dest="client_cert", help="The client TLS certificate for authentication") group.add_argument("--client-key", metavar="<path>", dest="client_key", help="The client TLS key for authentication") group.add_argument("--request-timeout", metavar="<ms>", dest="timeout", help="Request time out in milliseconds") group.add_argument("--max-parallel", dest="max_parallel", metavar="<max>", type=int, help="Maximum number of parallel connections that can be established") group.add_argument("--max-cache-size", dest="max_cache_size", metavar="<size>", help="Maximum number of cached LDAP requests") group.add_argument("--cache-value-lifetime", dest="cache_value_lifetime", metavar="<ms>", help="Cache value lifetime in milliseconds") group.add_argument("--bind-dn", dest="bind_dn", metavar="<DN>", help="The DN of a user to bind as to perform lookups") group.add_argument("--bind-password", dest="bind_password", metavar="<password>", help="The password of the bind user") group.add_argument("--group-query", dest="group_query", metavar="<query>", help="LDAP query to get user's groups by username") group.add_argument("--enable-nested-groups", dest="nested_groups", metavar="<1|0>", choices=["0", "1"]) group.add_argument("--nested-group-max-depth", dest="nested_max_depth", metavar="<max>", type=int, help="Maximum number of recursive group requests allowed. [1 - 100]") @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=True) def execute(self, opts): if opts.get: data, rv = self.rest.get_ldap() _exit_if_errors(rv) print(json.dumps(data)) else: self._set(opts) def _set(self, opts): if opts.authentication_enabled == '1': opts.authentication_enabled = 'true' elif opts.authentication_enabled == '0': opts.authentication_enabled = 'false' if opts.authorization_enabled == '1': opts.authorization_enabled = 'true' elif opts.authorization_enabled == '0': opts.authorization_enabled = 'false' if opts.server_cert_val == '1': opts.server_cert_val = 'true' elif opts.server_cert_val == '0': opts.server_cert_val = 'false' if opts.server_cert_val == 'false' and opts.cacert_ldap is not None: _exit_if_errors(['--server-cert-validation 0 and --ldap-cacert can not be used together']) if opts.cacert_ldap is not None: opts.cacert_ldap = _exit_on_file_read_failure(opts.cacert_ldap) if opts.encryption == "tls": opts.encryption = "TLS" elif opts.encryption == "startTLS": opts.encryption = "StartTLSExtension" elif opts.encryption == "none": opts.encryption = "None" if opts.nested_groups == '1': opts.nested_groups = 'true' elif opts.nested_groups == '0': opts.nested_groups = 'false' if opts.user_dn_query is not None and opts.user_dn_template is not None: _exit_if_errors(['--user-dn-query and --user-dn-template can not be used together']) mapping = None if opts.user_dn_query is not None: mapping = f'{{"query": "{opts.user_dn_query}"}}' if opts.user_dn_template is not None: mapping = f'{{"template": "{opts.user_dn_template}"}}' if (opts.client_cert and not opts.client_key) or (not opts.client_cert and opts.client_key): _exit_if_errors(['--client-cert and --client-key have to be used together']) if opts.client_cert is not None: opts.client_cert = _exit_on_file_read_failure(opts.client_cert) if opts.client_key is not None: opts.client_key = _exit_on_file_read_failure(opts.client_key) _, errors = self.rest.ldap_settings(opts.authentication_enabled, opts.authorization_enabled, opts.hosts, opts.port, opts.encryption, mapping, opts.timeout, opts.max_parallel, opts.max_cache_size, opts.cache_value_lifetime, opts.bind_dn, opts.bind_password, opts.client_cert, opts.client_key, opts.group_query, opts.nested_groups, opts.nested_max_depth, opts.server_cert_val, opts.cacert_ldap)
_exit_if_errors(errors) _success("LDAP settings modified") @staticmethod def get_man_page_name(): return "couchbase-cli-setting-ldap" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify LDAP settings" class SettingNotification(Subcommand): """The settings notification subcommand""" def __init__(self): super(SettingNotification, self).__init__() self.parser.prog = "couchbase-cli setting-notification" group = self.parser.add_argument_group("Notification Settings") group.add_argument("--enable-notifications", dest="enabled", metavar="<1|0>", required=True, choices=["0", "1"], help="Enables/disables software notifications") @rest_initialiser(version_check=True) def execute(self, opts): enabled = None if opts.enabled == "1": enabled = True elif opts.enabled == "0": enabled = False _, errors = self.rest.enable_notifications(enabled) _exit_if_errors(errors) _success("Software notification settings updated") @staticmethod def get_man_page_name(): return "couchbase-cli-setting-notification" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify software notification settings" class SettingPasswordPolicy(Subcommand): """The settings password policy subcommand""" def __init__(self): super(SettingPasswordPolicy, self).__init__() self.parser.prog = "couchbase-cli setting-password-policy" group = self.parser.add_argument_group("Password Policy Settings") group.add_argument("--get", dest="get", action="store_true", default=False, help="Get the current password policy") group.add_argument("--set", dest="set", action="store_true", default=False, help="Set a new password policy") group.add_argument("--min-length", dest="min_length", type=int, default=None, metavar="<num>", help="Specifies the minimum password length for new passwords") group.add_argument("--uppercase", dest="upper_case", metavar="<0|1>", choices=["0", "1"], help="Specifies new passwords must contain an upper case character") group.add_argument("--lowercase", dest="lower_case", metavar="<0|1>", choices=["0", "1"], help="Specifies new passwords must contain a lower case character") group.add_argument("--digit", dest="digit", metavar="<0|1>", choices=["0", "1"], help="Specifies new passwords must contain at least one digit") group.add_argument("--special-char", dest="special_char", metavar="<0|1>", choices=["0", "1"], help="Specifies new passwords must contain at least one special character") @rest_initialiser(version_check=True) def execute(self, opts): actions = sum([opts.get, opts.set]) if actions == 0: _exit_if_errors(["Must specify either --get or --set"]) elif actions > 1: _exit_if_errors(["The --get and --set flags may not be specified at the same time"]) elif opts.get: if opts.min_length is not None or any([opts.upper_case, opts.lower_case, opts.digit, opts.special_char]): _exit_if_errors(["The --get flag must be used without any other arguments"]) self._get() elif opts.set: if opts.min_length is None: _exit_if_errors(["--min-length is required when using the --set flag"]) if opts.min_length <= 0: _exit_if_errors(["--min-length has to be greater than 0"]) self._set(opts) def _get(self): policy, errors = self.rest.get_password_policy() _exit_if_errors(errors) print(json.dumps(policy, sort_keys=True, indent=2)) def _set(self, opts):
_, errors = self.rest.set_password_policy(opts.min_length, opts.upper_case, opts.lower_case, opts.digit, opts.special_char) _exit_if_errors(errors) _success("Password policy updated") @staticmethod def get_man_page_name(): return "couchbase-cli-setting-password-policy" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify the password policy" class SettingSecurity(Subcommand): """The settings security subcommand""" def __init__(self): super(SettingSecurity, self).__init__() self.parser.prog = "couchbase-cli setting-security" group = self.parser.add_argument_group("Cluster Security Settings") group.add_argument('--get', default=False, action='store_true', help='Get security settings.') group.add_argument('--set', default=False, action='store_true', help='Set security settings.') group.add_argument("--disable-http-ui", dest="disable_http_ui", metavar="<0|1>", choices=['0', '1'], default=None, help="Disables access to the UI over HTTP (0 or 1)") group.add_argument("--disable-www-authenticate", dest="disable_www_authenticate", metavar="<0|1>", choices=['0', '1'], default=None, help="Disables use of WWW-Authenticate (0 or 1)") group.add_argument("--cluster-encryption-level", dest="cluster_encryption_level", metavar="<all|control>", choices=['all', 'control'], default=None, help="Set cluster encryption level, only used when cluster encryption is enabled.") group.add_argument('--tls-min-version', dest='tls_min_version', metavar='<tlsv1|tlsv1.1|tlsv1.2>', choices=['tlsv1', 'tlsv1.1', 'tlsv1.2'], default=None, help='Set the minimum TLS version') group.add_argument('--tls-honor-cipher-order', dest='tls_honor_cipher_order', metavar='<1|0>', choices=['1', '0'], help='Specify whether the cipher order has to be followed.', default=None) group.add_argument('--cipher-suites', metavar='<ciphers>', default=None, help='Comma separated list of ciphers to use. If an empty string (e.g "") is given it will' ' reset ciphers to default.') @rest_initialiser(version_check=True) def execute(self, opts): if sum([opts.get, opts.set]) != 1: _exit_if_errors(['Provide either --set or --get.']) if opts.get: val, err = self.rest.get_security_settings() _exit_if_errors(err) print(json.dumps(val)) elif opts.set: self._set(self.rest, opts.disable_http_ui, opts.cluster_encryption_level, opts.tls_min_version, opts.tls_honor_cipher_order, opts.cipher_suites, opts.disable_www_authenticate) @staticmethod def _set(rest, disable_http_ui, encryption_level, tls_min_version, honor_order, cipher_suites, disable_www_authenticate): if not any([True if x is not None else False for x in [disable_http_ui, encryption_level, tls_min_version, honor_order, cipher_suites, disable_www_authenticate]]): _exit_if_errors(['please provide at least one of --cluster-encryption-level, --disable-http-ui,' ' --tls-min-version, --tls-honor-cipher-order, --cipher-suites or' ' --disable-www-authenticate together with --set']) if disable_http_ui == '1': disable_http_ui = 'true' elif disable_http_ui == '0': disable_http_ui = 'false' if disable_www_authenticate == '1': disable_www_authenticate = 'true' elif disable_www_authenticate == '0': disable_www_authenticate = 'false' if honor_order == '1': honor_order = 'true' elif honor_order == '0': honor_order = 'false' if cipher_suites == '': cipher_suites = json.dumps([]) elif cipher_suites is not None: cipher_suites = json.dumps(cipher_suites.split(',')) _, errors = rest.set_security_settings(disable_http_ui, encryption_level, tls_min_version, honor_order, cipher_suites, disable_www_authenticate) _exit_if_errors(errors)
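        # For reference, a sketch of how the values handled above travel to the server: a
        # --cipher-suites value such as "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384"
        # (placeholder names) is forwarded as a JSON array, while an explicit empty string
        # resets the ciphers to the server default, as handled just above.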
_success("Security settings updated") @staticmethod def get_man_page_name(): return "couchbase-cli-setting-security" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify security settings" class SettingXdcr(Subcommand): """The setting xdcr subcommand""" def __init__(self): super(SettingXdcr, self).__init__() self.parser.prog = "couchbase-cli setting-xdcr" group = self.parser.add_argument_group("XDCR Settings") group.add_argument("--checkpoint-interval", dest="chk_int", type=(int), metavar="<num>", help="Intervals between checkpoints in seconds (60 to 14400)") group.add_argument("--worker-batch-size", dest="worker_batch_size", metavar="<num>", type=(int), help="Doc batch size (500 to 10000)") group.add_argument("--doc-batch-size", dest="doc_batch_size", type=(int), metavar="<KB>", help="Document batching size in KB (10 to 100000)") group.add_argument("--failure-restart-interval", dest="fail_interval", metavar="<seconds>", type=(int), help="Interval for restarting failed xdcr in seconds (1 to 300)") group.add_argument("--optimistic-replication-threshold", dest="rep_thresh", type=(int), metavar="<bytes>", help="Document body size threshold (bytes) to trigger optimistic " + "replication") group.add_argument("--source-nozzle-per-node", dest="src_nozzles", metavar="<num>", type=(int), help="The number of source nozzles per source node (1 to 10)") group.add_argument("--target-nozzle-per-node", dest="dst_nozzles", metavar="<num>", type=(int), help="The number of outgoing nozzles per target node (1 to 10)") group.add_argument("--bandwidth-usage-limit", dest="usage_limit", type=(int), metavar="<num>", help="The bandwidth usage limit in MiB/Sec") group.add_argument("--enable-compression", dest="compression", metavar="<1|0>", choices=["1", "0"], help="Enable/disable compression") group.add_argument("--log-level", dest="log_level", metavar="<level>", choices=["Error", "Info", "Debug", "Trace"], help="The XDCR log level") group.add_argument("--stats-interval", dest="stats_interval", metavar="<ms>", help="The interval for statistics updates (in milliseconds)") group.add_argument('--max-processes', dest='max_proc', metavar="<num>", type=int, help='Number of processes to be allocated to XDCR. 
The default is 4.') @rest_initialiser(version_check=True, cluster_init_check=True, enterprise_check=False) def execute(self, opts): if not self.enterprise and opts.compression: _exit_if_errors(["--enable-compression can only be configured on enterprise edition"]) if opts.compression == "0": opts.compression = "None" elif opts.compression == "1": opts.compression = "Auto" _, errors = self.rest.xdcr_global_settings(opts.chk_int, opts.worker_batch_size, opts.doc_batch_size, opts.fail_interval, opts.rep_thresh, opts.src_nozzles, opts.dst_nozzles, opts.usage_limit, opts.compression, opts.log_level, opts.stats_interval, opts.max_proc) _exit_if_errors(errors) _success("Global XDCR settings updated") @staticmethod def get_man_page_name(): return "couchbase-cli-setting-xdcr" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Modify XDCR related settings" class SettingMasterPassword(Subcommand): """The setting master password subcommand""" def __init__(self): super(SettingMasterPassword, self).__init__() self.parser.prog = "couchbase-cli setting-master-password" group = self.parser.add_argument_group("Master password options") group.add_argument("--new-password", dest="new_password", metavar="<password>", required=False, action=CBNonEchoedAction, envvar=None, prompt_text="Enter new master password:", confirm_text="Confirm new master password:", help="Sets a new master password") group.add_argument("--rotate-data-key", dest="rotate_data_key", action="store_true", help="Rotates the master password data key") @rest_initialiser(version_check=True) def execute(self, opts): if opts.new_password is not None: _, errors = self.rest.set_master_pwd(opts.new_password) _exit_if_errors(errors) _success("New master password set") elif opts.rotate_data_key: _, errors = self.rest.rotate_master_pwd() _exit_if_errors(errors) _success("Data key rotated") else: _exit_if_errors(["No parameters set"]) @staticmethod def get_man_page_name(): return "couchbase-cli-setting-master-password" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Changing the settings of the master password" class SslManage(Subcommand): """The user manage subcommand""" def __init__(self): super(SslManage, self).__init__() self.parser.prog = "couchbase-cli ssl-manage" group = self.parser.add_argument_group("SSL manage options") group.add_argument("--cluster-cert-info", dest="cluster_cert", action="store_true", default=False, help="Gets the cluster certificate") group.add_argument("--node-cert-info", dest="node_cert", action="store_true", default=False, help="Gets the node certificate") group.add_argument("--regenerate-cert", dest="regenerate", metavar="<path>", help="Regenerate the cluster certificate and save it to a file") group.add_argument("--set-node-certificate", dest="set_cert", action="store_true", default=False, help="Sets the node certificate") group.add_argument("--upload-cluster-ca", dest="upload_cert", metavar="<path>", help="Upload a new cluster certificate") group.add_argument("--set-client-auth", dest="client_auth_path", metavar="<path>", help="A path to a file containing the client auth configuration") group.add_argument("--client-auth", dest="show_client_auth", action="store_true", help="Show ssl client certificate authentication value") group.add_argument("--extended", dest="extended", action="store_true", default=False, help="Print extended certificate information") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if 
opts.regenerate is not None: try: open(opts.regenerate, 'a').close() except IOError: _exit_if_errors([f'Unable to create file at `{opts.regenerate}`']) certificate, errors = self.rest.regenerate_cluster_certificate() _exit_if_errors(errors) _exit_on_file_write_failure(opts.regenerate, certificate) _success(f'Certificate regenerate and copied to `{opts.regenerate}`') elif opts.cluster_cert: certificate, errors = self.rest.retrieve_cluster_certificate(opts.extended) _exit_if_errors(errors) if isinstance(certificate, dict): print(json.dumps(certificate, sort_keys=True, indent=2)) else: print(certificate) elif opts.node_cert: host = urllib.parse.urlparse(opts.cluster).netloc certificate, errors = self.rest.retrieve_node_certificate(host) _exit_if_errors(errors) print(json.dumps(certificate, sort_keys=True, indent=2)) elif opts.upload_cert: certificate = _exit_on_file_read_failure(opts.upload_cert) _, errors = self.rest.upload_cluster_certificate(certificate) _exit_if_errors(errors) _success(f'Uploaded cluster certificate to {opts.cluster}') elif opts.set_cert: _, errors = self.rest.set_node_certificate() _exit_if_errors(errors) _success("Node certificate set") elif opts.client_auth_path: data = _exit_on_file_read_failure(opts.client_auth_path) try: config = json.loads(data) except ValueError as e: _exit_if_errors([f'Client auth config does not contain valid json: {e}']) _, errors = self.rest.set_client_cert_auth(config) _exit_if_errors(errors) _success("SSL client auth updated") elif opts.show_client_auth: result, errors = self.rest.retrieve_client_cert_auth() _exit_if_errors(errors) print(json.dumps(result, sort_keys=True, indent=2)) else: _exit_if_errors(["No options specified"]) @staticmethod def get_man_page_name(): return "couchbase-cli-ssl-manage" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Manage cluster certificates" class UserManage(Subcommand): """The user manage subcommand""" def __init__(self): super(UserManage, self).__init__() self.parser.prog = "couchbase-cli user-manage" group = self.parser.add_argument_group("User manage options") group.add_argument("--delete", dest="delete", action="store_true", default=False, help="Delete an existing RBAC user") group.add_argument("--get", dest="get", action="store_true", default=False, help="Display RBAC user details") group.add_argument("--list", dest="list", action="store_true", default=False, help="List all RBAC users and their roles") group.add_argument("--my-roles", dest="my_roles", action="store_true", default=False, help="List my roles") group.add_argument("--set", dest="set", action="store_true", default=False, help="Create or edit an RBAC user") group.add_argument("--set-group", dest="set_group", action="store_true", default=False, help="Create or edit a user group") group.add_argument("--delete-group", dest="delete_group", action="store_true", default=False, help="Delete a user group") group.add_argument("--list-groups", dest="list_group", action="store_true", default=False, help="List all groups") group.add_argument("--get-group", dest="get_group", action="store_true", default=False, help="Get group") group.add_argument("--rbac-username", dest="rbac_user", metavar="<username>", help="The RBAC username") group.add_argument("--rbac-password", dest="rbac_pass", metavar="<password>", help="The RBAC password") group.add_argument("--rbac-name", dest="rbac_name", metavar="<name>", help="The full name of the user") group.add_argument("--roles", dest="roles", metavar="<roles_list>", help="The roles 
for the specified user") group.add_argument("--auth-domain", dest="auth_domain", metavar="<domain>", choices=["external", "local"], help="The authentication type for the specified user") group.add_argument("--user-groups", dest="groups", metavar="<groups>", help="List of groups for the user to be added to") group.add_argument("--group-name", dest="group", metavar="<group>", help="Group name") group.add_argument("--group-description", dest="description", metavar="<text>", help="Group description") group.add_argument("--ldap-ref", dest="ldap_ref", metavar="<ref>", help="LDAP group's distinguished name") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): num_selectors = sum([opts.delete, opts.list, opts.my_roles, opts.set, opts.get, opts.get_group, opts.list_group, opts.delete_group, opts.set_group]) if num_selectors == 0: _exit_if_errors(['Must specify --delete, --list, --my_roles, --set, --get, --get-group, --set-group, ' '--list-groups or --delete-group']) elif num_selectors != 1: _exit_if_errors(['Only one of the following can be specified:--delete, --list, --my_roles, --set, --get,' ' --get-group, --set-group, --list-groups or --delete-group']) if opts.delete: self._delete(opts) elif opts.list: self._list(opts) elif opts.my_roles: self._my_roles(opts) elif opts.set: self._set(opts) elif opts.get: self._get(opts) elif opts.get_group: self._get_group(opts) elif opts.set_group: self._set_group(opts) elif opts.list_group: self._list_groups() elif opts.delete_group: self._delete_group(opts) def _delete_group(self, opts): if opts.group is None: _exit_if_errors(['--group-name is required with the --delete-group option']) _, errors = self.rest.delete_user_group(opts.group) _exit_if_errors(errors) _success(f"Group '{opts.group}' was deleted") def _get_group(self, opts): if opts.group is None: _exit_if_errors(['--group-name is required with the --get-group option']) group, errors = self.rest.get_user_group(opts.group) _exit_if_errors(errors) print(json.dumps(group, indent=2)) def _set_group(self, opts): if opts.group is None: _exit_if_errors(['--group-name is required with --set-group']) _, errors = self.rest.set_user_group(opts.group, opts.roles, opts.description, opts.ldap_ref) _exit_if_errors(errors) _success(f"Group '{opts.group}' set") def _list_groups(self): groups, errors = self.rest.list_user_groups() _exit_if_errors(errors) print(json.dumps(groups, indent=2)) def _delete(self, opts): if opts.rbac_user is None: _exit_if_errors(["--rbac-username is required with the --delete option"]) if opts.rbac_pass is not None: _warning("--rbac-password is not used with the --delete option") if opts.rbac_name is not None: _warning("--rbac-name is not used with the --delete option") if opts.roles is not None: _warning("--roles is not used with the --delete option") if opts.auth_domain is None: _exit_if_errors(["--auth-domain is required with the --delete option"]) _, errors = self.rest.delete_rbac_user(opts.rbac_user, opts.auth_domain) _exit_if_errors(errors) _success(f"User '{opts.rbac_user}' was removed") def _list(self, opts): if opts.rbac_user is not None: _warning(["--rbac-username is not used with the --list option"]) if opts.rbac_pass is not None: _warning(["--rbac-password is not used with the --list option"]) if opts.rbac_name is not None: _warning("--rbac-name is not used with the --list option") if opts.roles is not None: _warning("--roles is not used with the --list option") if opts.auth_domain is not None: _warning("--auth-domain is not used with the 
--list option") result, errors = self.rest.list_rbac_users() _exit_if_errors(errors) print(json.dumps(result, indent=2)) def _get(self, opts): if opts.rbac_user is None: _exit_if_errors(["--rbac-username is required with the --get option"]) if opts.rbac_pass is not None: _warning("--rbac-password is not used with the --get option") if opts.rbac_name is not None: _warning("--rbac-name is not used with the --get option") if opts.roles is not None: _warning("--roles is not used with the --get option") if opts.auth_domain is not None: _warning("--auth-domain is not used with the --get option") result, errors = self.rest.list_rbac_users() _exit_if_errors(errors) user = [u for u in result if u['id'] == opts.rbac_user] if len(user) != 0: print(json.dumps(user, indent=2)) else: _exit_if_errors([f'no user {opts.rbac_user}']) def _my_roles(self, opts): if opts.rbac_user is not None: _warning("--rbac-username is not used with the --my-roles option") if opts.rbac_pass is not None: _warning("--rbac-password is not used with the --my-roles option") if opts.rbac_name is not None: _warning("--rbac-name is not used with the --my-roles option") if opts.roles is not None: _warning("--roles is not used with the --my-roles option") if opts.auth_domain is not None: _warning("--auth-domain is not used with the --my-roles option") result, errors = self.rest.my_roles() _exit_if_errors(errors) print(json.dumps(result, indent=2)) def _set(self, opts): if opts.rbac_user is None: _exit_if_errors(["--rbac-username is required with the --set option"]) if opts.rbac_pass is not None and opts.auth_domain == "external": _warning("--rbac-password cannot be used with the external auth domain") opts.rbac_pass = None if opts.auth_domain is None: _exit_if_errors(["--auth-domain is required with the --set option"]) _, errors = self.rest.set_rbac_user(opts.rbac_user, opts.rbac_pass, opts.rbac_name, opts.roles, opts.auth_domain, opts.groups) _exit_if_errors(errors) if opts.roles is not None and "query_external_access" in opts.roles: _warning('Granting the query_external_access role permits execution of the N1QL ' 'function CURL() and may allow access to other network endpoints in the local network and' 'the Internet.') _success(f"User {opts.rbac_user} set") @staticmethod def get_man_page_name(): return "couchbase-cli-user-manage" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Manage RBAC users" class XdcrReplicate(Subcommand): """The xdcr replicate subcommand""" def __init__(self): super(XdcrReplicate, self).__init__() self.parser.prog = "couchbase-cli xdcr-replicate" group = self.parser.add_argument_group("XDCR replicate options") group.add_argument("--get", action="store_true", help="Retrieve the settings of a XDCR replication.") group.add_argument("--create", dest="create", action="store_true", default=False, help="Create an XDCR replication") group.add_argument("--delete", dest="delete", action="store_true", default=False, help="Delete an XDCR replication") group.add_argument("--pause", dest="pause", action="store_true", default=False, help="Pause an XDCR replication") group.add_argument("--list", dest="list", action="store_true", default=False, help="List all XDCR replications") group.add_argument("--resume", dest="resume", action="store_true", default=False, help="Resume an XDCR replication") group.add_argument("--settings", dest="settings", action="store_true", default=False, help="Set advanced settings for an XDCR replication") group.add_argument("--xdcr-from-bucket", dest="from_bucket", 
metavar="<bucket>", help="The name bucket to replicate data from") group.add_argument("--xdcr-to-bucket", dest="to_bucket", metavar="<bucket>", help="The name bucket to replicate data to") group.add_argument("--xdcr-cluster-name", dest="cluster_name", metavar="<name>", help="The name of the cluster reference to replicate to") group.add_argument("--xdcr-replication-mode", dest="rep_mode", metavar="<mode>", choices=["xmem", "capi"], action=CBDeprecatedAction, help=SUPPRESS) group.add_argument("--filter-expression", dest="filter", metavar="<regex>", help="Regular expression to filter replication streams") group.add_argument("--filter-skip-restream", dest="filter_skip", action="store_true", default=False, help="Restart the replication. It must be specified together with --filter-expression") group.add_argument("--xdcr-replicator", dest="replicator_id", metavar="<id>", help="Replication ID") group.add_argument("--checkpoint-interval", dest="chk_int", type=(int), metavar="<seconds>", help="Intervals between checkpoints in seconds (60 to 14400)") group.add_argument("--worker-batch-size", dest="worker_batch_size", type=(int), metavar="<num>", help="Doc batch size (500 to 10000)") group.add_argument("--doc-batch-size", dest="doc_batch_size", type=(int), metavar="<KB>", help="Document batching size in KB (10 to 100000)") group.add_argument("--failure-restart-interval", dest="fail_interval", type=(int), metavar="<seconds>", help="Interval for restarting failed xdcr in seconds (1 to 300)") group.add_argument("--optimistic-replication-threshold", dest="rep_thresh", type=(int), metavar="<bytes>", help="Document body size threshold to trigger optimistic replication" + " (bytes)") group.add_argument("--source-nozzle-per-node", dest="src_nozzles", type=(int), metavar="<num>", help="The number of source nozzles per source node (1 to 10)") group.add_argument("--target-nozzle-per-node", dest="dst_nozzles", type=(int), metavar="<num>", help="The number of outgoing nozzles per target node (1 to 10)") group.add_argument("--bandwidth-usage-limit", dest="usage_limit", type=(int), metavar="<num>", help="The bandwidth usage limit in MiB/Sec") group.add_argument("--enable-compression", dest="compression", metavar="<1|0>", choices=["1", "0"], help="Enable/disable compression") group.add_argument("--log-level", dest="log_level", metavar="<level>", choices=["Error", "Warn", "Info", "Debug", "Trace"], help="The XDCR log level") group.add_argument("--stats-interval", dest="stats_interval", metavar="<ms>", help="The interval for statistics updates (in milliseconds)") group.add_argument("--priority", dest="priority", choices=['High', 'Medium', 'Low'], metavar="<High|Medium|Low>", help='XDCR priority, by default set to High') group.add_argument('--reset-expiry', choices=['1', '0'], metavar='<1|0>', dest='reset_expiry', default=None, help='When set to true the expiry of mutations will be set to zero') group.add_argument('--filter-deletion', choices=['1', '0'], metavar='<1|0>', default=None, dest='filter_del', help='When set to true delete mutations will be filter out and not sent to the target ' 'cluster') group.add_argument('--filter-expiration', choices=['1', '0'], metavar='<1|0>', default=None, dest='filter_exp', help='When set to true expiry mutations will be filter out and not sent to the target ' 'cluster') collection_group = self.parser.add_argument_group("Collection options") collection_group.add_argument('--collection-explicit-mappings', choices=['1', '0'], metavar='<1|0>', default=None, help='If explicit collection 
mappings is to be used. ' '(Enterprise Edition Only)') collection_group.add_argument('--collection-migration', choices=['1', '0'], metavar='<1|0>', default=None, help='If XDCR is to run in collection migration mode. ' '(Enterprise Edition only)') collection_group.add_argument('--collection-mapping-rules', type=str, default=None, metavar='<mappings>', help='The mapping rules specified as a JSON formatted string. ' '(Enterprise Edition Only)') @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False) def execute(self, opts): if not self.enterprise and opts.compression: _exit_if_errors(["--enable-compression can only be configured on enterprise edition"]) if not self.enterprise and (opts.collection_migration or opts.collection_explicit_mappings is not None or opts.collection_mapping_rules is not None): _exit_if_errors(["[--collection-migration, --collection-explicit-mappings, --collection-mapping-rules] can" " only be configured on enterprise edition"]) if opts.compression == "0": opts.compression = "None" elif opts.compression == "1": opts.compression = "Auto" actions = sum([opts.create, opts.delete, opts.pause, opts.list, opts.resume, opts.settings, opts.get]) if actions == 0: _exit_if_errors(['Must specify one of --create, --delete, --pause, --list, --resume, --settings, --get']) elif actions > 1: _exit_if_errors(['The --create, --delete, --pause, --list, --resume, --settings, --get flags may not be ' 'specified at the same time']) elif opts.create: self._create(opts) elif opts.delete: self._delete(opts) elif opts.pause or opts.resume: self._pause_resume(opts) elif opts.list: self._list() elif opts.settings: self._settings(opts) elif opts.get: self._get(opts) def _get(self, opts): if opts.replicator_id is None: _exit_if_errors(["--xdcr-replicator is needed to get the replicator settings"]) settings, errors = self.rest.get_xdcr_replicator_settings(opts.replicator_id) _exit_if_errors(errors) print(json.dumps(settings, indent=4, sort_keys=True)) def _create(self, opts): if opts.collection_migration == '1' and opts.collection_explicit_mappings == '1': _exit_if_errors(['cannot enable both collection migration and explicit mappings']) if opts.filter_skip and opts.filter is None: _exit_if_errors(["--filter-expression is needed with the --filter-skip-restream option"]) _, errors = self.rest.create_xdcr_replication(opts.cluster_name, opts.to_bucket, opts.from_bucket, opts.chk_int, opts.worker_batch_size, opts.doc_batch_size, opts.fail_interval, opts.rep_thresh, opts.src_nozzles, opts.dst_nozzles, opts.usage_limit, opts.compression, opts.log_level, opts.stats_interval, opts.filter, opts.priority, opts.reset_expiry, opts.filter_del, opts.filter_exp, opts.collection_explicit_mappings, opts.collection_migration, opts.collection_mapping_rules) _exit_if_errors(errors) _success("XDCR replication created") def _delete(self, opts): if opts.replicator_id is None: _exit_if_errors(["--xdcr-replicator is needed to delete a replication"]) _, errors = self.rest.delete_xdcr_replicator(opts.replicator_id) _exit_if_errors(errors) _success("XDCR replication deleted") def _pause_resume(self, opts): if opts.replicator_id is None: _exit_if_errors(["--xdcr-replicator is needed to pause or resume a replication"]) tasks, errors = self.rest.get_tasks() _exit_if_errors(errors) for task in tasks: if task["type"] == "xdcr" and task["id"] == opts.replicator_id: if opts.pause and task["status"] == "notRunning": _exit_if_errors(["The replication is not running yet. Pause is not needed"])
if opts.resume and task["status"] == "running": _exit_if_errors(["The replication is running already. Resume is not needed"]) break if opts.pause: _, errors = self.rest.pause_xdcr_replication(opts.replicator_id) _exit_if_errors(errors) _success("XDCR replication paused") elif opts.resume: _, errors = self.rest.resume_xdcr_replication(opts.replicator_id) _exit_if_errors(errors) _success("XDCR replication resumed") def _list(self): tasks, errors = self.rest.get_tasks() _exit_if_errors(errors) for task in tasks: if task["type"] == "xdcr": print(f'stream id: {task["id"]}') print(f' status: {task["status"]}') print(f' source: {task["source"]}') print(f' target: {task["target"]}') if "filterExpression" in task and task["filterExpression"] != "": print(f' filter: {task["filterExpression"]}') def _settings(self, opts): if opts.replicator_id is None: _exit_if_errors(["--xdcr-replicator is needed to change a replicator's settings"]) if opts.filter_skip and opts.filter is None: _exit_if_errors(["--filter-expression is needed with the --filter-skip-restream option"]) if opts.collection_migration == '1' and opts.collection_explicit_mappings == '1': _exit_if_errors(['cannot enable both collection migration and explicit mappings']) _, errors = self.rest.xdcr_replicator_settings(opts.chk_int, opts.worker_batch_size, opts.doc_batch_size, opts.fail_interval, opts.rep_thresh, opts.src_nozzles, opts.dst_nozzles, opts.usage_limit, opts.compression, opts.log_level, opts.stats_interval, opts.replicator_id, opts.filter, opts.filter_skip, opts.priority, opts.reset_expiry, opts.filter_del, opts.filter_exp, opts.collection_explicit_mappings, opts.collection_migration, opts.collection_mapping_rules) _exit_if_errors(errors) _success("XDCR replicator settings updated") @staticmethod def get_man_page_name(): return "couchbase-cli-xdcr-replicate" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Manage XDCR cluster references" class XdcrSetup(Subcommand): """The xdcr setup subcommand""" def __init__(self): super(XdcrSetup, self).__init__() self.parser.prog = "couchbase-cli xdcr-setup" group = self.parser.add_argument_group("XDCR setup options") group.add_argument("--create", dest="create", action="store_true", default=False, help="Create an XDCR remote reference") group.add_argument("--delete", dest="delete", action="store_true", default=False, help="Delete an XDCR remote reference") group.add_argument("--edit", dest="edit", action="store_true", default=False, help="Modify an XDCR remote reference") group.add_argument("--list", dest="list", action="store_true", default=False, help="List all XDCR remote references") group.add_argument("--xdcr-cluster-name", dest="name", metavar="<name>", help="The name for the remote cluster reference") group.add_argument("--xdcr-hostname", dest="hostname", metavar="<hostname>", help="The hostname of the remote cluster reference") group.add_argument("--xdcr-username", dest="r_username", metavar="<username>", help="The username of the remote cluster reference") group.add_argument("--xdcr-password", dest="r_password", metavar="<password>", help="The password of the remote cluster reference") group.add_argument("--xdcr-user-certificate", dest="r_certificate", metavar="<path>", help="The user certificate for authentication") group.add_argument("--xdcr-user-key", dest="r_key", metavar="<path>", help="The user key for authentication") group.add_argument("--xdcr-demand-encryption", dest="encrypt", choices=["0", "1"], action=CBDeprecatedAction,
help=SUPPRESS) group.add_argument("--xdcr-encryption-type", dest="encryption_type", choices=["full", "half"], metavar="<type>", action=CBDeprecatedAction, help=SUPPRESS) group.add_argument("--xdcr-certificate", dest="certificate", metavar="<path>", help="The certificate used for encryption") group.add_argument("--xdcr-secure-connection", dest="secure_connection", choices=["none", "full", "half"], metavar="<type>", help="The XDCR secure connection type") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): actions = sum([opts.create, opts.delete, opts.edit, opts.list]) if actions == 0: _exit_if_errors(["Must specify one of --create, --delete, --edit, --list"]) elif actions > 1: _exit_if_errors(["The --create, --delete, --edit, --list flags may not be specified at the same time"]) elif opts.create or opts.edit: self._set(opts) elif opts.delete: self._delete(opts) elif opts.list: self._list() def _set(self, opts): cmd = "create" if opts.edit: cmd = "edit" if opts.name is None: _exit_if_errors([f'--xdcr-cluster-name is required to {cmd} a cluster connection']) if opts.hostname is None: _exit_if_errors([f'--xdcr-hostname is required to {cmd} a cluster connections']) if opts.username is None: _exit_if_errors([f'--xdcr-username is required to {cmd} a cluster connections']) if opts.password is None: _exit_if_errors([f'--xdcr-password is required to {cmd} a cluster connections']) if (opts.encrypt is not None or opts.encryption_type is not None) and opts.secure_connection is not None: _exit_if_errors(["Cannot use deprecated flags --xdcr-demand-encryption or --xdcr-encryption-type with" " --xdcr-secure-connection"]) if opts.secure_connection == "none": opts.encrypt = "0" opts.encryption_type = None elif opts.secure_connection == "half": opts.encrypt = "1" opts.encryption_type = "half" elif opts.secure_connection == "full": opts.encrypt = "1" opts.encryption_type = "full" elif opts.encrypt is None and opts.encryption_type is None: opts.encrypt = "0" opts.encryption_type = None raw_cert = None if opts.encrypt == "1": if opts.encryption_type is None: opts.encryption_type = "full" if opts.encryption_type == "full": if opts.certificate is None: _exit_if_errors(["certificate required if encryption is demanded"]) raw_cert = _exit_on_file_read_failure(opts.certificate) raw_user_key = None if opts.r_key: raw_user_key = _exit_on_file_read_failure(opts.r_key) raw_user_cert = None if opts.r_certificate: raw_user_cert = _exit_on_file_read_failure(opts.r_certificate) if opts.create: _, errors = self.rest.create_xdcr_reference(opts.name, opts.hostname, opts.r_username, opts.r_password, opts.encrypt, opts.encryption_type, raw_cert, raw_user_cert, raw_user_key) _exit_if_errors(errors) _success("Cluster reference created") else: _, errors = self.rest.edit_xdcr_reference(opts.name, opts.hostname, opts.r_username, opts.r_password, opts.encrypt, opts.encryption_type, raw_cert, raw_user_cert, raw_user_key) _exit_if_errors(errors) _success("Cluster reference edited") def _delete(self, opts): if opts.name is None: _exit_if_errors(["--xdcr-cluster-name is required to deleta a cluster connection"]) _, errors = self.rest.delete_xdcr_reference(opts.name) _exit_if_errors(errors) _success("Cluster reference deleted") def _list(self): clusters, errors = self.rest.list_xdcr_references() _exit_if_errors(errors) for cluster in clusters: if not cluster["deleted"]: print(f'cluster name: {cluster["name"]}') print(f' uuid: {cluster["uuid"]}') print(f' host name: {cluster["hostname"]}') print(f' user 
name: {cluster["username"]}') print(f' uri: {cluster["uri"]}') @staticmethod def get_man_page_name(): return "couchbase-cli-xdcr-setup" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Manage XDCR replications" class EventingFunctionSetup(Subcommand): """The Eventing Service Function setup subcommand""" def __init__(self): super(EventingFunctionSetup, self).__init__() self.parser.prog = "couchbase-cli eventing-function-setup" group = self.parser.add_argument_group("Eventing Service Function setup options") group.add_argument("--import", dest="_import", action="store_true", default=False, help="Import functions") group.add_argument("--export", dest="export", action="store_true", default=False, help="Export a function") group.add_argument("--export-all", dest="export_all", action="store_true", default=False, help="Export all functions") group.add_argument("--delete", dest="delete", action="store_true", default=False, help="Delete a function") group.add_argument("--list", dest="list", action="store_true", default=False, help="List all functions") group.add_argument("--deploy", dest="deploy", action="store_true", default=False, help="Deploy a function") group.add_argument("--undeploy", dest="undeploy", action="store_true", default=False, help="Undeploy a function") group.add_argument("--boundary", dest="boundary", metavar="<from-everything|from-now>", choices=["from-everything", "from-now"], default=False, help="Set the function deployment boundary") group.add_argument("--name", dest="name", metavar="<name>", default=False, help="The name of the function to take an action on") group.add_argument("--file", dest="filename", metavar="<file>", default=False, help="The file to export and import function(s) to and from") group.add_argument("--pause", dest="pause", action="store_true", help="Pause a function") group.add_argument("--resume", dest="resume", action="store_true", help="Resume a function") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): # pylint: disable=protected-access actions = sum([opts._import, opts.export, opts.export_all, opts.delete, opts.list, opts.deploy, opts.undeploy, opts.pause, opts.resume]) if actions == 0: _exit_if_errors(["Must specify one of --import, --export, --export-all, --delete, --list, --deploy," " --undeploy, --pause, --resume"]) elif actions > 1: _exit_if_errors(['The --import, --export, --export-all, --delete, --list, --deploy, --undeploy, --pause, ' '--resume flags may not be specified at the same time']) elif opts._import: # pylint: disable=protected-access self._import(opts) elif opts.export: self._export(opts) elif opts.export_all: self._export_all(opts) elif opts.delete: self._delete(opts) elif opts.list: self._list() elif opts.deploy: self._deploy_undeploy(opts, True) elif opts.undeploy: self._deploy_undeploy(opts, False) elif opts.pause: self._pause_resume(opts, True) elif opts.resume: self._pause_resume(opts, False) def _pause_resume(self, opts, pause): if not opts.name: _exit_if_errors([f"Flag --name is required with the {'--pause' if pause else '--resume'} flag"]) _, err = self.rest.pause_resume_function(opts.name, pause) _exit_if_errors(err) _success(f"Function was {'paused' if pause else 'resumed'}") def _import(self, opts): if not opts.filename: _exit_if_errors(["--file is needed to import functions"]) import_functions = _exit_on_file_read_failure(opts.filename) import_functions = json.loads(import_functions) _, errors = self.rest.import_functions(import_functions) 
_exit_if_errors(errors) _success("Events imported") def _export(self, opts): if not opts.filename: _exit_if_errors(["--file is needed to export a function"]) if not opts.name: _exit_if_errors(["--name is needed to export a function"]) functions, errors = self.rest.export_functions() _exit_if_errors(errors) exported_function = None for function in functions: if function["appname"] == opts.name: exported_function = [function] if not exported_function: _exit_if_errors([f'Function {opts.name} does not exist']) _exit_on_file_write_failure(opts.filename, json.dumps(exported_function, separators=(',', ':'))) _success("Function exported to: " + opts.filename) def _export_all(self, opts): if not opts.filename: _exit_if_errors(["--file is needed to export all functions"]) exported_functions, errors = self.rest.export_functions() _exit_if_errors(errors) _exit_on_file_write_failure(opts.filename, json.dumps(exported_functions, separators=(',', ':'))) _success(f'All functions exported to: {opts.filename}') def _delete(self, opts): if not opts.name: _exit_if_errors(["--name is needed to delete a function"]) _, errors = self.rest.delete_function(opts.name) _exit_if_errors(errors) _success("Request to delete the function was accepted") def _deploy_undeploy(self, opts, deploy): if not opts.name: _exit_if_errors([f"--name is needed to {'deploy' if deploy else 'undeploy'} a function"]) if deploy and not opts.boundary: _exit_if_errors(["--boundary is needed to deploy a function"]) _, errors = self.rest.deploy_undeploy_function(opts.name, deploy, opts.boundary) _exit_if_errors(errors) _success(f"Request to {'deploy' if deploy else 'undeploy'} the function was accepted") def _list(self): functions, errors = self.rest.list_functions() _exit_if_errors(errors) for function in functions: print(function['appname']) status = '' if function['settings']['deployment_status']: status = 'Deployed' else: status = 'Undeployed' print(f' Status: {status}') print(f' Source Bucket: {function["depcfg"]["source_bucket"]}') print(f' Metadata Bucket: {function["depcfg"]["metadata_bucket"]}') @staticmethod def get_man_page_name(): return "couchbase-cli-eventing-function-setup" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Manage Eventing Service Functions" class AnalyticsLinkSetup(Subcommand): """The analytics link setup subcommand""" def __init__(self): super(AnalyticsLinkSetup, self).__init__() self.parser.prog = "couchbase-cli analytics-link-setup" group = self.parser.add_argument_group("Analytics Service link setup options") group.add_argument("--create", dest="create", action="store_true", default=False, help="Create a link") group.add_argument("--delete", dest="delete", action="store_true", default=False, help="Delete a link") group.add_argument("--edit", dest="edit", action="store_true", default=False, help="Modify a link") group.add_argument("--list", dest="list", action="store_true", default=False, help="List all links") group.add_argument("--dataverse", dest="dataverse", metavar="<name>", help="The dataverse of the link (Deprecated)") group.add_argument("--scope", dest="scope", metavar="<name>", help="The analytics scope of the link in its canonical form") group.add_argument("--name", dest="name", metavar="<name>", help="The name of the link") group.add_argument("--type", dest="type", metavar="<type>", choices=["couchbase", "s3", "azureblob"], help="The type of the link ('couchbase', 's3' or 'azureblob')") group = self.parser.add_argument_group("Analytics Service Couchbase link setup 
options") group.add_argument("--hostname", dest="hostname", metavar="<hostname>", help="The hostname of the link") group.add_argument("--link-username", dest="link_username", metavar="<username>", help="The username of the link") group.add_argument("--link-password", dest="link_password", metavar="<password>", help="The password of the link") group.add_argument("--user-certificate", dest="user_certificate", metavar="<path>", help="The user certificate for authentication") group.add_argument("--user-key", dest="user_key", metavar="<path>", help="The user key for authentication") group.add_argument("--certificate", dest="certificate", metavar="<path>", help="The certificate used for encryption") group.add_argument("--encryption", dest="encryption", choices=["none", "full", "half"], metavar="<type>", help="The link encryption type ('none', 'full' or 'half')") group = self.parser.add_argument_group("Analytics Service S3 link setup options") group.add_argument("--access-key-id", dest="access_key_id", metavar="<id>", help="The access key ID of the link") group.add_argument("--secret-access-key", dest="secret_access_key", metavar="<key>", help="The secret access key of the link") group.add_argument("--session-token", dest="session_token", metavar="<token>", help="Temporary credentials session token") group.add_argument("--region", dest="region", metavar="<region>", help="The region of the link") group.add_argument("--service-endpoint", dest="service_endpoint", metavar="<url>", help="The service endpoint of the link (optional)") group = self.parser.add_argument_group("Analytics Service Azure Blob link setup options") group.add_argument("--connection-string", dest="connection_string", metavar="<key>", help="The connection string of the link") group.add_argument("--account-name", dest="account_name", metavar="<id>", help="The account name of the link") group.add_argument("--account-key", dest="account_key", metavar="<key>", help="The account key of the link") group.add_argument("--shared-access-signature", dest="shared_access_signature", metavar="<token>", help="The shared access signature of the link") group.add_argument("--blob-endpoint", dest="blob_endpoint", metavar="<url>", help="The blob endpoint of the link (optional)") group.add_argument("--endpoint-suffix", dest="endpoint_suffix", metavar="<url>", help="The endpoint suffix of the link (optional)") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): actions = sum([opts.create, opts.delete, opts.edit, opts.list]) if actions == 0: _exit_if_errors(["Must specify one of --create, --delete, --edit, --list"]) elif actions > 1: _exit_if_errors(["The --create, --delete, --edit, --list flags may not be specified at the same time"]) if opts.dataverse: _deprecated("--dataverse is deprecated, please use --scope instead") if opts.dataverse and opts.scope: _exit_if_errors(['Only one of --dataverse and --scope is allowed']) if opts.create or opts.edit: self._set(opts) elif opts.delete: self._delete(opts) elif opts.list: self._list(opts) def _set(self, opts): cmd = "create" if opts.edit: cmd = "edit" if opts.dataverse is None and opts.scope is None: _exit_if_errors([f'--dataverse or --scope is required to {cmd} a link']) if opts.name is None: _exit_if_errors([f'--name is required to {cmd} a link']) if opts.create and opts.type is None: _exit_if_errors([f'--type is required to {cmd} a link']) if opts.type == 'azureblob': if opts.connection_string is None and opts.account_key is None and opts.shared_access_signature is 
None: _exit_if_errors(['No authentication parameters provided']) if opts.connection_string and (opts.account_key or opts.shared_access_signature): _exit_if_errors(['Only a single authentication method is allowed']) if opts.account_key and opts.shared_access_signature: _exit_if_errors(['Only a single authentication method is allowed']) if opts.dataverse: opts.scope = opts.dataverse if opts.certificate: opts.certificate = _exit_on_file_read_failure(opts.certificate) if opts.user_key: opts.user_key = _exit_on_file_read_failure(opts.user_key) if opts.user_certificate: opts.user_certificate = _exit_on_file_read_failure(opts.user_certificate) if opts.create: _, errors = self.rest.create_analytics_link(opts) _exit_if_errors(errors) _success("Link created") else: _, errors = self.rest.edit_analytics_link(opts) _exit_if_errors(errors) _success("Link edited") def _delete(self, opts): if opts.dataverse is None and opts.scope is None: _exit_if_errors(['--dataverse or --scope is required to delete a link']) if opts.name is None: _exit_if_errors(['--name is required to delete a link']) if opts.dataverse: opts.scope = opts.dataverse _, errors = self.rest.delete_analytics_link(opts.scope, opts.name) _exit_if_errors(errors) _success("Link deleted") def _list(self, opts): if opts.dataverse: opts.scope = opts.dataverse clusters, errors = self.rest.list_analytics_links(opts.scope, opts.name, opts.type) _exit_if_errors(errors) print(json.dumps(clusters, sort_keys=True, indent=2)) @staticmethod def get_man_page_name(): return "couchbase-cli-analytics-link-setup" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Manage Analytics Links" class UserChangePassword(Subcommand): """The change password subcommand""" def __init__(self): super(UserChangePassword, self).__init__() self.parser.prog = "couchbase-cli user-change-password" group = self.parser.add_argument_group("User password change option") group.add_argument("--new-password", dest="new_pass", metavar="<password>", required=True, help="The new password") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): if opts.new_pass is None: _exit_if_errors(["--new-password is required"]) _, rv = self.rest.user_change_passsword(opts.new_pass) _exit_if_errors(rv) _success(f'Changed password for {opts.username}') @staticmethod def get_man_page_name(): return "couchbase-cli-user-change-password" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Change user password" class CollectionManage(Subcommand): """The collections-manage subcommand""" def __init__(self): super(CollectionManage, self).__init__() self.parser.prog = "couchbase-cli collection-manage" group = self.parser.add_argument_group("Collection manage option") group.add_argument("--bucket", dest="bucket", metavar="<bucket>", required=True, help="The bucket to use") group.add_argument("--create-scope", dest="create_scope", metavar="<scope>", default=None, help="The name of the scope to make") group.add_argument("--drop-scope", dest="drop_scope", metavar="<scope>", default=None, help="The name of the scope to remove") group.add_argument("--list-scopes", dest="list_scopes", action="store_true", default=None, help="List all of the scopes in the bucket") group.add_argument("--create-collection", dest="create_collection", metavar="<collection>", default=None, help="The path to the collection to make") group.add_argument("--drop-collection", dest="drop_collection", metavar="<collection>", default=None, help="The path 
to the collection to remove") group.add_argument("--list-collections", dest="list_collections", metavar="<scope_list>", default=None, const="", nargs='?', help="List all of the collections in the provided scopes. If no scopes " "are provided it will print all collections") group.add_argument("--max-ttl", dest="max_ttl", metavar="<seconds>", type=int, help="Set the maximum TTL the collection will accept") @rest_initialiser(cluster_init_check=True, version_check=True) def execute(self, opts): cmds = [opts.create_scope, opts.drop_scope, opts.list_scopes, opts.create_collection, opts.drop_collection, opts.list_collections] cmd_total = sum(cmd is not None for cmd in cmds) args = "--create-scope, --drop-scope, --list-scopes, --create-collection, --drop-collection, or " \ "--list-collections" if cmd_total == 0: _exit_if_errors([f'Must specify one of the following: {args}']) elif cmd_total != 1: _exit_if_errors([f'Only one of the following may be specified: {args}']) if opts.max_ttl is not None and opts.create_collection is None: _exit_if_errors(["--max-ttl can only be set with --create-collection"]) if opts.create_scope: self._create_scope(opts) if opts.drop_scope: self._drop_scope(opts) if opts.list_scopes: self._list_scopes(opts) if opts.create_collection: self._create_collection(opts) if opts.drop_collection: self._drop_collection(opts) if opts.list_collections is not None: self._list_collections(opts) def _create_scope(self, opts): _, errors = self.rest.create_scope(opts.bucket, opts.create_scope) _exit_if_errors(errors) _success("Scope created") def _drop_scope(self, opts): _, errors = self.rest.drop_scope(opts.bucket, opts.drop_scope) _exit_if_errors(errors) _success("Scope dropped") def _list_scopes(self, opts): manifest, errors = self.rest.get_manifest(opts.bucket) _exit_if_errors(errors) for scope in manifest['scopes']: print(scope['name']) def _create_collection(self, opts): scope, collection = self._get_scope_collection(opts.create_collection) _, errors = self.rest.create_collection(opts.bucket, scope, collection, opts.max_ttl) _exit_if_errors(errors) _success("Collection created") def _drop_collection(self, opts): scope, collection = self._get_scope_collection(opts.drop_collection) _, errors = self.rest.drop_collection(opts.bucket, scope, collection) _exit_if_errors(errors) _success("Collection dropped") def _list_collections(self, opts): manifest, errors = self.rest.get_manifest(opts.bucket) _exit_if_errors(errors) if opts.list_collections == "": scope_dict = {} else: scope_dict = {scope: False for scope in opts.list_collections.split(',')} if opts.output == 'json': self._json_list_collections(manifest, scope_dict) return for scope in manifest['scopes']: if len(scope_dict) == 0 or scope['name'] in scope_dict: if len(scope_dict) > 0: scope_dict[scope['name']] = True print(f'Scope {scope["name"]}:') for collection in scope['collections']: print(f' - {collection["name"]}') if len(scope_dict) > 0: for scope, found in scope_dict.items(): if not found: _warning(f'Scope "{scope}" does not exist') @staticmethod def _json_list_collections(manifest: Dict[str, Any], scope_dict: Dict[str, bool]): out = {} for scope in manifest['scopes']: if len(scope_dict) == 0 or scope['name'] in scope_dict: out[scope['name']] = [collection["name"] for collection in scope['collections']] print(json.dumps(out, indent=4)) def _get_scope_collection(self, path): scope, collection, err = self.expand_collection_shortcut(path) if err is not None: _exit_if_errors([err]) return scope, collection @staticmethod def 
expand_collection_shortcut(path): parts = path.split('.') if len(parts) != 2: return None, None, f'invalid collection path {path}' parts = ['_default' if x == '' else x for x in parts] return parts[0], parts[1], None @staticmethod def get_man_page_name(): return "couchbase-cli-collection-manage" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Manage collections in a bucket" class EnableDeveloperPreview(Subcommand): """"The enable developer preview command""" def __init__(self): super(EnableDeveloperPreview, self).__init__() self.parser.prog = "couchbase-cli enable-developer-preview" group = self.parser.add_argument_group("Developer preview option") group.add_argument('--enable', dest='enable', required=False, action="store_true", help='Enable developer preview mode in target cluster') group.add_argument('--list', dest='list', required=False, action="store_true", help='Check if cluster is in developer preview mode') @rest_initialiser(version_check=True) def execute(self, opts): if not (opts.enable or opts.list): _exit_if_errors(['--enable or --list must be provided']) if opts.enable and opts.list: _exit_if_errors(['cannot provide both --enable and --list']) if opts.enable: confirm = input('Developer preview cannot be disabled once it is enabled. ' 'If you enter developer preview mode you will not be able to ' 'upgrade. DO NOT USE IN PRODUCTION.\nAre you sure [y/n]: ') if confirm == 'y': _, errors = self.rest.set_dp_mode() _exit_if_errors(errors) _success("Cluster is in developer preview mode") elif confirm == 'n': _success("Developer preview mode has NOT been enabled") else: _exit_if_errors(["Unknown option provided"]) if opts.list: pools, rv = self.rest.pools() _exit_if_errors(rv) if 'isDeveloperPreview' in pools and pools['isDeveloperPreview']: print('Cluster is in developer preview mode') else: print('Cluster is NOT in developer preview mode') @staticmethod def get_man_page_name(): return "couchbase-cli-enable-developer-preview" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Enable developer preview mode in target cluster" class SettingAlternateAddress(Subcommand): """"Setting alternate address command""" def __init__(self): super(SettingAlternateAddress, self).__init__() self.parser.prog = "couchbase-cli setting-alternate-address" group = self.parser.add_argument_group("Configure alternate addresses") group.add_argument('--set', dest='set', required=False, action="store_true", help='Set external address configuration for the node') group.add_argument('--remove', dest='remove', required=False, action="store_true", help='Remove external address configuration') group.add_argument('--list', dest='list', required=False, action='store_true', help='Retrieve current alternate address configuration for all nodes') group.add_argument('--node', dest='node', metavar="<node>", help="Specify the node to update") group.add_argument('--hostname', dest='alternate_hostname', metavar="<host>", help='Alternate address') group.add_argument('--ports', dest='ports', metavar="<ports>", help="A comma separated list specifying port mappings for the services") @rest_initialiser(version_check=True) def execute(self, opts): flags_used = sum([opts.set, opts.list, opts.remove]) if flags_used != 1: _exit_if_errors(['Use exactly one of --set, --list or --remove']) if opts.set or opts.remove: if not opts.node: _exit_if_errors(['--node has to be set when using --set or --remove']) # Alternate address can only be set on the node it self. 
The opts.cluster # is updated with the opts.node instead to allow ease of use. # The node name can have a port number (./cluster_run) hostname, port = self._get_host_port(opts.node) url = urllib.parse.urlparse(opts.cluster) if url.scheme: scheme = url.scheme if url.port and not port: port = url.port elif not port: _, old_port = self._get_host_port(opts.cluster) if old_port: port = old_port if scheme: cluster = f'{scheme}://' cluster += hostname if port: cluster += f':{port}' opts.cluster = cluster # override rest client so it uses the node to be altered self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify, opts.cacert, opts.debug) if opts.set: ports, error = self._parse_ports(opts.ports) _exit_if_errors(error) _, error = self.rest.set_alternate_address(opts.alternate_hostname, ports) _exit_if_errors(error) if opts.remove: _, error = self.rest.delete_alternate_address() _exit_if_errors(error) _success('Alternate address configuration deleted') if opts.list: add, error = self.rest.get_alternate_address() _exit_if_errors(error) if opts.output == 'standard': port_names = set() for node in add: if 'alternateAddresses' in node and 'ports' in node['alternateAddresses']['external']: for port in node['alternateAddresses']['external']['ports'].keys(): port_names.add(port) print('{:20}{:20}{}'.format('Hostname', 'Alternate Address', 'Ports (Primary/Alternate)')) print('{:40}'.format(' '), end='') port_names = sorted(port_names) for port in port_names: column_size = len(port) + 1 if column_size < 11: column_size = 11 print(f'{port:{column_size}}', end='') print() for node in add: if 'alternateAddresses' in node: # For cluster_run and single node clusters there is no hostname try: print(f'{node["hostname"]:20}{node["alternateAddresses"]["external"]["hostname"]:20}', end='') except KeyError: host = 'UNKNOWN' print(f'{host:20}{node["alternateAddresses"]["external"]["hostname"]:20}', end='') for port in port_names: column_size = len(port) + 1 if column_size < 11: column_size = 11 ports = ' ' if port in node['alternateAddresses']['external']['ports']: ports = f'{str(node["services"][port])}' \ f'/{str(node["alternateAddresses"]["external"]["ports"][port])}' print(f'{ports:{column_size}}', end='') print() else: # For cluster_run and single node clusters there is no hostanme try: print(f'{node["hostname"]}') except KeyError: print('UNKNOWN') else: print(json.dumps(add)) @staticmethod def _parse_ports(ports): if ports is None: return None, None port_mappings = ports.split(',') port_tuple_list = [] for port_value_pair in port_mappings: p_v = port_value_pair.split('=') if len(p_v) != 2: return None, [f'invalid port mapping: {port_value_pair}'] try: int(p_v[1]) except ValueError: return None, [f'invalid port mapping: {port_value_pair}'] port_tuple_list.append((p_v[0], p_v[1])) return port_tuple_list, None @staticmethod def _get_host_port(host): if ']' in host: host_port = host.split(']:') if len(host_port) == 2: return host_port[0] + ']', host_port[1] return host_port[0], None else: host_port = host.split(':') if len(host_port) == 2: return host_port[0], host_port[1] return host_port[0], None @staticmethod def get_man_page_name(): return "couchbase-cli-setting-alternate-address" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Configure alternate addresses" class SettingQuery(Subcommand): """"Command to configure query settings""" def __init__(self): super(SettingQuery, self).__init__() self.parser.prog = "couchbase-cli setting-query" 
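# Illustrative note (not part of the original source): the SettingAlternateAddress helper
# methods defined just above behave as follows, based solely on their definitions. The sample
# inputs here are hypothetical.
#
#   SettingAlternateAddress._parse_ports('kv=11210,mgmt=8091')
#       -> ([('kv', '11210'), ('mgmt', '8091')], None)
#   SettingAlternateAddress._parse_ports('kv=abc')
#       -> (None, ['invalid port mapping: kv=abc'])
#   SettingAlternateAddress._get_host_port('node1.example.com:8091')
#       -> ('node1.example.com', '8091')
#   SettingAlternateAddress._get_host_port('[::1]:8091')
#       -> ('[::1]', '8091')   # IPv6 literals keep their brackets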
group = self.parser.add_argument_group("Query service settings") group.add_argument('--set', dest='set', action="store_true", help='Set query settings') group.add_argument('--get', dest='get', action="store_true", help='Retrieve current query settings') group.add_argument('--pipeline-batch', metavar='<num>', type=int, default=None, help='Number of items execution operators can batch.') group.add_argument('--pipeline-cap', metavar='<num>', type=int, default=None, help='Maximum number of items each execution operator can buffer.') group.add_argument('--scan-cap', metavar='<size>', type=int, default=None, help='Maximum buffer size for index scans.') group.add_argument('--timeout', metavar='<ms>', type=int, default=None, help='Server execution timeout.') group.add_argument('--prepared-limit', metavar='<max>', type=int, default=None, help='Maximum number of prepared statements.') group.add_argument('--completed-limit', metavar='<max>', type=int, default=None, help='Maximum number of completed requests.') group.add_argument('--completed-threshold', metavar='<ms>', type=int, default=None, help='Cache completed query lasting longer than this many milliseconds.') group.add_argument('--log-level', choices=['trace', 'debug', 'info', 'warn', 'error', 'sever', 'none'], default=None, metavar='<trace|debug|info|warn|error|server|none>', help='Log level: debug, trace, info, warn, error, severe, none.') group.add_argument('--max-parallelism', metavar='<max>', type=int, default=None, help='Maximum parallelism per query.') group.add_argument('--n1ql-feature-control', metavar='<num>', type=int, default=None, help='N1QL Feature Controls') group.add_argument('--temp-dir', metavar='<path>', type=str, default=None, help='This specifies the directory for temporary query data.') group.add_argument('--temp-dir-size', metavar='<mebibytes>', type=int, default=None, help='Specify the maximum size in mebibytes for the temporary query data directory.') group.add_argument('--cost-based-optimizer', metavar='<1|0>', type=str, default=None, help='Use cost-based optimizer (Developer Preview).') group.add_argument('--memory-quota', metavar='<mebibytes>', type=int, default=None, help='Sets the query memory quota in MiB.') group.add_argument('--transaction-timeout', metavar='<duration>', type=str, default=None, help='A duration string for the transaction timeout i.e (100ns, 10ms, 1s, 1m).') access_list_group = self.parser.add_argument_group('Query curl access settings') access_list_group.add_argument('--curl-access', choices=['restricted', 'unrestricted'], default=None, help='Specify either unrestricted or restricted, to determine which URLs are' ' permitted to be accessed by the curl function.') access_list_group.add_argument('--allowed-urls', metavar='<urls>', type=str, default=None, help='Comma separated lists of URLs that are allowed to be accessed by the curl' ' function.') access_list_group.add_argument('--disallowed-urls', metavar='<urls>', type=str, default=None, help='Comma separated lists of URLs that are disallowed to be accessed by the' ' curl function.') @rest_initialiser(version_check=True) def execute(self, opts): if sum([opts.get, opts.set]) != 1: _exit_if_errors(['Please provide --set or --get, both can not be provided at the same time']) if opts.get: settings, err = self.rest.get_query_settings() _exit_if_errors(err) print(json.dumps(settings)) if opts.set: access_list = self._post_query_access_list(opts) self._post_query_settings(opts, access_list) _success('Updated the query settings') def 
_post_query_access_list(self, opts) -> bool: if opts.curl_access != 'restricted' and (opts.allowed_urls is not None or opts.disallowed_urls is not None): _exit_if_errors(['Can only provide --allowed-urls or --disallowed-urls with --curl-access restricted']) if opts.curl_access: allowed = opts.allowed_urls.strip().split(',') if opts.allowed_urls is not None else None disallowed = opts.disallowed_urls.strip().split(',') if opts.disallowed_urls is not None else None _, err = self.rest.post_query_curl_access_settings(opts.curl_access == 'restricted', allowed, disallowed) _exit_if_errors(err) return True return False def _post_query_settings(self, opts, access_list): if all(v is None for v in [opts.pipeline_batch, opts.pipeline_cap, opts.scan_cap, opts.timeout, opts.prepared_limit, opts.completed_limit, opts.completed_threshold, opts.log_level, opts.max_parallelism, opts.n1ql_feature_control, opts.temp_dir, opts.temp_dir_size, opts.cost_based_optimizer, opts.memory_quota, opts.transaction_timeout]): if access_list: return _exit_if_errors(['Please provide at least one other option with --set']) _, err = self.rest.post_query_settings(opts.pipeline_batch, opts.pipeline_cap, opts.scan_cap, opts.timeout, opts.prepared_limit, opts.completed_limit, opts.completed_threshold, opts.log_level, opts.max_parallelism, opts.n1ql_feature_control, opts.temp_dir, opts.temp_dir_size, opts.cost_based_optimizer, opts.memory_quota, opts.transaction_timeout) _exit_if_errors(err) @staticmethod def get_man_page_name(): return "couchbase-cli-setting-query" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Manage query settings" class IpFamily(Subcommand): """"Command to switch between IP family for node to node communication""" def __init__(self): super(IpFamily, self).__init__() self.parser.prog = "couchbase-cli ip-family" group = self.parser.add_argument_group("IP family options") group.add_argument('--get', action="store_true", default=False, help='Retrieve current used IP family') group.add_argument('--set', action="store_true", default=False, help='Change current used IP family') group.add_argument('--ipv4', dest='ipv4', default=False, action="store_true", help='Set IP family to IPv4') group.add_argument('--ipv6', dest='ipv6', default=False, action="store_true", help='Set IP family to IPv6') @rest_initialiser(version_check=True) def execute(self, opts): flags_used = sum([opts.set, opts.get]) if flags_used == 0: _exit_if_errors(['Please provide one of --set, or --get']) elif flags_used > 1: _exit_if_errors(['Please provide only one of --set, or --get']) if opts.get: self._get(self.rest) if opts.set: if sum([opts.ipv6, opts.ipv4]) != 1: _exit_if_errors(['Provided exactly one of --ipv4 or --ipv6 together with the --set option']) self._set(self.rest, opts.ipv6, opts.ssl) @staticmethod def _set(rest, ipv6, ssl): ip_fam, ip_fam_disable = ('ipv6', 'ipv4') if ipv6 else ('ipv4', 'ipv6') node_data, err = rest.pools('nodes') if err and err[0] == '"unknown pool"': _, err = rest.enable_external_listener(ipfamily=ip_fam) _exit_if_errors(err) _, err = rest.setup_net_config(ipfamily=ip_fam) _exit_if_errors(err) _, err = rest.disable_unused_external_listeners(ipfamily=ip_fam_disable) _exit_if_errors(err) _success('Switched IP family of the cluster') return _exit_if_errors(err) hosts = [] for n in node_data['nodes']: host = f'http://{n["hostname"]}' if ssl: addr = host.rsplit(":", 1)[0] host = f'https://{addr}:{n["ports"]["httpsMgmt"]}' _, err = rest.enable_external_listener(host=host, 
ipfamily=ip_fam) _exit_if_errors(err) hosts.append(host) for h in hosts: _, err = rest.setup_net_config(host=h, ipfamily=ip_fam) _exit_if_errors(err) print(f'Switched IP family for node: {h}') for h in hosts: _, err = rest.disable_unused_external_listeners(host=h, ipfamily=ip_fam_disable) _exit_if_errors(err) _success('Switched IP family of the cluster') @staticmethod def _get(rest): nodes, err = rest.nodes_info() _exit_if_errors(err) fam = {} for n in nodes: fam[n['addressFamily']] = True family = list(fam.keys()) if len(family) == 1: ipv_fam = 'UNKNOWN' if family[0] == 'inet' or family[0] == 'inet_tls': ipv_fam = 'ipv4' elif family[0] == 'inet6' or family[0] == 'inet6_tls': ipv_fam = 'ipv6' print(f'Cluster using {ipv_fam}') else: print('Cluster is in mixed mode') @staticmethod def get_man_page_name(): return "couchbase-cli-ip-family" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Change or get the address family" class NodeToNodeEncryption(Subcommand): """"Command to enable/disable cluster encryption""" def __init__(self): super(NodeToNodeEncryption, self).__init__() self.parser.prog = "couchbase-cli node-to-node-encryption" group = self.parser.add_argument_group("Node-to-node encryption options") group.add_argument('--enable', action="store_true", default=False, help='Enable node-to-node encryption') group.add_argument('--disable', action="store_true", default=False, help='Disable node-to-node encryption') group.add_argument('--get', action="store_true", default=False, help='Retrieve current status of node-to-node encryption (on or off)') @rest_initialiser(version_check=True) def execute(self, opts): flags_used = sum([opts.enable, opts.disable, opts.get]) if flags_used == 0: _exit_if_errors(['Please provide one of --enable, --disable or --get']) elif flags_used > 1: _exit_if_errors(['Please provide only one of --enable, --disable or --get']) if opts.get: self._get(self.rest) elif opts.enable: self._change_encryption(self.rest, 'on', opts.ssl) elif opts.disable: self._change_encryption(self.rest, 'off', opts.ssl) @staticmethod def _change_encryption(rest, encryption, ssl): node_data, err = rest.pools('nodes') encryption_disable = 'off' if encryption == 'on' else 'on' if err and err[0] == '"unknown pool"': _, err = rest.enable_external_listener(encryption=encryption) _exit_if_errors(err) _, err = rest.setup_net_config(encryption=encryption) _exit_if_errors(err) _, err = rest.disable_unused_external_listeners(encryption=encryption_disable) _exit_if_errors(err) _success(f'Switched node-to-node encryption {encryption}') return _exit_if_errors(err) hosts = [] for n in node_data['nodes']: host = f'http://{n["hostname"]}' if ssl: addr = host.rsplit(":", 1)[0] host = f'https://{addr}:{n["ports"]["httpsMgmt"]}' _, err = rest.enable_external_listener(host=host, encryption=encryption) _exit_if_errors(err) hosts.append(host) for h in hosts: _, err = rest.setup_net_config(host=h, encryption=encryption) _exit_if_errors(err) print(f'Turned {encryption} encryption for node: {h}') for h in hosts: _, err = rest.disable_unused_external_listeners(host=h, encryption=encryption_disable) _exit_if_errors(err) _success(f'Switched node-to-node encryption {encryption}') @staticmethod def _get(rest): # this will start the correct listeners in all the nodes nodes, err = rest.nodes_info() _exit_if_errors(err) encrypted_nodes = [] unencrpyted_nodes = [] for n in nodes: if n['nodeEncryption']: encrypted_nodes.append(n['hostname']) else: unencrpyted_nodes.append(n['hostname']) if 
len(encrypted_nodes) == len(nodes): print('Node-to-node encryption is enabled') elif len(unencrpyted_nodes) == len(nodes): print('Node-to-node encryption is disabled') else: print('Cluster is in mixed mode') print(f'Nodes with encryption enabled: {encrypted_nodes}') print(f'Nodes with encryption disabled: {unencrpyted_nodes}') @staticmethod def get_man_page_name(): return "couchbase-cli-node-to-node-encryption" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Change or get the cluster encryption configuration" class SettingRebalance(Subcommand): """The rebalance subcommand""" def __init__(self): super(SettingRebalance, self).__init__() self.parser.prog = "couchbase-cli setting-rebalance" group = self.parser.add_argument_group("Rebalance configuration") group.add_argument("--set", default=False, action='store_true', help='Set the automatic rebalance retry settings.') group.add_argument("--get", default=False, action='store_true', help='Get the automatic rebalance retry settings.') group.add_argument('--cancel', default=False, action='store_true', help='Cancel pending rebalance retry.') group.add_argument('--moves-per-node', type=int, metavar='<num>', help='Specify the number of [1-64] vBuckets to move concurrently') group.add_argument('--pending-info', default=False, action='store_true', help='Get info for pending rebalance retry.') group.add_argument("--enable", metavar="<1|0>", choices=["1", "0"], help="Enable or disable automatic rebalance retry") group.add_argument("--wait-for", metavar="<sec>", type=int, help="Specify the time to wat before retrying the rebalance [5-3600] seconds.") group.add_argument("--max-attempts", metavar="<num>", type=int, help="Maximum number of rebalance retires [1-3].") group.add_argument('--rebalance-id', metavar='<id>', help='Specify the id of the failed rebalance to cancel the retry.') @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False) def execute(self, opts): if sum([opts.set, opts.get, opts.cancel, opts.pending_info]) != 1: _exit_if_errors(['Provide either --set, --get, --cancel or --pending-info']) if opts.get: settings, err = self.rest.get_settings_rebalance() _exit_if_errors(err) if self.enterprise: retry_settings, err = self.rest.get_settings_rebalance_retry() _exit_if_errors(err) settings.update(retry_settings) if opts.output == 'json': print(json.dumps(settings)) else: if self.enterprise: print(f'Automatic rebalance retry {"enabled" if settings["enabled"] else "disabled"}') print(f'Retry wait time: {settings["afterTimePeriod"]}') print(f'Maximum number of retries: {settings["maxAttempts"]}') print(f'Maximum number of vBucket move per node: {settings["rebalanceMovesPerNode"]}') elif opts.set: if (not self.enterprise and (opts.enable is not None or opts.wait_for is not None or opts.max_attempts is not None)): _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"]) if opts.enable == '1': opts.enable = 'true' else: opts.enable = 'false' if opts.wait_for is not None and (opts.wait_for < 5 or opts.wait_for > 3600): _exit_if_errors(['--wait-for must be a value between 5 and 3600']) if opts.max_attempts is not None and (opts.max_attempts < 1 or opts.max_attempts > 3): _exit_if_errors(['--max-attempts must be a value between 1 and 3']) if self.enterprise: _, err = self.rest.set_settings_rebalance_retry(opts.enable, opts.wait_for, opts.max_attempts) _exit_if_errors(err) if opts.moves_per_node is not None: if not 1 <= opts.moves_per_node <= 64: 
_exit_if_errors(['--moves-per-node must be a value between 1 and 64']) _, err = self.rest.set_settings_rebalance(opts.moves_per_node) _exit_if_errors(err) _success('Rebalance settings updated') elif opts.cancel and not self.enterprise: _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"]) if opts.rebalance_id is None: _exit_if_errors(['Provide the failed rebalance id using --rebalance-id <id>']) _, err = self.rest.cancel_rebalance_retry(opts.rebalance_id) _exit_if_errors(err) _success('Rebalance retry canceled') else: if not self.enterprise: _exit_if_errors(["Automatic rebalance retry configuration is an Enterprise Edition only feature"]) rebalance_info, err = self.rest.get_rebalance_info() _exit_if_errors(err) print(json.dumps(rebalance_info)) @staticmethod def get_man_page_name(): return "couchbase-cli-setting-rebalance" + ".1" if os.name != "nt" else ".html" @staticmethod def get_description(): return "Configure automatic rebalance settings" class BackupService(Subcommand): """BackupService class is a subcommand that will contain other commands to configure the service as well as manage it. This approach attempts to make the interface more intuitive by keeping a hierarchical structure where the service can have all its options under one command instead of having multiple completely separate commands (e.g settings-backups, manage-backups and repository-setup-backup.) The idea is that the interface will look like: couchbase-cli backup-service [settings | plans | repositories | cloud-credentials] where each element in [] is a subcommand to manage those options for that part of the backup service. As such if the user is not sure of what they want to do they can always do couchbase-cli backup-service -h to get a top level details and then move down the hierarchy to a more concrete option. 
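    For illustration only (a hedged sketch: connection and authentication flags are omitted, and the
    names and the task definition below are hypothetical), typical invocations of the nested commands
    defined further down look like:

        couchbase-cli backup-service settings --get
        couchbase-cli backup-service repository --list --state active
        couchbase-cli backup-service plan --add --name daily --task '<json task definition>'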
""" def __init__(self): super(BackupService, self).__init__() self.parser.prog = "couchbase-cli backup-service" self.subparser = self.parser.add_subparsers(help='Sub command help', dest='sub_cmd', metavar='<subcommand>') self.settings_cmd = BackupServiceSettings(self.subparser) self.repository_cmd = BackupServiceRepository(self.subparser) self.plan_cmd = BackupServicePlan(self.subparser) def execute(self, opts): if opts.sub_cmd is None or opts.sub_cmd not in ['settings', 'repository', 'plan']: _exit_if_errors(['<subcommand> must be one off [settings, repository, plan]']) if opts.sub_cmd == 'settings': self.settings_cmd.execute(opts) elif opts.sub_cmd == 'repository': self.repository_cmd.execute(opts) elif opts.sub_cmd == 'plan': self.plan_cmd.execute(opts) @staticmethod def get_man_page_name(): return 'couchbase-cli-backup-service' + '.1' if os.name != 'nt' else '.html' @staticmethod def get_description(): return "Manage the backup service" class BackupServiceSettings: """Backup service settings is a nested command and manages the backup service settings""" def __init__(self, subparser): self.rest = None setting_parser = subparser.add_parser('settings', help='Manage backup service settings', add_help=False, allow_abbrev=False) group = setting_parser.add_argument_group('Backup service settings options') group.add_argument('--get', action='store_true', help='Get current backup service configuration') group.add_argument('--set', action='store_true', help='Change the service configuration') group.add_argument('--history-rotation-period', dest='rotation_period', type=int, metavar='<days>', help='The number of days after which the task history should be rotated') group.add_argument('--history-rotation-size', dest='rotation_size', type=int, metavar='<mebibytes>', help='The size in MiB at which to rotate the task history') group.add_argument("-h", "--help", action=CBHelpAction, klass=self, help="Prints the short or long help message") @rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True) def execute(self, opts): if sum([opts.get, opts.set]) != 1: _exit_if_errors(['Must use one and only one of [--get, --set]']) if opts.get: self._get(opts) if opts.set: self._set(opts) def _get(self, opts): config, err = self.rest.get_backup_service_settings() _exit_if_errors(err) if opts.output == 'json': print(json.dumps(config, indent=4)) else: print('-- Backup service configuration --') size = config['history_rotation_size'] if 'history_rotation_size' in config else 'N/A' period = config['history_rotation_period'] if 'history_rotation_period' in config else 'N/A' print(f'History rotation size: {size} MiB') print( f'History rotation period: {period} days') def _set(self, opts): if opts.rotation_period is None and opts.rotation_size is None: _exit_if_errors(['At least one of --history-rotation-period or --history-rotation-size is required']) _, err = self.rest.patch_backup_service_settings(opts.rotation_period, opts.rotation_size) _exit_if_errors(err) _success('Backup service settings patched') @staticmethod def get_man_page_name(): return 'couchbase-cli-backup-service-settings' + '.1' if os.name != 'nt' else '.html' @staticmethod def get_description(): return 'Manage backup service settings' class BackupServiceRepository: """This command manages backup services repositories. 
Things this command can do is: - List repositories - Get repository - Add repository - Archive repository - Import repository - Delete repository """ def __init__(self, subparser): """setup the parser""" self.rest = None repository_parser = subparser.add_parser('repository', help='Manage backup repositories', add_help=False, allow_abbrev=False) # action flags are mutually exclusive action_group = repository_parser.add_mutually_exclusive_group(required=True) action_group.add_argument('--list', action='store_true', help='Get all repositories') action_group.add_argument('--get', action='store_true', help='Get repository by id') action_group.add_argument('--archive', action='store_true', help='Archive a repository') action_group.add_argument('--add', action='store_true', help='Add a new active repository') action_group.add_argument('--remove', action='store_true', help='Remove an archived/imported repository') action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self, help="Prints the short or long help message") # other arguments group = repository_parser.add_argument_group('Backup service repository configuration') group.add_argument('--id', metavar='<id>', help='The repository id') group.add_argument('--new-id', metavar='<id>', help='The new repository id') group.add_argument('--state', metavar='<state>', choices=['active', 'archived', 'imported'], help='The repository state.') group.add_argument('--plan', metavar='<plan_name>', help='The plan to use as base for the repository') group.add_argument('--backup-archive', metavar='<archive>', help='The location to store the backups in') group.add_argument('--bucket-name', metavar='<name>', help='The bucket to backup') group.add_argument('--remove-data', action='store_true', help='Used to delete the repository data') # the cloud arguments are given the own group so that the short help is a bit more readable cloud_group = repository_parser.add_argument_group('Backup repository cloud arguments') cloud_group.add_argument('--cloud-credentials-name', metavar='<name>', help='The stored clouds credential name to use for the new repository') cloud_group.add_argument('--cloud-staging-dir', metavar='<path>', help='The path to the staging directory') cloud_group.add_argument('--cloud-credentials-id', metavar='<id>', help='The ID to use to communicate with the object store') cloud_group.add_argument('--cloud-credentials-key', metavar='<key>', help='The key to use to communicate with the object store') cloud_group.add_argument('--cloud-credentials-region', metavar='<region>', help='The region for the object store') cloud_group.add_argument('--cloud-endpoint', metavar='<endpoint>', help='Overrides the default endpoint used to communicate with the cloud provider. 
' 'Use for object store compatible third party solutions') cloud_group.add_argument('--s3-force-path-style', action='store_true', help='When using S3 or S3 compatible storage it will use the old path style.') @rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True) def execute(self, opts): """Run the backup-service repository subcommand""" if opts.list: self.list_repositories(opts.state, opts.output == 'json') elif opts.get: self.get_repository(opts.id, opts.state, opts.output == 'json') elif opts.archive: self.archive_repository(opts.id, opts.new_id) elif opts.remove: self.remove_repository(opts.id, opts.state, opts.remove_data) elif opts.add: self.add_active_repository(opts.id, opts.plan, opts.backup_archive, bucket_name=opts.bucket_name, credentials_name=opts.cloud_credentials_name, credentials_id=opts.cloud_credentials_id, credentials_key=opts.cloud_credentials_key, cloud_region=opts.cloud_credentials_region, staging_dir=opts.cloud_staging_dir, cloud_endpoint=opts.cloud_endpoint, s3_path_style=opts.s3_force_path_style) def remove_repository(self, repository_id: str, state: str, delete_repo: bool = False): """Removes the repository in state 'state' and with id 'repository_id' Args: repository_id (str): The repository id state (str): It must be either archived or imported otherwise it will return an error delete_repo (bool): Whether or not the backup repository should be deleted """ if not repository_id: _exit_if_errors(['--id is required']) # the following is devided in two options to give better error messages depending if state is missing or if it # is invalid if not state: _exit_if_errors(['--state is required']) if state not in ['archived', 'imported']: _exit_if_errors(['can only delete archived or imported repositories to delete an active repository it needs to ' 'be archived first']) # can only delete repo of archived repositories if delete_repo and state == 'imported': _exit_if_errors(['cannot delete the repository for an imported repository']) _, errors = self.rest.delete_backup_repository(repository_id, state, delete_repo) _exit_if_errors(errors) _success('Repository was deleted') def add_active_repository(self, repository_id: str, plan: str, archive: str, **kwargs): """Adds a new active repository identified by 'repository_id' and that uses 'plan' as base. Args: repository_id (str): The ID to give to the repository. This must be unique, if it is not an error will be returned. plan (str): The name of the plan to use as base for the repository. If it does not exist the service will return an error. archive (str): The location to store the data in. It must be accessible by all nodes. To use S3 instead of providing a path to a filesystem directory use the syntax. 
s3://<bucket-name>/<optional_prefix>/<archive> **kwargs: Optional parameters [bucket_name, credentials_name, credentials_id, credentials_key, cloud_region, staging_dir, cloud_endpoint, s3_path_style] """ if not repository_id: _exit_if_errors(['--id is required']) if not plan: _exit_if_errors(['--plan is required']) if not archive: _exit_if_errors(['--backup-archive is required']) _exit_if_errors(self.check_cloud_params(archive, **kwargs)) add_request_body = { 'plan': plan, 'archive': archive, } if kwargs.get('bucket_name', False): add_request_body['bucket_name'] = kwargs.get('bucket_name') if kwargs.get('credentials_name', False): add_request_body['cloud_credential_name'] = kwargs.get('credentials_name') if kwargs.get('credentials_id', False): add_request_body['cloud_credentials_id'] = kwargs.get('credentials_id') if kwargs.get('credentials_key', False): add_request_body['cloud_credentials_key'] = kwargs.get('credentials_key') if kwargs.get('cloud_region', False): add_request_body['cloud_credentials_region'] = kwargs.get('cloud_region') if kwargs.get('cloud_endpoint', False): add_request_body['cloud_endpoint'] = kwargs.get('cloud_endpoint') if kwargs.get('s3_path_style', False): add_request_body['cloud_force_path_style'] = kwargs.get('s3_path_style') _, errors = self.rest.add_backup_active_repository(repository_id, add_request_body) _exit_if_errors(errors) _success('Added repository') @staticmethod def check_cloud_params(archive: str, **kwargs) -> Optional[List[str]]: """Checks that inside kwargs there is a valid set of parameters to add a cloud repository Args: archive (str): The archive to use for the repository. """ # If not an s3 archive skip this if not archive.startswith('s3://'): return None creds_name = kwargs.get('credentials_name') region = kwargs.get('cloud_region') creds_id = kwargs.get('credentials_id') creds_key = kwargs.get('credentials_key') staging_dir = kwargs.get('staging_dir') if (creds_name and (creds_id or creds_key)) or (not creds_name and not (creds_id or creds_key)): return ['must provide either --cloud-credentials-name or --cloud-credentials-key and ' '--cloud-credentials-id'] if not staging_dir: return ['--cloud-staging-dir is required'] if not creds_name and not region: return ['--cloud-credentials-region is required'] return None def archive_repository(self, repository_id, new_id): """Archive an repository. The archived repository will have the id `new_id` Args: repository_id (str): The active repository ID to be archived new_id (str): The id that will be given to the archived repository """ if not repository_id: _exit_if_errors(['--id is required']) if not new_id: _exit_if_errors(['--new-id is required']) _, errors = self.rest.archive_backup_repository(repository_id, new_id) _exit_if_errors(errors) _success('Archived repository') def list_repositories(self, state=None, json_out=False): """List the backup repositories. If a repository state is given only repositories in that state will be listed. This command supports listing both in json and human friendly format. Args: state (str, optional): One of ['active', 'imported', 'archived']. The repository on this state will be retrieved. json_out (bool): If True the output will be JSON otherwise it will be a human friendly format. 
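        Example (illustrative only; the actual output depends on what the backup service returns):

            self.list_repositories()                               # all states, human-friendly table
            self.list_repositories(state='active', json_out=True)  # active repositories only, as JSON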
""" states = ['active', 'archived', 'imported'] if state is None else [state] results = {} for get_state in states: repositories, errors = self.rest.get_backup_service_repositories(state=get_state) _exit_if_errors(errors) results[get_state] = repositories if json_out: print(json.dumps(results, indent=2)) else: self.human_friendly_print_repositories(results) def get_repository(self, repository_id, state, json_out=False): """Retrieves one repository from the backup service If the repository does not exist an error will be returned Args: repository_id (str): The repository id to be retrieved state (str): The state of the repository to retrieve json_out (bool): If True the output will be JSON otherwise it will be a human friendly format. """ if not repository_id: _exit_if_errors(['--id is required']) if not state: _exit_if_errors(['--state is required']) repository, errors = self.rest.get_backup_service_repository(repository_id, state) _exit_if_errors(errors) if json_out: print(json.dumps(repository, indent=2)) else: self.human_firendly_print_repository(repository) @staticmethod def human_firendly_print_repository(repository): """Print the repository in a human friendly format Args: repository (obj): The backup repository information """ print(f'ID: {repository["id"]}') print(f'State: {repository["state"]}') print(f'Healthy: {(not ("health" in repository and not repository["health"]["healthy"]))!s}') print(f'Archive: {repository["archive"]}') print(f'Repository: {repository["repo"]}') if 'bucket' in repository: print(f'Bucket: {repository["bucket"]["name"]}') if 'plan_name' in repository and repository['plan_name'] != "": print(f'plan: {repository["plan_name"]}') print(f'Creation time: {repository["creation_time"]}') if 'scheduled' in repository and repository['scheduled']: print() BackupServiceRepository.human_firendly_print_repository_scheduled_tasks(repository['scheduled']) one_off = repository['running_one_off'] if 'running_one_off' in repository else None running_scheduled = repository['running_tasks'] if 'running_tasks' in repository else None if one_off or running_scheduled: print() BackupServiceRepository.human_friendly_print_running_tasks(one_off, running_scheduled) @staticmethod def human_friendly_print_running_tasks(one_off, scheduled): """Prints the running task summary in a human friendly way Args: one_off (map<str, task object>): Running one off tasks scheduled (map<str, task object>): Running scheduled tasks """ all_vals = [] name_pad = 5 if one_off: for name in one_off: if len(name) > name_pad: name_pad = len(name) all_vals += one_off.values() if scheduled: for name in scheduled: if len(name) > name_pad: name_pad = len(name) all_vals += scheduled.values() name_pad += 1 header = f'{"Name":<{name_pad}}| Task type | Status | Start' print(header) print('-' * (len(header) + 5)) for task in all_vals: print(f'{task["name"]:<{name_pad}}| {task["type"].title():<10}| {task["status"]:<8} | {task["start"]}') @staticmethod def human_firendly_print_repository_scheduled_tasks(scheduled): """Print the scheduled task in a tabular format""" name_pad = 5 for name in scheduled: if len(name) > name_pad: name_pad = len(name) name_pad += 1 header = f'{"Name":<{name_pad}}| Task type | Next run' print('Scheduled tasks:') print(header) print('-' * (len(header) + 5)) for task in scheduled.values(): print(f'{task["name"]:<{name_pad}}| {task["task_type"].title():<10}| {task["next_run"]}') @staticmethod def human_friendly_print_repositories(repositories_map): """This will print the repositories in a tabular 
format Args: repository_map (map<state (str), repository (list of objects)>) """ repository_count = 0 id_pad = 5 plan_pad = 7 for repositories in repositories_map.values(): for repository in repositories: repository_count += 1 if id_pad < len(repository['id']): id_pad = len(repository['id']) if 'plan_name' in repository and plan_pad < len(repository['plan_name']): plan_pad = len(repository['plan_name']) if repository_count == 0: print('No repositories found') return # Get an extra space between the the information and the column separator plan_pad += 1 id_pad += 1 # build header header = f'{"ID":<{id_pad}}| {"State":<9}| {"plan":<{plan_pad}}| Healthy | Repository' print(header) print('-' * len(header)) # print repository summary for _, repositories in sorted(repositories_map.items()): for repository in repositories: healthy = not ('health' in repository and not repository['health']['healthy']) # archived and imported repositories may not have plans so we have to replace the empty string with N/A plan_name = 'N/A' if 'plan_name' in repository and len(repository['plan_name']) != 0: plan_name = repository['plan_name'] print(f"{repository['id']:<{id_pad}}| {repository['state']:<9}| {plan_name:<{plan_pad}}| " f" {healthy!s:<7}| {repository['repo']}") @staticmethod def get_man_page_name(): return 'couchbase-cli-backup-service-repository' + '.1' if os.name != 'nt' else '.html' @staticmethod def get_description(): return 'Manage backup service repositories' class BackupServicePlan: """This command manages backup services plans. Things this command can do is: - List plans - Add delete - Delete plans """ def __init__(self, subparser): """setup the parser""" self.rest = None plan_parser = subparser.add_parser('plan', help='Manage backup plans', add_help=False, allow_abbrev=False) # action flags are mutually exclusive action_group = plan_parser.add_mutually_exclusive_group(required=True) action_group.add_argument('--list', action='store_true', help='List all available backup plans') action_group.add_argument('--get', action='store_true', help='Get a plan by name') action_group.add_argument('--remove', action='store_true', help='Remove a plan by name') action_group.add_argument('--add', action='store_true', help='Add a new plan') action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self, help="Prints the short or long help message") options = plan_parser.add_argument_group('Plan options') options.add_argument('--name', metavar='<name>', help='Plan name') options.add_argument('--description', metavar='<description>', help='Optional description') options.add_argument('--services', metavar='<services>', help='A comma separated list of services to backup') options.add_argument('--task', metavar='<tasks>', nargs='+', help='JSON task definition') @rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True) def execute(self, opts): """Run the backup plan managment command""" if opts.list: self.list_plans(opts.output == 'json') elif opts.get: self.get_plan(opts.name, opts.output == 'json') elif opts.remove: self.remove_plan(opts.name) elif opts.add: self.add_plan(opts.name, opts.services, opts.task, opts.description) def add_plan(self, name: str, services: Optional[str], tasks: Optional[List[str]], description: Optional[str]): """Add a new backup plan The validation of the inputs in the CLI is intentionally lacking as this is offloaded to the backup service. Args: name (str): The name to give the new plan. It must be unique. 
services (optional list): A list of services to back up; if empty, all services are backed up. tasks (optional list): A list of JSON strings representing the tasks to be run. description (optional str): An optional description string. """ if not name: _exit_if_errors(['--name is required']) service_list = [] if services: service_list = [service.strip() for service in services.split(',')] tasks_objects = [] if tasks: for task_str in tasks: try: task = json.loads(task_str) tasks_objects.append(task) except json.decoder.JSONDecodeError as json_error: _exit_if_errors([f'invalid task {json_error!s}']) plan = {} if service_list: plan['services'] = service_list if tasks_objects: plan['tasks'] = tasks_objects if description: plan['description'] = description _, errors = self.rest.add_backup_plan(name, plan) _exit_if_errors(errors) _success('Added plan') def remove_plan(self, name: str): """Removes a plan by name""" if not name: _exit_if_errors(['--name is required']) _, errors = self.rest.delete_backup_plan(name) _exit_if_errors(errors) _success('Plan removed') def get_plan(self, name: str, json_output: bool = False): """Gets a backup plan by name Args: name (str): The name of the plan to retrieve json_output (bool): Whether to print in JSON or a more human friendly way """ if not name: _exit_if_errors(['--name is required']) plan, errors = self.rest.get_backup_plan(name) _exit_if_errors(errors) if json_output: print(json.dumps(plan, indent=2)) else: self.human_print_plan(plan) def list_plans(self, json_output: bool = False): """Prints all the plans stored in the backup service Args: json_output (bool): Whether to print in JSON or a more human friendly way """ plans, errors = self.rest.list_backup_plans() _exit_if_errors(errors) if json_output: print(json.dumps(plans, indent=2)) else: self.human_print_plans(plans) @staticmethod def human_print_plan(plan: object): """Prints the plan in a human friendly way""" print(f'Name: {plan["name"]}') print(f'Description: {plan["description"] if "description" in plan else "N/A"}') print(f'Services: {BackupServicePlan.service_list_to_str(plan["services"])}') print(f'Default: {(plan["default"] if "default" in plan else False)!s}') # If there are no tasks, return if not plan["tasks"]: return print() print('Tasks:') task_name_pad = 5 schedule_pad = 10 for task in plan['tasks']: if len(task['name']) > task_name_pad: task_name_pad = len(task['name']) task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule']) if len(task['schedule_str']) > schedule_pad: schedule_pad = len(task['schedule_str']) task_name_pad += 1 schedule_pad += 1 header = f'{"Name":<{task_name_pad}} | {"Schedule":<{schedule_pad}} | Options' print(header) print('-' * (len(header) + 5)) for task in plan['tasks']: options = BackupServicePlan.format_options(task) print(f'{task["name"]:<{task_name_pad}} | {task["schedule_str"]:<{schedule_pad}} | {options}') @staticmethod def format_options(task: object) -> str: """Format the full backup or merge options""" options = 'N/A' if task['task_type'] == 'BACKUP' and task['full_backup']: options = 'Full backup' elif task['task_type'] == 'MERGE': if 'merge_options' in task: options = (f'Merge from {task["merge_options"]["offset_start"]} to ' f'{task["merge_options"]["offset_end"]}') else: options = 'Merge everything' return options @staticmethod def format_schedule(schedule: object) -> str: """Format the schedule object in a string of the format <task> every <frequency>?
<period> (at <time>)?""" task_start = f'{schedule["job_type"].lower()}' frequency_part = 'every' if schedule['frequency'] == 1: period = schedule["period"].lower() period = period if period[-1] != 's' else period[:-1] frequency_part += f' {period}' else: frequency_part += f' {schedule["frequency"]} {schedule["period"].lower()}' time_part = f' at {schedule["time"]}' if 'time' in schedule else '' return f'{task_start} {frequency_part}{time_part}' @staticmethod def human_print_plans(plans: List[Any]): """Prints a table with an overview of each plan""" # if plans is empty or none print no plans message if not plans: print('No plans') return name_pad = 5 service_pad = 8 for plan in plans: if len(plan['name']) > name_pad: name_pad = len(plan['name']) services_str = BackupServicePlan.service_list_to_str(plan['services']) if len(services_str) > service_pad: service_pad = len(services_str) name_pad += 1 service_pad += 1 header = f'{"Name":<{name_pad}} | # Tasks | {"Services":<{service_pad}} | Default' print(header) print('-' * (len(header) + 5)) for plan in plans: task_len = len(plan['tasks']) if 'tasks' in plan and plan['tasks'] else 0 print(f'{plan["name"]:<{name_pad}} | {task_len:<7} | ' f'{BackupServicePlan.service_list_to_str(plan["services"]):<{service_pad}} | ' f'{(plan["default"] if "default" in plan else False)!s}') @staticmethod def service_list_to_str(services: Optional[List[Any]]) -> str: """convert the list of services to a concise list of services""" if not services: return 'all' # a way to convert codenames to visible name convert = {'gsi': 'Indexing', 'cbas': 'Analytics', 'ft': 'Full Text Search'} return ', '.join([convert[service] if service in convert else service.title() for service in services]) @staticmethod def get_man_page_name(): return 'couchbase-cli-backup-service-plan' + '.1' if os.name != 'nt' else '.html' @staticmethod def get_description(): return 'Manage backup service plans'
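# Illustrative note (not part of the original source): expected behaviour of the plan formatting
# helpers above, derived solely from their definitions. The schedule dictionaries shown here are
# hypothetical examples.
#
#   BackupServicePlan.format_schedule({'job_type': 'BACKUP', 'frequency': 1, 'period': 'HOURS', 'time': '22:00'})
#       -> 'backup every hour at 22:00'
#   BackupServicePlan.format_schedule({'job_type': 'MERGE', 'frequency': 6, 'period': 'HOURS'})
#       -> 'merge every 6 hours'
#   BackupServicePlan.service_list_to_str(None)                    -> 'all'
#   BackupServicePlan.service_list_to_str(['data', 'gsi', 'cbas']) -> 'Data, Indexing, Analytics'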
# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved import argparse import datetime import json import os import random import time from collections import namedtuple from copy import deepcopy from functools import partial from pathlib import Path import numpy as np import torch import torch.utils from torch.utils.data import ConcatDataset, DataLoader, DistributedSampler import util.dist as dist import util.misc as utils from datasets import build_dataset, get_coco_api_from_dataset from datasets.clevrref import ClevrRefEvaluator from datasets.coco_eval import CocoEvaluator from datasets.flickr_eval import FlickrEvaluator from datasets.phrasecut_eval import PhrasecutEvaluator from datasets.refexp import RefExpEvaluator from engine import evaluate, train_one_epoch from models import build_model from models.postprocessors import build_postprocessors def get_args_parser(): parser = argparse.ArgumentParser("Set transformer detector", add_help=False) parser.add_argument("--run_name", default="", type=str) # Dataset specific parser.add_argument("--dataset_config", default=None, required=True) parser.add_argument("--do_qa", action="store_true", help="Whether to do question answering") parser.add_argument( "--predict_final", action="store_true", help="If true, will predict if a given box is in the actual referred set. Useful for CLEVR-Ref+ only currently.", ) parser.add_argument("--no_detection", action="store_true", help="Whether to train the detector") parser.add_argument( "--split_qa_heads", action="store_true", help="Whether to use a separate head per question type in vqa" ) parser.add_argument( "--combine_datasets", nargs="+", help="List of datasets to combine for training", default=["flickr"] ) parser.add_argument( "--combine_datasets_val", nargs="+", help="List of datasets to combine for eval", default=["flickr"] ) parser.add_argument("--coco_path", type=str, default="") parser.add_argument("--vg_img_path", type=str, default="") parser.add_argument("--vg_ann_path", type=str, default="") parser.add_argument("--clevr_img_path", type=str, default="") parser.add_argument("--clevr_ann_path", type=str, default="") parser.add_argument("--phrasecut_ann_path", type=str, default="") parser.add_argument( "--phrasecut_orig_ann_path", type=str, default="", ) parser.add_argument("--modulated_lvis_ann_path", type=str, default="") # Training hyper-parameters parser.add_argument("--lr", default=1e-4, type=float) parser.add_argument("--lr_backbone", default=1e-5, type=float) parser.add_argument("--text_encoder_lr", default=5e-5, type=float) parser.add_argument("--batch_size", default=2, type=int) parser.add_argument("--weight_decay", default=1e-4, type=float) parser.add_argument("--epochs", default=40, type=int) parser.add_argument("--lr_drop", default=35, type=int) parser.add_argument( "--epoch_chunks", default=-1, type=int, help="If greater than 0, will split the training set into chunks and validate/checkpoint after each chunk", ) parser.add_argument("--optimizer", default="adam", type=str) parser.add_argument("--clip_max_norm", default=0.1, type=float, help="gradient clipping max norm") parser.add_argument( "--eval_skip", default=1, type=int, help='do evaluation every "eval_skip" frames', ) parser.add_argument( "--schedule", default="linear_with_warmup", type=str, choices=("step", "multistep", "linear_with_warmup", "all_linear_with_warmup"), ) parser.add_argument("--ema", 
action="store_true") parser.add_argument("--ema_decay", type=float, default=0.9998) parser.add_argument("--fraction_warmup_steps", default=0.01, type=float, help="Fraction of total number of steps") # Model parameters parser.add_argument( "--frozen_weights", type=str, default=None, help="Path to the pretrained model. If set, only the mask head will be trained", ) parser.add_argument( "--freeze_text_encoder", action="store_true", help="Whether to freeze the weights of the text encoder" ) parser.add_argument( "--text_encoder_type", default="roberta-base", choices=("roberta-base", "distilroberta-base", "roberta-large"), ) # Backbone parser.add_argument( "--backbone", default="resnet101", type=str, help="Name of the convolutional backbone to use such as resnet50 resnet101 timm_tf_efficientnet_b3_ns", ) parser.add_argument( "--dilation", action="store_true", help="If true, we replace stride with dilation in the last convolutional block (DC5)", ) parser.add_argument( "--position_embedding", default="sine", type=str, choices=("sine", "learned"), help="Type of positional embedding to use on top of the image features", ) # Transformer parser.add_argument( "--enc_layers", default=6, type=int, help="Number of encoding layers in the transformer", ) parser.add_argument( "--dec_layers", default=6, type=int, help="Number of decoding layers in the transformer", ) parser.add_argument( "--dim_feedforward", default=2048, type=int, help="Intermediate size of the feedforward layers in the transformer blocks", ) parser.add_argument( "--hidden_dim", default=256, type=int, help="Size of the embeddings (dimension of the transformer)", ) parser.add_argument("--dropout", default=0.1, type=float, help="Dropout applied in the transformer") parser.add_argument( "--nheads", default=8, type=int, help="Number of attention heads inside the transformer's attentions", ) parser.add_argument("--num_queries", default=100, type=int, help="Number of query slots") parser.add_argument("--pre_norm", action="store_true") parser.add_argument( "--no_pass_pos_and_query", dest="pass_pos_and_query", action="store_false", help="Disables passing the positional encodings to each attention layers", ) # Segmentation parser.add_argument( "--mask_model", default="none", type=str, choices=("none", "smallconv", "v2"), help="Segmentation head to be used (if None, segmentation will not be trained)", ) parser.add_argument("--remove_difficult", action="store_true") parser.add_argument("--masks", action="store_true") # Loss parser.add_argument( "--no_aux_loss", dest="aux_loss", action="store_false", help="Disables auxiliary decoding losses (loss at each layer)", ) parser.add_argument( "--set_loss", default="hungarian", type=str, choices=("sequential", "hungarian", "lexicographical"), help="Type of matching to perform in the loss", ) parser.add_argument("--contrastive_loss", action="store_true", help="Whether to add contrastive loss") parser.add_argument( "--no_contrastive_align_loss", dest="contrastive_align_loss", action="store_false", help="Whether to add contrastive alignment loss", ) parser.add_argument( "--contrastive_loss_hdim", type=int, default=64, help="Projection head output size before computing normalized temperature-scaled cross entropy loss", ) parser.add_argument( "--temperature_NCE", type=float, default=0.07, help="Temperature in the temperature-scaled cross entropy loss" ) # * Matcher parser.add_argument( "--set_cost_class", default=1, type=float, help="Class coefficient in the matching cost", ) parser.add_argument( "--set_cost_bbox", 
default=5, type=float, help="L1 box coefficient in the matching cost", ) parser.add_argument( "--set_cost_giou", default=2, type=float, help="giou box coefficient in the matching cost", ) # Loss coefficients parser.add_argument("--ce_loss_coef", default=1, type=float) parser.add_argument("--mask_loss_coef", default=1, type=float) parser.add_argument("--dice_loss_coef", default=1, type=float) parser.add_argument("--bbox_loss_coef", default=5, type=float) parser.add_argument("--giou_loss_coef", default=2, type=float) parser.add_argument("--qa_loss_coef", default=1, type=float) parser.add_argument( "--eos_coef", default=0.1, type=float, help="Relative classification weight of the no-object class", ) parser.add_argument("--contrastive_loss_coef", default=0.1, type=float) parser.add_argument("--contrastive_align_loss_coef", default=1, type=float) # Run specific parser.add_argument("--inference", action="store_true", help="Whether to run inference only") parser.add_argument("--test", action="store_true", help="Whether to run evaluation on val or test set") parser.add_argument("--test_type", type=str, default="test", choices=("testA", "testB", "test")) parser.add_argument("--output-dir", default="", help="path where to save, empty for no saving") parser.add_argument("--device", default="cuda", help="device to use for training / testing") parser.add_argument("--seed", default=42, type=int) parser.add_argument("--resume", default="", help="resume from checkpoint") parser.add_argument("--load", default="", help="resume from checkpoint") parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch") parser.add_argument("--eval", action="store_true", help="Only run evaluation") parser.add_argument("--num_workers", default=5, type=int) parser.add_argument("--do_qa_with_qa_fine_tuned", action="store_true", help="Have the model been already fine-tuned on other QA dataset?") # Distributed training parameters parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes") parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training") return parser def main(args): # Init distributed mode dist.init_distributed_mode(args) # Update dataset specific configs if args.dataset_config is not None: # https://stackoverflow.com/a/16878364 d = vars(args) with open(args.dataset_config, "r") as f: cfg = json.load(f) d.update(cfg) print("git:\n {}\n".format(utils.get_sha())) # Segmentation related if args.mask_model != "none": args.masks = True if args.frozen_weights is not None: assert args.masks, "Frozen training is meant for segmentation only" print(args) device = torch.device(args.device) output_dir = Path(args.output_dir) # fix the seed for reproducibility seed = args.seed + dist.get_rank() torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.set_deterministic(True) # Build the model model, criterion, contrastive_criterion, qa_criterion, weight_dict = build_model(args) model.to(device) assert ( criterion is not None or qa_criterion is not None ), "Error: should train either detection or question answering (or both)" # Get a copy of the model for exponential moving averaged version of the model model_ema = deepcopy(model) if args.ema else None model_without_ddp = model if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True) model_without_ddp = model.module n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) 
print("number of params:", n_parameters) # Set up optimizers param_dicts = [ { "params": [ p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and "text_encoder" not in n and p.requires_grad ] }, { "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad], "lr": args.lr_backbone, }, { "params": [p for n, p in model_without_ddp.named_parameters() if "text_encoder" in n and p.requires_grad], "lr": args.text_encoder_lr, }, ] if args.optimizer == "sgd": optimizer = torch.optim.SGD(param_dicts, lr=args.lr, momentum=0.9, weight_decay=args.weight_decay) elif args.optimizer in ["adam", "adamw"]: optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay) else: raise RuntimeError(f"Unsupported optimizer {args.optimizer}") # Train dataset if len(args.combine_datasets) == 0 and not args.eval: raise RuntimeError("Please provide at least one training dataset") dataset_train, sampler_train, data_loader_train = None, None, None if not args.eval: dataset_train = ConcatDataset( [build_dataset(name, image_set="train", args=args) for name in args.combine_datasets] ) # To handle very big datasets, we chunk it into smaller parts. if args.epoch_chunks > 0: print( "Splitting the training set into {args.epoch_chunks} of size approximately " f" {len(dataset_train) // args.epoch_chunks}" ) chunks = torch.chunk(torch.arange(len(dataset_train)), args.epoch_chunks) datasets = [torch.utils.data.Subset(dataset_train, chunk.tolist()) for chunk in chunks] if args.distributed: samplers_train = [DistributedSampler(ds) for ds in datasets] else: samplers_train = [torch.utils.data.RandomSampler(ds) for ds in datasets] batch_samplers_train = [ torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True) for sampler_train in samplers_train ] assert len(batch_samplers_train) == len(datasets) data_loaders_train = [ DataLoader( ds, batch_sampler=batch_sampler_train, collate_fn=partial(utils.collate_fn, False), num_workers=args.num_workers, ) for ds, batch_sampler_train in zip(datasets, batch_samplers_train) ] else: if args.distributed: sampler_train = DistributedSampler(dataset_train) else: sampler_train = torch.utils.data.RandomSampler(dataset_train) batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True) data_loader_train = DataLoader( dataset_train, batch_sampler=batch_sampler_train, collate_fn=partial(utils.collate_fn, False), num_workers=args.num_workers, ) # Val dataset if len(args.combine_datasets_val) == 0: raise RuntimeError("Please provide at leas one validation dataset") Val_all = namedtuple(typename="val_data", field_names=["dataset_name", "dataloader", "base_ds", "evaluator_list"]) val_tuples = [] for dset_name in args.combine_datasets_val: dset = build_dataset(dset_name, image_set="val", args=args) sampler = ( DistributedSampler(dset, shuffle=False) if args.distributed else torch.utils.data.SequentialSampler(dset) ) dataloader = DataLoader( dset, args.batch_size, sampler=sampler, drop_last=False, collate_fn=partial(utils.collate_fn, False), num_workers=args.num_workers, ) base_ds = get_coco_api_from_dataset(dset) val_tuples.append(Val_all(dataset_name=dset_name, dataloader=dataloader, base_ds=base_ds, evaluator_list=None)) if args.frozen_weights is not None: if args.resume.startswith("https"): checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location="cpu", check_hash=True) else: checkpoint = torch.load(args.resume, map_location="cpu") if "model_ema" 
in checkpoint and checkpoint["model_ema"] is not None: model_without_ddp.detr.load_state_dict(checkpoint["model_ema"], strict=False) else: model_without_ddp.detr.load_state_dict(checkpoint["model"], strict=False) if args.ema: model_ema = deepcopy(model_without_ddp) # Used for loading weights from another model and starting a training from scratch. Especially useful if # loading into a model with different functionality. if args.load: print("loading from", args.load) checkpoint = torch.load(args.load, map_location="cpu") if "model_ema" in checkpoint: if args.do_qa_with_qa_fine_tuned: # Delete mismatching weights: del checkpoint["model_ema"]["qa_embed.weight"] del checkpoint["model_ema"]["answer_type_head.weight"] del checkpoint["model_ema"]["answer_type_head.bias"] model_without_ddp.load_state_dict(checkpoint["model_ema"], strict=False) else: if args.do_qa_with_qa_fine_tuned: # Delete mismatching weights: del checkpoint["model"]["qa_embed.weight"] del checkpoint["model"]["answer_type_head.weight"] del checkpoint["model"]["answer_type_head.bias"] model_without_ddp.load_state_dict(checkpoint["model"], strict=False) if args.ema: model_ema = deepcopy(model_without_ddp) # Used for resuming training from the checkpoint of a model. Used when training times-out or is pre-empted. if args.resume: if args.resume.startswith("https"): checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location="cpu", check_hash=True) else: checkpoint = torch.load(args.resume, map_location="cpu") model_without_ddp.load_state_dict(checkpoint["model"]) if not args.eval and "optimizer" in checkpoint and "epoch" in checkpoint: optimizer.load_state_dict(checkpoint["optimizer"]) args.start_epoch = checkpoint["epoch"] + 1 if args.ema: if "model_ema" not in checkpoint: print("WARNING: ema model not found in checkpoint, resetting to current model") model_ema = deepcopy(model_without_ddp) else: model_ema.load_state_dict(checkpoint["model_ema"]) def build_evaluator_list(base_ds, dataset_name): """Helper function to build the list of evaluators for a given dataset""" evaluator_list = [] if args.no_detection: return evaluator_list iou_types = ["bbox"] if args.masks: iou_types.append("segm") evaluator_list.append(CocoEvaluator(base_ds, tuple(iou_types), useCats=False)) if "refexp" in dataset_name: evaluator_list.append(RefExpEvaluator(base_ds, ("bbox"))) if "clevrref" in dataset_name: evaluator_list.append(ClevrRefEvaluator(base_ds, ("bbox"))) if "flickr" in dataset_name: evaluator_list.append( FlickrEvaluator( args.flickr_dataset_path, subset="test" if args.test else "val", merge_boxes=args.GT_type == "merged", ) ) if "phrasecut" in dataset_name: evaluator_list.append( PhrasecutEvaluator( "test" if args.test else "miniv", ann_folder=args.phrasecut_orig_ann_path, output_dir=os.path.join(output_dir, "phrasecut_eval"), eval_mask=args.masks, ) ) return evaluator_list # Runs only evaluation, by default on the validation set unless --test is passed. 
if args.eval: test_stats = {} test_model = model_ema if model_ema is not None else model for i, item in enumerate(val_tuples): evaluator_list = build_evaluator_list(item.base_ds, item.dataset_name) postprocessors = build_postprocessors(args, item.dataset_name) item = item._replace(evaluator_list=evaluator_list) print(f"Evaluating {item.dataset_name}") curr_test_stats = evaluate( model=test_model, criterion=criterion, contrastive_criterion=contrastive_criterion, qa_criterion=qa_criterion, postprocessors=postprocessors, weight_dict=weight_dict, data_loader=item.dataloader, evaluator_list=item.evaluator_list, device=device, args=args, ) test_stats.update({item.dataset_name + "_" + k: v for k, v in curr_test_stats.items()}) log_stats = { **{f"test_{k}": v for k, v in test_stats.items()}, "n_parameters": n_parameters, } print(log_stats) return # Runs training and evaluates after every --eval_skip epochs print("Start training") start_time = time.time() best_metric = 0.0 for epoch in range(args.start_epoch, args.epochs): if args.epoch_chunks > 0: sampler_train = samplers_train[epoch % len(samplers_train)] data_loader_train = data_loaders_train[epoch % len(data_loaders_train)] print(f"Starting epoch {epoch // len(data_loaders_train)}, sub_epoch {epoch % len(data_loaders_train)}") else: print(f"Starting epoch {epoch}") if args.distributed: sampler_train.set_epoch(epoch) train_stats = train_one_epoch( model=model, criterion=criterion, contrastive_criterion=contrastive_criterion, qa_criterion=qa_criterion, data_loader=data_loader_train, weight_dict=weight_dict, optimizer=optimizer, device=device, epoch=epoch, args=args, max_norm=args.clip_max_norm, model_ema=model_ema, ) if args.output_dir: checkpoint_paths = [output_dir / "checkpoint.pth"] # extra checkpoint before LR drop and every 2 epochs if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 2 == 0: checkpoint_paths.append(output_dir / f"checkpoint{epoch:04}.pth") for checkpoint_path in checkpoint_paths: dist.save_on_master( { "model": model_without_ddp.state_dict(), "model_ema": model_ema.state_dict() if args.ema else None, "optimizer": optimizer.state_dict(), "epoch": epoch, "args": args, }, checkpoint_path, ) if epoch % args.eval_skip == 0: test_stats = {} test_model = model_ema if model_ema is not None else model for i, item in enumerate(val_tuples): evaluator_list = build_evaluator_list(item.base_ds, item.dataset_name) item = item._replace(evaluator_list=evaluator_list) postprocessors = build_postprocessors(args, item.dataset_name) print(f"Evaluating {item.dataset_name}") curr_test_stats = evaluate( model=test_model, criterion=criterion, contrastive_criterion=contrastive_criterion, qa_criterion=qa_criterion, postprocessors=postprocessors, weight_dict=weight_dict, data_loader=item.dataloader, evaluator_list=item.evaluator_list, device=device, args=args, ) test_stats.update({item.dataset_name + "_" + k: v for k, v in curr_test_stats.items()}) else: test_stats = {} log_stats = { **{f"train_{k}": v for k, v in train_stats.items()}, **{f"test_{k}": v for k, v in test_stats.items()}, "epoch": epoch, "n_parameters": n_parameters, } if args.output_dir and dist.is_main_process(): with (output_dir / "log.txt").open("a") as f: f.write(json.dumps(log_stats) + "\n") if epoch % args.eval_skip == 0: if args.do_qa: if "gqa" in args.combine_datasets: metric = test_stats["gqa_accuracy_answer_total_unscaled"] else: metric = test_stats["vqa2_accuracy_answer_total_unscaled"] # TODO: at what point does the dataset name get prepended?
else: metric = np.mean([v[1] for k, v in test_stats.items() if "coco_eval_bbox" in k]) if args.output_dir and metric > best_metric: best_metric = metric checkpoint_paths = [output_dir / "BEST_checkpoint.pth"] # save a copy of the best checkpoint seen so far for checkpoint_path in checkpoint_paths: dist.save_on_master( { "model": model_without_ddp.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch, "args": args, }, checkpoint_path, ) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print("Training time {}".format(total_time_str)) if __name__ == "__main__": parser = argparse.ArgumentParser("DETR training and evaluation script", parents=[get_args_parser()]) args = parser.parse_args() if args.output_dir: Path(args.output_dir).mkdir(parents=True, exist_ok=True) main(args)
40.705615
142
0.626393
[ "Apache-2.0" ]
TopCoder2K/mdetr
main.py
26,866
Python
import sys import mrjob from mrjob.job import MRJob import re from itertools import islice, izip import itertools from mrjob.step import MRStep from mrjob.protocol import JSONValueProtocol WORD_RE = re.compile(r'[a-zA-Z]+') class BigramCount(MRJob): OUTPUT_PROTOCOL = JSONValueProtocol def mapper(self, _, line): words = WORD_RE.findall(line) for i in izip(words, islice(words, 1, None),islice(words,2,None)): bigram=(i[0],i[1],i[2]) s_bigram=sorted(bigram) yield s_bigram,1 def combiner(self, bigram, counts): yield (bigram,sum(counts)) def reducer(self, bigram, counts): yield (bigram,sum(counts)) if __name__ == '__main__': sys.stdout=open("data/samplejson.json",'w') BigramCount.run()
25.181818
75
0.631769
[ "MIT" ]
akhilaSharon/big-data-python-class
Homeworks/Homework5/code/p3.py
831
Python
import json from tests.constants import TEST_LOCATION_GUID def test_get_location_by_location_guid(test_client, auth_headers): get_resp = test_client.get('/mines/location/' + TEST_LOCATION_GUID, headers=auth_headers['full_auth_header']) get_data = json.loads(get_resp.data.decode()) assert get_data['mine_location_guid'] == TEST_LOCATION_GUID assert get_resp.status_code == 200
39.5
113
0.792405
[ "Apache-2.0" ]
MaxWardle/mds
python-backend/tests/mines/resources/test_mine_location_resource.py
395
Python
from lark import Lark _name_parser = Lark(""" ?start : name name : var* var : gen _extras _extras : (sub _extras_sub) | (sup _extras_sup) | (prime _extras) | () _extras_sub : (sup [prime]) | (prime _extras_sub) | () _extras_sup : (sub [prime]) | (prime _extras_sup) | () prime : "'" ((prime) | ()) sub : "_" subscript sup : "^" superscript ?gen : gen_singleton | gen_macro | ( "(" " "* gen_singleton " "* ")" ) | ( "(" " "* gen_macro " "* ")" )| gen_parens gen_singleton : /[A-Za-z]/ gen_macro : /\\\\[A-Za-z]+/ gen_parens : "(" (latex_macro | /[\w^ +\-'{},]/ | gen_parens)+ ")" ?latex_macro : /\\\\[A-Za-z]+/ ?subscript : ("{" /[0-9,]+/ "}") | /[0-9]+/ // commas allowed in subscript. ?superscript : ("{" /[0-9]+/ "}") | /[0-9]+/ %ignore " " """) from lark import Tree, Transformer class EvalName(Transformer): def gen_parens(self, args): import re result = re.sub("\\\\[A-Za-z]*", "\g<0>!!!!", "".join(args))\ .replace(" ", "")\ .replace("!!!!", " ")\ .strip() return "(" + result + ")" def gen_singleton(self, args): return str(args[0]).strip() def gen_macro(self, args): return str(args[0]).strip() def sub(self, args): return ["add_to_var", f"_{{{args[0]}}}"] def sup(self,args): return ["sup", str(args[0])] def var(self, args): name = args[0] result = [] for arg in sorted(args[1:]): if arg[0] == "add_to_var": name += arg[1] else: result.append(int(arg[1])) result = [name] + result if len(result) == 1: result.append(1) return result def prime(self, args): return ["add_to_var", "'"] def name(self, args): return args _name_evaluator = EvalName() def parse_name(name): t = _name_parser.parse(name) return reduce_monomial(_name_evaluator.transform(t)) def reduce_monomial(mono): result = {} for [k, v] in mono: result[k] = result.get(k, 0) + v return list(result.items()) def validate_name(name): from lark import LarkError err = None try: parse_name(name) except LarkError as e: err = {"name": type(e).__name__, "column" : getattr(e, "column", None)} return [err is None, err] # Write x^n but handle special cases x^0 ==> 1 and x^1 ==> x def power_name(var, n, zeroth_power=""): if n == 0: return zeroth_power elif n==1: return var else: # if var.find("'") > -1: # var = f"({var})" return f"{var}^{{{n}}}" def monomial_name(*exponents, keep_parens = False): result = " ".join(power_name(var, e) for [var,e] in exponents) if not keep_parens and len(exponents) == 1 and result.startswith("(") and result.endswith(")"): result = result[1:-1] if result.strip() == "": result = "1" return result def add_monomials(mono1, mono2): d = dict(mono1) for [k, v] in mono2: d[k] = d.get(k, 0) d[k] += v return list(d.items())
28.327586
122
0.5
[ "Apache-2.0", "MIT" ]
JoeyBF/sseq
python_ext/webserver/spectralsequences_webserver/name_tools.py
3,286
Python
# --------------------------------------------------------------------- # # Copyright (c) 2012 University of Oxford # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, --INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # --------------------------------------------------------------------- from django.conf.urls.defaults import patterns, url from . import views urlpatterns = patterns('', url(r'^(?:(?P<slug>[a-z\d\-/]+)/)?$', views.DocumentationView.as_view(), name='page'), )
43.878788
90
0.679558
[ "MIT" ]
dataflow/DataStage
datastage/web/documentation/urls.py
1,448
Python
#!/usr/bin/python # coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import sys #This works just like /bin/false, but Windows users might not have that sys.exit(1)
26.0625
93
0.736211
[ "Apache-2.0" ]
TansyArron/pants
tests/python/pants_test/tasks/false.py
417
Python
from django.http import HttpRequest from typing import Optional, Text from zerver.lib.actions import check_send_stream_message, \ check_send_private_message from zerver.lib.exceptions import StreamDoesNotExistError from zerver.lib.request import REQ, has_request_variables from zerver.models import UserProfile @has_request_variables def check_send_webhook_message( request: HttpRequest, user_profile: UserProfile, topic: Text, body: Text, stream: Optional[Text]=REQ(default=None), user_specified_topic: Optional[Text]=REQ("topic", default=None) ) -> None: if stream is None: assert user_profile.bot_owner is not None check_send_private_message(user_profile, request.client, user_profile.bot_owner, body) else: if user_specified_topic is not None: topic = user_specified_topic try: check_send_stream_message(user_profile, request.client, stream, topic, body) except StreamDoesNotExistError: # A PM will be sent to the bot_owner by check_message, notifying # that the webhook bot just tried to send a message to a non-existent # stream, so we don't need to re-raise it since it clutters up # webhook-errors.log pass
38.657143
81
0.67997
[ "Apache-2.0" ]
Romdeau/zulip
zerver/lib/webhooks/common.py
1,353
Python
# -*- coding: utf-8 -*- import json from urllib import quote from twisted.internet.defer import inlineCallbacks from vumi.message import TransportUserMessage from vumi.tests.helpers import VumiTestCase from vumi.transports.httprpc.tests.helpers import HttpRpcTransportHelper from vumi.tests.utils import LogCatcher from vxaat.ussd import AatUssdTransport class TestAatUssdTransport(VumiTestCase): def setUp(self): request_defaults = { 'msisdn': '27729042520', 'provider': 'MTN', } self.tx_helper = self.add_helper( HttpRpcTransportHelper( AatUssdTransport, request_defaults=request_defaults, ) ) def get_transport(self, config={}): defaults = { 'base_url': 'http://www.example.com/foo', 'web_path': '/api/aat/ussd/', 'web_port': '0', } defaults.update(config) return self.tx_helper.get_transport(defaults) def callback_url(self, to_addr): return "http://www.example.com/foo/api/aat/ussd/?to_addr=%s" % ( quote(to_addr),) def assert_inbound_message(self, msg, **field_values): expected_field_values = { 'content': "", 'from_addr': self.tx_helper.request_defaults['msisdn'], } expected_field_values.update(field_values) for field, expected_value in expected_field_values.iteritems(): self.assertEqual(msg[field], expected_value) def assert_outbound_message(self, msg, content, callback, continue_session=True): headertext = '<headertext>%s</headertext>' % content if continue_session: options = ( '<options>' '<option callback="%s" command="1" display="false"' ' order="1" />' '</options>' ) % callback else: options = "" xml = ''.join([ '<request>', headertext, options, '</request>', ]) self.assertEqual(msg, xml) def assert_ack(self, ack, reply): self.assertEqual(ack.payload['event_type'], 'ack') self.assertEqual(ack.payload['user_message_id'], reply['message_id']) self.assertEqual(ack.payload['sent_message_id'], reply['message_id']) def assert_nack(self, nack, reply, reason): self.assertEqual(nack.payload['event_type'], 'nack') self.assertEqual(nack.payload['user_message_id'], reply['message_id']) self.assertEqual(nack.payload['nack_reason'], reason) @inlineCallbacks def test_inbound_begin(self): yield self.get_transport() ussd_string = "*1234#" # Send initial request d = self.tx_helper.mk_request(request=ussd_string) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assert_inbound_message( msg, session_event=TransportUserMessage.SESSION_NEW, to_addr=ussd_string, content=None, ) reply_content = 'We are the Knights Who Say ... Ni!' reply = msg.reply(reply_content) self.tx_helper.dispatch_outbound(reply) response = yield d self.assert_outbound_message( response.delivered_body, reply_content, self.callback_url(ussd_string), ) [ack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_ack(ack, reply) @inlineCallbacks def test_inbound_begin_with_different_provider(self): yield self.get_transport({ 'provider_mappings': {'Camelot': 'camelot'} }) ussd_string = "*1234#" # Send initial request d = self.tx_helper.mk_request(request=ussd_string, provider="Camelot") [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assert_inbound_message( msg, session_event=TransportUserMessage.SESSION_NEW, to_addr=ussd_string, content=None, provider="camelot", ) reply_content = 'We are the Knights Who Say ... Ni!' 
reply = msg.reply(reply_content) self.tx_helper.dispatch_outbound(reply) response = yield d self.assert_outbound_message( response.delivered_body, reply_content, self.callback_url(ussd_string), ) [ack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_ack(ack, reply) @inlineCallbacks def test_inbound_with_unknown_provider(self): yield self.get_transport({ 'provider_mappings': {'Camelot': 'camelot'} }) ussd_string = "*1234#" with LogCatcher() as lc: d = self.tx_helper.mk_request(request=ussd_string, provider="Tim") [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assertTrue( "No mapping exists for provider 'Tim', using 'Tim' as a fallback" in lc.messages()) self.assert_inbound_message( msg, session_event=TransportUserMessage.SESSION_NEW, to_addr=ussd_string, content=None, provider="Tim", ) self.tx_helper.dispatch_outbound(msg.reply("I... am an enchanter")) yield d @inlineCallbacks def test_inbound_begin_with_close(self): yield self.get_transport() ussd_string = "*code#" # Send initial request d = self.tx_helper.mk_request(request=ussd_string) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assert_inbound_message( msg, session_event=TransportUserMessage.SESSION_NEW, content=None, ) reply_content = 'We are no longer the Knight who say Ni!' reply = msg.reply(reply_content, continue_session=False) self.tx_helper.dispatch_outbound(reply) response = yield d self.assert_outbound_message( response.delivered_body, reply_content, self.callback_url(ussd_string), continue_session=False, ) [ack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_ack(ack, reply) @inlineCallbacks def test_inbound_resume_and_reply_with_end(self): yield self.get_transport() ussd_string = "*1234#" user_content = "I didn't expect a kind of Spanish Inquisition!" d = self.tx_helper.mk_request(request=user_content, to_addr=ussd_string) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assert_inbound_message( msg, session_event=TransportUserMessage.SESSION_RESUME, content=user_content, ) reply_content = "Nobody expects the Spanish Inquisition!" reply = msg.reply(reply_content, continue_session=False) self.tx_helper.dispatch_outbound(reply) response = yield d self.assert_outbound_message( response.delivered_body, reply_content, self.callback_url(to_addr="*1234#"), continue_session=False, ) [ack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_ack(ack, reply) @inlineCallbacks def test_inbound_resume_and_reply_with_resume(self): yield self.get_transport() ussd_string = "xxxx" user_content = "Well, what is it you want?" d = self.tx_helper.mk_request(request=user_content, to_addr=ussd_string) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assert_inbound_message( msg, session_event=TransportUserMessage.SESSION_RESUME, content=user_content, to_addr=ussd_string ) reply_content = "We want ... a shrubbery!" 
reply = msg.reply(reply_content, continue_session=True) self.tx_helper.dispatch_outbound(reply) response = yield d self.assert_outbound_message( response.delivered_body, reply_content, self.callback_url(ussd_string), continue_session=True, ) [ack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_ack(ack, reply) @inlineCallbacks def test_request_with_missing_parameters(self): yield self.get_transport() response = yield self.tx_helper.mk_request_raw( params={"request": '', "provider": ''}) self.assertEqual( json.loads(response.delivered_body), {'missing_parameter': ['msisdn']}) self.assertEqual(response.code, 400) @inlineCallbacks def test_request_with_unexpected_parameters(self): yield self.get_transport() response = yield self.tx_helper.mk_request( unexpected_p1='', unexpected_p2='') self.assertEqual(response.code, 400) body = json.loads(response.delivered_body) self.assertEqual(set(['unexpected_parameter']), set(body.keys())) self.assertEqual( sorted(body['unexpected_parameter']), ['unexpected_p1', 'unexpected_p2']) @inlineCallbacks def test_no_reply_to_in_response(self): yield self.get_transport() msg = yield self.tx_helper.make_dispatch_outbound( content="Nudge, nudge, wink, wink. Know what I mean?", message_id=1 ) [nack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_nack(nack, msg, "Outbound message is not a reply") @inlineCallbacks def test_no_content_in_reply(self): yield self.get_transport() msg = yield self.tx_helper.make_dispatch_outbound( content="", message_id=1 ) [nack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_nack(nack, msg, "Outbound message has no content.") @inlineCallbacks def test_failed_request(self): yield self.get_transport() msg = yield self.tx_helper.make_dispatch_outbound( in_reply_to='xxxx', content="She turned me into a newt!", message_id=1 ) [nack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_nack(nack, msg, "Response to http request failed.") @inlineCallbacks def test_metadata_handled(self): yield self.get_transport({ 'provider_mappings': {'MTN': 'mtn'} }) ussd_session_id = 'xxxx' content = "*code#" d = self.tx_helper.mk_request(request=content, ussdSessionId=ussd_session_id) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assert_inbound_message( msg, session_event=TransportUserMessage.SESSION_NEW, content=None, helper_metadata={ 'session_id': ussd_session_id, }, transport_metadata={ 'aat_ussd': { 'provider': 'mtn', 'ussd_session_id': ussd_session_id, } } ) reply_content = "We want ... a shrubbery!" reply = msg.reply(reply_content, continue_session=True) self.tx_helper.dispatch_outbound(reply) yield d [ack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_ack(ack, reply) @inlineCallbacks def test_callback_url_with_trailing_slash(self): yield self.get_transport({ "base_url": "http://www.example.com/foo/", }) ussd_string = '*1234#' user_content = "Well, what is it you want?" d = self.tx_helper.mk_request(request=user_content, to_addr=ussd_string) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) self.assert_inbound_message( msg, session_event=TransportUserMessage.SESSION_RESUME, content=user_content, to_addr=ussd_string ) reply_content = "We want ... a shrubbery!" 
reply = msg.reply(reply_content, continue_session=True) self.tx_helper.dispatch_outbound(reply) response = yield d self.assert_outbound_message( response.delivered_body, reply_content, self.callback_url(ussd_string), continue_session=True, ) [ack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_ack(ack, reply) @inlineCallbacks def test_outbound_unicode(self): yield self.get_transport() content = "One, two, ... five!" ussd_string = '*1234#' d = self.tx_helper.mk_request(request=content, to_addr=ussd_string) [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1) reply_content = "Thrëë, my lord." reply = msg.reply(reply_content, continue_session=True) self.tx_helper.dispatch_outbound(reply) response = yield d self.assert_outbound_message( response.delivered_body, reply_content, self.callback_url(ussd_string), continue_session=True, ) [ack] = yield self.tx_helper.wait_for_dispatched_events(1) self.assert_ack(ack, reply)
33.10219
78
0.610217
[ "BSD-3-Clause" ]
praekelt/vumi-aat
vxaat/tests/test_ussd.py
13,607
Python
# Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import re import netaddr import six from oslo_utils import uuidutils from django.core.exceptions import ValidationError # noqa from django.core import urlresolvers from django.forms import fields from django.forms import forms from django.forms.utils import flatatt # noqa from django.forms import widgets from django.template import Context # noqa from django.template.loader import get_template # noqa from django.utils.encoding import force_text from django.utils.functional import Promise # noqa from django.utils import html from django.utils.safestring import mark_safe # noqa from django.utils.translation import ugettext_lazy as _ ip_allowed_symbols_re = re.compile(r'^[a-fA-F0-9:/\.]+$') IPv4 = 1 IPv6 = 2 class IPField(fields.Field): """Form field for entering IP/range values, with validation. Supports IPv4/IPv6 in the format: .. xxx.xxx.xxx.xxx .. xxx.xxx.xxx.xxx/zz .. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff .. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/zz and all compressed forms. Also the short forms are supported: xxx/yy xxx.xxx/yy .. attribute:: version Specifies which IP version to validate, valid values are 1 (fields.IPv4), 2 (fields.IPv6) or both - 3 (fields.IPv4 | fields.IPv6). Defaults to IPv4 (1) .. attribute:: mask Boolean flag to validate subnet masks along with IP address. E.g: 10.0.0.1/32 .. attribute:: mask_range_from Subnet range limitation, e.g. 16 That means the input mask will be checked to be in the range 16:max_value. Useful to limit the subnet ranges to A/B/C-class networks. 
""" invalid_format_message = _("Incorrect format for IP address") invalid_version_message = _("Invalid version for IP address") invalid_mask_message = _("Invalid subnet mask") max_v4_mask = 32 max_v6_mask = 128 def __init__(self, *args, **kwargs): self.mask = kwargs.pop("mask", None) self.min_mask = kwargs.pop("mask_range_from", 0) self.version = kwargs.pop('version', IPv4) super(IPField, self).__init__(*args, **kwargs) def validate(self, value): super(IPField, self).validate(value) if not value and not self.required: return try: if self.mask: self.ip = netaddr.IPNetwork(value) else: self.ip = netaddr.IPAddress(value) except Exception: raise ValidationError(self.invalid_format_message) if not any([self.version & IPv4 > 0 and self.ip.version == 4, self.version & IPv6 > 0 and self.ip.version == 6]): raise ValidationError(self.invalid_version_message) if self.mask: if self.ip.version == 4 and \ not self.min_mask <= self.ip.prefixlen <= self.max_v4_mask: raise ValidationError(self.invalid_mask_message) if self.ip.version == 6 and \ not self.min_mask <= self.ip.prefixlen <= self.max_v6_mask: raise ValidationError(self.invalid_mask_message) def clean(self, value): super(IPField, self).clean(value) return str(getattr(self, "ip", "")) class MultiIPField(IPField): """Extends IPField to allow comma-separated lists of addresses.""" def validate(self, value): self.addresses = [] if value: addresses = value.split(',') for ip in addresses: super(MultiIPField, self).validate(ip) self.addresses.append(ip) else: super(MultiIPField, self).validate(value) def clean(self, value): super(MultiIPField, self).clean(value) return str(','.join(getattr(self, "addresses", []))) class SelectWidget(widgets.Select): """Customizable select widget, that allows to render data-xxx attributes from choices. This widget also allows user to specify additional html attributes for choices. .. attribute:: data_attrs Specifies object properties to serialize as data-xxx attribute. If passed ('id', ), this will be rendered as: <option data-id="123">option_value</option> where 123 is the value of choice_value.id .. attribute:: transform A callable used to render the display value from the option object. .. attribute:: transform_html_attrs A callable used to render additional HTML attributes for the option object. It returns a dictionary containing the html attributes and their values. For example, to define a title attribute for the choices:: helpText = { 'Apple': 'This is a fruit', 'Carrot': 'This is a vegetable' } def get_title(data): text = helpText.get(data, None) if text: return {'title': text} else: return {} .... .... 
widget=forms.ThemableSelect( attrs={'class': 'switchable', 'data-slug': 'source'}, transform_html_attrs=get_title ) self.fields[<field name>].choices = ([ ('apple','Apple'), ('carrot','Carrot') ]) """ def __init__(self, attrs=None, choices=(), data_attrs=(), transform=None, transform_html_attrs=None): self.data_attrs = data_attrs self.transform = transform self.transform_html_attrs = transform_html_attrs super(SelectWidget, self).__init__(attrs, choices) def render_option(self, selected_choices, option_value, option_label): option_value = force_text(option_value) other_html = (u' selected="selected"' if option_value in selected_choices else '') other_html += self.transform_option_html_attrs(option_label) data_attr_html = self.get_data_attrs(option_label) if data_attr_html: other_html += ' ' + data_attr_html option_label = self.transform_option_label(option_label) return u'<option value="%s"%s>%s</option>' % ( html.escape(option_value), other_html, option_label) def get_data_attrs(self, option_label): other_html = [] if not isinstance(option_label, (six.string_types, Promise)): for data_attr in self.data_attrs: data_value = html.conditional_escape( force_text(getattr(option_label, data_attr, ""))) other_html.append('data-%s="%s"' % (data_attr, data_value)) return ' '.join(other_html) def transform_option_label(self, option_label): if (not isinstance(option_label, (six.string_types, Promise)) and callable(self.transform)): option_label = self.transform(option_label) return html.conditional_escape(force_text(option_label)) def transform_option_html_attrs(self, option_label): if not callable(self.transform_html_attrs): return '' return flatatt(self.transform_html_attrs(option_label)) class ThemableSelectWidget(SelectWidget): """Bootstrap base select field widget.""" def render(self, name, value, attrs=None, choices=()): # NOTE(woodnt): Currently the "attrs" contents are being added to the # select that's hidden. It's unclear whether this is the # desired behavior. In some cases, the attribute should # remain solely on the now-hidden select. But in others # if it should live on the bootstrap button (visible) # or both. new_choices = [] initial_value = value for opt_value, opt_label in itertools.chain(self.choices, choices): other_html = self.transform_option_html_attrs(opt_label) data_attr_html = self.get_data_attrs(opt_label) if data_attr_html: other_html += ' ' + data_attr_html opt_label = self.transform_option_label(opt_label) # If value exists, save off its label for use if opt_value == value: initial_value = opt_label if other_html: new_choices.append((opt_value, opt_label, other_html)) else: new_choices.append((opt_value, opt_label)) if value is None and new_choices: initial_value = new_choices[0][1] attrs = self.build_attrs(attrs) id = attrs.pop('id', 'id_%s' % name) template = get_template('horizon/common/fields/_themable_select.html') context = Context({ 'name': name, 'options': new_choices, 'id': id, 'value': value, 'initial_value': initial_value, 'select_attrs': attrs, }) return template.render(context) class DynamicSelectWidget(SelectWidget): """A subclass of the ``Select`` widget which renders extra attributes for use in callbacks to handle dynamic changes to the available choices. 
""" _data_add_url_attr = "data-add-item-url" def render(self, *args, **kwargs): add_item_url = self.get_add_item_url() if add_item_url is not None: self.attrs[self._data_add_url_attr] = add_item_url return super(DynamicSelectWidget, self).render(*args, **kwargs) def get_add_item_url(self): if callable(self.add_item_link): return self.add_item_link() try: if self.add_item_link_args: return urlresolvers.reverse(self.add_item_link, args=self.add_item_link_args) else: return urlresolvers.reverse(self.add_item_link) except urlresolvers.NoReverseMatch: return self.add_item_link class ThemableDynamicSelectWidget(ThemableSelectWidget, DynamicSelectWidget): pass class ThemableChoiceField(fields.ChoiceField): """Bootstrap based select field.""" widget = ThemableSelectWidget class DynamicChoiceField(fields.ChoiceField): """A subclass of ``ChoiceField`` with additional properties that make dynamically updating its elements easier. Notably, the field declaration takes an extra argument, ``add_item_link`` which may be a string or callable defining the URL that should be used for the "add" link associated with the field. """ widget = DynamicSelectWidget def __init__(self, add_item_link=None, add_item_link_args=None, *args, **kwargs): super(DynamicChoiceField, self).__init__(*args, **kwargs) self.widget.add_item_link = add_item_link self.widget.add_item_link_args = add_item_link_args class ThemableDynamicChoiceField(DynamicChoiceField): widget = ThemableDynamicSelectWidget class DynamicTypedChoiceField(DynamicChoiceField, fields.TypedChoiceField): """Simple mix of ``DynamicChoiceField`` and ``TypedChoiceField``.""" pass class ThemableDynamicTypedChoiceField(ThemableDynamicChoiceField, fields.TypedChoiceField): """Simple mix of ``ThemableDynamicChoiceField`` & ``TypedChoiceField``.""" pass class ThemableCheckboxInput(widgets.CheckboxInput): """A subclass of the ``Checkbox`` widget which renders extra markup to allow a custom checkbox experience. """ def render(self, name, value, attrs=None): label_for = attrs.get('id', '') if not label_for: attrs['id'] = uuidutils.generate_uuid() label_for = attrs['id'] return html.format_html( u'<div class="themable-checkbox">{}<label for="{}"></label></div>', super(ThemableCheckboxInput, self).render(name, value, attrs), label_for ) class ThemableCheckboxChoiceInput(widgets.CheckboxChoiceInput): def render(self, name=None, value=None, attrs=None, choices=()): if self.id_for_label: label_for = html.format_html(' for="{}"', self.id_for_label) else: label_for = '' attrs = dict(self.attrs, **attrs) if attrs else self.attrs return html.format_html( u'<div class="themable-checkbox">{}<label{}>' + u'<span>{}</span></label></div>', self.tag(attrs), label_for, self.choice_label ) class ThemableCheckboxFieldRenderer(widgets.CheckboxFieldRenderer): choice_input_class = ThemableCheckboxChoiceInput class ThemableCheckboxSelectMultiple(widgets.CheckboxSelectMultiple): renderer = ThemableCheckboxFieldRenderer _empty_value = [] class ExternalFileField(fields.FileField): """A special flavor of FileField which is meant to be used in cases when instead of uploading file to Django it should be uploaded to some external location, while the form validation is done as usual. Should be paired with ExternalUploadMeta metaclass embedded into the Form class. 
""" def __init__(self, *args, **kwargs): super(ExternalFileField, self).__init__(*args, **kwargs) self.widget.attrs.update({'data-external-upload': 'true'}) class ExternalUploadMeta(forms.DeclarativeFieldsMetaclass): """Set this class as the metaclass of a form that contains ExternalFileField in order to process ExternalFileField fields in a specific way. A hidden CharField twin of FieldField is created which contains just the filename (if any file was selected on browser side) and a special `clean` method for FileField is defined which extracts just file name. This allows to avoid actual file upload to Django server, yet process form clean() phase as usual. Actual file upload happens entirely on client-side. """ def __new__(mcs, name, bases, attrs): def get_double_name(name): suffix = '__hidden' slen = len(suffix) return name[:-slen] if name.endswith(suffix) else name + suffix def make_clean_method(field_name): def _clean_method(self): value = self.cleaned_data[field_name] if value: self.cleaned_data[get_double_name(field_name)] = value return value return _clean_method new_attrs = {} for attr_name, attr in attrs.items(): new_attrs[attr_name] = attr if isinstance(attr, ExternalFileField): hidden_field = fields.CharField(widget=fields.HiddenInput, required=False) hidden_field.creation_counter = attr.creation_counter + 1000 new_attr_name = get_double_name(attr_name) new_attrs[new_attr_name] = hidden_field meth_name = 'clean_' + new_attr_name new_attrs[meth_name] = make_clean_method(new_attr_name) return super(ExternalUploadMeta, mcs).__new__( mcs, name, bases, new_attrs)
36.689655
79
0.632832
[ "Apache-2.0" ]
puppetlabs-operations/horizon
horizon/forms/fields.py
15,960
Python
""" Timings against numpy/itk/nibabel/etc where appropriate """ import os import nibabel as nib import itk import ants import time def time_nifti_to_numpy(N_TRIALS): """ Times how fast a framework can read a nifti file and convert it to numpy """ datadir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') img_paths = [] for dtype in ['CHAR', 'DOUBLE', 'FLOAT', 'SHORT', 'UNSIGNEDCHAR', 'UNSIGNEDSHORT']: for dim in [2,3]: img_paths.append(os.path.join(datadir, 'image_%s_%iD.nii.gz' % (dtype, dim))) def test_nibabel(): for img_path in img_paths: array = nib.load(img_path).get_data() def test_itk(): for img_path in img_paths: array = itk.GetArrayFromImage(itk.imread(img_path)) def test_ants(): for img_path in img_paths: array = ants.image_read(img_path).numpy() nib_start = time.time() for i in range(N_TRIALS): test_nibabel() nib_end = time.time() print('NIBABEL TIME: %.3f seconds' % (nib_end-nib_start)) itk_start = time.time() for i in range(N_TRIALS): test_itk() itk_end = time.time() print('ITK TIME: %.3f seconds' % (itk_end-itk_start)) ants_start = time.time() for i in range(N_TRIALS): test_ants() ants_end = time.time() print('ANTS TIME: %.3f seconds' % (ants_end-ants_start)) if __name__ == '__main__': time_nifti_to_numpy(N_TRIALS=1) time_nifti_to_numpy(N_TRIALS=20)
26
89
0.625163
[ "Apache-2.0" ]
ncullen93/ANTsPy
tests/timings.py
1,534
Python
from . import water from . import ion from . import cosolute from . import small_molecule from . import peptide from . import protein from . import dna from . import rna from . import lipid from .get_molecule_type_from_group_names import get_molecule_type_from_group_names from .get_molecule_type_from_sequence import get_molecule_type_from_sequence
25.142857
82
0.838068
[ "MIT" ]
uibcdf/MolModSAKs
molsysmt/element/molecule/__init__.py
352
Python
'''In Repetition Based on User Input, you saw a loop that prompted users until they typed quit. This code won’t work if users type Quit, or QUIT, or any other version that isn’t exactly quit. Modify that loop so that it terminates if a user types that word with any capitalization.''' text = "" while text.lower() != "quit": text = input("Please enter the command quit to exit this program, with any case you'd like: ") if text.lower() == "quit": print("...exiting program")
50
286
0.694
[ "MIT" ]
YordanIH/Intro_to_CS_w_Python
chapter09/exercise06.py
508
Python
# -*- coding: utf-8 -*- # # Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The command group for the vmware nodetypes CLI.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.calliope import base @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class NodeTypesAlpha(base.Group): """Show node types in Google Cloud VMware Engine.""" category = base.COMPUTE_CATEGORY @base.Hidden @base.ReleaseTracks(base.ReleaseTrack.BETA) class NodeTypesBeta(NodeTypesAlpha): """Show node types in Google Cloud VMware Engine."""
32.027778
74
0.770165
[ "Apache-2.0" ]
google-cloud-sdk-unofficial/google-cloud-sdk
lib/surface/vmware/nodetypes/__init__.py
1,153
Python
parts = ["aardvark", "abacus", "abbey", "abdomen", "ability", "abolishment", "abroad", "accelerant", "accelerator", "accident", "accompanist", "accordion", "account", "accountant", "achieve", "achiever", "acid", "acknowledgment", "acoustic", "acoustics", "acrylic", "act", "action", "active", "activity", "actor", "actress", "acupuncture", "ad", "adapter", "addiction", "addition", "address", "adjustment", "administration", "adrenalin", "adult", "advancement", "advantage", "advertisement", "advertising", "advice", "affair", "affect", "afghanistan", "africa", "aftermath", "afternoon", "aftershave", "aftershock", "afterthought", "age", "agency", "agenda", "agent", "aglet", "agreement", "air", "airbag", "airbus", "airfare", "airforce", "airline", "airmail", "airplane", "airport", "airship", "alarm", "alb", "albatross", "alcohol", "alcove", "alder", "algebra", "algeria", "alibi", "allergist", "alley", "alligator", "alloy", "almanac", "almond", "alpaca", "alpenglow", "alpenhorn", "alpha", "alphabet", "alternative", "altitude", "alto", "aluminium", "aluminum", "ambassador", "ambition", "ambulance", "amendment", "america", "amount", "amusement", "anagram", "analgesia", "analog", "analysis", "analyst", "anatomy", "anesthesiology", "anethesiologist", "anger", "angiosperm", "angle", "angora", "angstrom", "anguish", "animal", "anime", "ankle", "anklet", "annual", "anorak", "answer", "ant", "antarctica", "anteater", "antechamber", "antelope", "anthony", "anthropology", "antler", "anxiety", "anybody", "anything", "anywhere", "apartment", "ape", "aperitif", "apology", "apparatus", "apparel", "appeal", "appearance", "appendix", "apple", "applewood", "appliance", "application", "appointment", "approval", "april", "apron", "apse", "aquarius", "aquifer", "arch", "archaeology", "archeology", "archer", "architect", "architecture", "arch-rival", "area", "argentina", "argument", "aries", "arithmetic", "arm", "armadillo", "armament", "armchair", "armoire", "armor", "arm-rest", "army", "arrival", "arrow", "art", "artichoke", "article", "artificer", "ascot", "ash", "ashram", "ashtray", "asia", "asparagus", "aspect", "asphalt", "assignment", "assistance", "assistant", "associate", "association", "assumption", "asterisk", "astrakhan", "astrolabe", "astrologer", "astrology", "astronomy", "atelier", "athelete", "athlete", "atm", "atmosphere", "atom", "atrium", "attachment", "attack", "attempt", "attendant", "attention", "attenuation", "attic", "attitude", "attorney", "attraction", "audience", "auditorium", "august", "aunt", "australia", "author", "authorisation", "authority", "authorization", "automaton", "avalanche", "avenue", "average", "awareness", "azimuth", "babe", "babies", "baboon", "babushka", "baby", "back", "backbone", "backdrop", "backpack", "bacon", "bad", "badge", "badger", "bafflement", "bag", "bagel", "bagpipe", "bagpipes", "bail", "bait", "bake", "baker", "bakery", "bakeware", "balaclava", "balalaika", "balance", "balcony", "balinese", "ball", "balloon", "ballpark", "bamboo", "banana", "band", "bandana", "bandanna", "bandolier", "bangladesh", "bangle", "banjo", "bank", "bankbook", "banker", "banquette", "baobab", "bar", "barbara", "barbeque", "barber", "barbiturate", "barge", "baritone", "barium", "barn", "barometer", "barracks", "barstool", "base", "baseball", "basement", "basin", "basis", "basket", "basketball", "bass", "bassinet", "bassoon", "bat", "bath", "bather", "bathhouse", "bathrobe", "bathroom", "bathtub", "batter", "battery", "batting", "battle", "battleship", "bay", "bayou", "beach", "bead", 
"beak", "beam", "bean", "beanie", "beanstalk", "bear", "beard", "beast", "beat", "beautician", "beauty", "beaver", "bed", "bedroom", "bee", "beech", "beef", "beer", "beet", "beetle", "beggar", "beginner", "begonia", "behavior", "beheading", "behest", "belfry", "belief", "believe", "bell", "belligerency", "bellows", "belly", "belt", "bench", "bend", "beneficiary", "benefit", "bengal", "beret", "berry", "bestseller", "best-seller", "betty", "beverage", "beyond", "bibliography", "bicycle", "bid", "bidet", "bifocals", "big", "big-rig", "bijou", "bike", "bikini", "bill", "billboard", "bin", "biology", "biplane", "birch", "bird", "birdbath", "birdcage", "birdhouse", "bird-watcher", "birth", "birthday", "bit", "bite", "black", "blackberry", "blackboard", "blackfish", "bladder", "blade", "blame", "blank", "blanket", "blazer", "blight", "blinker", "blister", "blizzard", "block", "blocker", "blood", "bloodflow", "bloom", "bloomers", "blossom", "blouse", "blow", "blowgun", "blowhole", "blue", "blueberry", "boar", "board", "boat", "boat-building", "boatload", "boatyard", "bobcat", "body", "bog", "bolero", "bolt", "bomb", "bomber", "bondsman", "bone", "bongo", "bonnet", "bonsai", "bonus", "boogeyman", "book", "bookcase", "bookend", "booklet", "booster", "boot", "bootee", "bootie", "boots", "booty", "border", "bore", "bosom", "botany", "bottle", "bottling", "bottom", "bottom-line", "boudoir", "bough", "boundary", "bow", "bower", "bowl", "bowler", "bowling", "bowtie", "box", "boxer", "boxspring", "boy", "boyfriend", "bra", "brace", "bracelet", "bracket", "brain", "brake", "branch", "brand", "brandy", "brass", "brassiere", "bratwurst", "brazil", "bread", "breadcrumb", "break", "breakfast", "breakpoint", "breast", "breastplate", "breath", "breeze", "bribery", "brick", "bricklaying", "bridge", "brief", "briefs", "brilliant", "british", "broccoli", "brochure", "broiler", "broker", "brome", "bronchitis", "bronco", "bronze", "brooch", "brood", "brook", "broom", "brother", "brother-in-law", "brow", "brown", "brush", "brushfire", "brushing", "bubble", "bucket", "buckle", "bud", "budget", "buffer", "buffet", "bug", "buggy", "bugle", "building", "bulb", "bull", "bulldozer", "bullet", "bull-fighter", "bumper", "bun", "bunch", "bungalow", "bunghole", "bunkhouse", "burglar", "burlesque", "burma", "burn", "burn-out", "burst", "bus", "bush", "business", "bust", "bustle", "butane", "butcher", "butter", "button", "buy", "buyer", "buzzard", "cabana", "cabbage", "cabin", "cabinet", "cable", "caboose", "cacao", "cactus", "caddy", "cadet", "cafe", "caftan", "cake", "calcification", "calculation", "calculator", "calculus", "calendar", "calf", "calico", "call", "calm", "camel", "cameo", "camera", "camp", "campaign", "campanile", "can", "canada", "canal", "cancel", "cancer", "candelabra", "candidate", "candle", "candy", "cane", "cannon", "canoe", "canon", "canopy", "canteen", "canvas", "cap", "cape", "capital", "capitulation", "capon", "cappelletti", "cappuccino", "capricorn", "captain", "caption", "car", "caravan", "carbon", "card", "cardboard", "cardigan", "care", "cargo", "carload", "carnation", "carol", "carotene", "carp", "carpenter", "carpet", "carport", "carriage", "carrier", "carrot", "carry", "cart", "cartilage", "cartload", "cartoon", "cartridge", "cascade", "case", "casement", "cash", "cashier", "casino", "casserole", "cassock", "cast", "castanet", "castanets", "castle", "cat", "catacomb", "catamaran", "category", "caterpillar", "cathedral", "catsup", "cattle", "cauliflower", "cause", "caution", "cave", "c-clamp", 
"cd", "ceiling", "celebration", "celeriac", "celery", "celeste", "cell", "cellar", "cello", "celsius", "cement", "cemetery", "cenotaph", "census", "cent", "centenarian", "center", "centimeter", "centurion", "century", "cephalopod", "ceramic", "cereal", "certification", "cesspool", "chador", "chafe", "chain", "chainstay", "chair", "chairlift", "chairman", "chairperson", "chairwoman", "chaise", "chalet", "chalice", "chalk", "champion", "championship", "chance", "chandelier", "change", "channel", "chap", "chapel", "chapter", "character", "chard", "charge", "charity", "charlatan", "charles", "charm", "chart", "chastity", "chasuble", "chateau", "chauffeur", "chauvinist", "check", "checkroom", "cheek", "cheese", "cheetah", "chef", "chemistry", "cheque", "cherries", "cherry", "chess", "chest", "chick", "chicken", "chicory", "chief", "chiffonier", "child", "childhood", "children", "chill", "chime", "chimpanzee", "chin", "china", "chinese", "chino", "chipmunk", "chit-chat", "chivalry", "chive", "chocolate", "choice", "choker", "chop", "chopstick", "chord", "chowder", "christmas", "christopher", "chrome", "chromolithograph", "chronograph", "chronometer", "chub", "chug", "church", "churn", "cicada", "cigarette", "cinema", "circle", "circulation", "circumference", "cirrus", "citizenship", "city", "civilisation", "clam", "clank", "clapboard", "clarinet", "clasp", "class", "classroom", "claus", "clave", "clavicle", "clavier", "cleaner", "cleat", "cleavage", "clef", "cleric", "clerk", "click", "client", "cliff", "climate", "climb", "clip", "clipper", "cloak", "cloakroom", "clock", "clockwork", "clogs", "cloister", "close", "closet", "cloth", "clothes", "clothing", "cloud", "cloudburst", "cloudy", "clove", "clover", "club", "clutch", "coach", "coal", "coast", "coat", "cob", "cobweb", "cockpit", "cockroach", "cocktail", "cocoa", "cod", "codon", "codpiece", "coevolution", "coffee", "coffin", "coil", "coin", "coinsurance", "coke", "cold", "coliseum", "collar", "collection", "college", "collision", "colloquia", "colombia", "colon", "colonisation", "colony", "color", "colt", "column", "columnist", "comb", "combat", "combination", "comfort", "comfortable", "comic", "comma", "command", "commercial", "commission", "committee", "communicant", "communication", "community", "company", "comparison", "competition", "competitor", "complaint", "complement", "complex", "component", "comportment", "composer", "composition", "compost", "compulsion", "computer", "comradeship", "concept", "concert", "conclusion", "concrete", "condition", "condominium", "condor", "conductor", "cone", "confectionery", "conference", "confidence", "confirmation", "conflict", "confusion", "conga", "congo", "congressman", "congressperson", "congresswoman", "conifer", "connection", "consent", "consequence", "console", "consonant", "conspirator", "constant", "constellation", "construction", "consul", "consulate", "contact lens", "contagion", "contest", "context", "continent", "contract", "contrail", "contrary", "contribution", "control", "convection", "conversation", "convert", "convertible", "cook", "cookie", "cooking", "coonskin", "cope", "cop-out", "copper", "co-producer", "copy", "copyright", "copywriter", "cord", "corduroy", "cork", "cormorant", "corn", "cornerstone", "cornet", "corral", "correspondent", "corridor", "corsage", "cost", "costume", "cot", "cottage", "cotton", "couch", "cougar", "cough", "council", "councilman", "councilor", "councilperson", "councilwoman", "counter", "counter-force", "countess", "country", "county", "couple", 
"courage", "course", "court", "cousin", "covariate", "cover", "coverall", "cow", "cowbell", "cowboy", "crab", "crack", "cracker", "crackers", "cradle", "craftsman", "crash", "crate", "cravat", "craw", "crawdad", "crayfish", "crayon", "cream", "creative", "creator", "creature", "creche", "credenza", "credit", "creditor", "creek", "creme brulee", "crest", "crew", "crib", "cribbage", "cricket", "cricketer", "crime", "criminal", "crinoline", "criteria", "criterion", "criticism", "crocodile", "crocus", "croissant", "crook", "crop", "cross", "cross-contamination", "cross-stitch", "crotch", "croup", "crow", "crowd", "crown", "crude", "crush", "cry", "crystallography", "cub", "cuban", "cuckoo", "cucumber", "cuff-links", "cultivar", "cultivator", "culture", "culvert", "cummerbund", "cup", "cupboard", "cupcake", "cupola", "curio", "curl", "curler", "currency", "current", "cursor", "curtain", "curve", "cushion", "custard", "custodian", "customer", "cut", "cuticle", "cutlet", "cutover", "cutting", "cyclamen", "cycle", "cyclone", "cylinder", "cymbal", "cymbals", "cynic", "cyst", "cytoplasm", "dad", "daffodil", "dagger", "dahlia", "daisy", "damage", "dame", "dance", "dancer", "danger", "daniel", "dark", "dart", "dash", "dashboard", "data", "database", "date", "daughter", "david", "day", "daybed", "dead", "deadline", "deal", "dealer", "dear", "death", "deathwatch", "deborah", "debt", "debtor", "decade", "december", "decimal", "decision", "deck", "declination", "decongestant", "decrease", "decryption", "dedication", "deer", "defense", "deficit", "definition", "deformation", "degree", "delete", "delivery", "demand", "demur", "den", "denim", "dentist", "deodorant", "department", "departure", "dependent", "deployment", "deposit", "depression", "depressive", "depth", "deputy", "derby", "derrick", "description", "desert", "design", "designer", "desire", "desk", "dessert", "destiny", "destroyer", "destruction", "detail", "detainment", "detective", "detention", "determination", "development", "deviance", "device", "dew", "dhow", "diadem", "diamond", "diaphragm", "diarist", "dibble", "dickey", "dictaphone", "diction", "dictionary", "diet", "dietician", "difference", "differential", "difficulty", "digestion", "digger", "digital", "dilapidation", "dill", "dime", "dimension", "dimple", "diner", "dinghy", "dinner", "dinosaur", "diploma", "dipstick", "direction", "director", "dirndl", "dirt", "disadvantage", "disarmament", "disaster", "disco", "disconnection", "discount", "discovery", "discrepancy", "discussion", "disease", "disembodiment", "disengagement", "disguise", "disgust", "dish", "dishes", "dishwasher", "disk", "display", "disposer", "distance", "distribution", "distributor", "district", "divan", "diver", "divide", "divider", "diving", "division", "dock", "doctor", "document", "doe", "dog", "dogsled", "dogwood", "doll", "dollar", "dolman", "dolphin", "domain", "donald", "donkey", "donna", "door", "doorknob", "doorpost", "dorothy", "dory", "dot", "double", "doubling", "doubt", "doubter", "downforce", "downgrade", "downtown", "draft", "dragon", "dragonfly", "dragster", "drain", "drake", "drama", "dramaturge", "draw", "drawbridge", "drawer", "drawing", "dream", "dredger", "dress", "dresser", "dressing", "drill", "drink", "drive", "driver", "driveway", "driving", "drizzle", "dromedary", "drop", "drug", "drum", "drummer", "drunk", "dry", "dryer", "duck", "duckling", "dud", "duffel", "dugout", "dulcimer", "dumbwaiter", "dump truck", "dune buggy", "dungarees", "dungeon", "duplexer", "dust", "dust storm", "duster", 
"duty", "dwarf", "dwelling", "dynamo", "eagle", "ear", "eardrum", "earmuffs", "earplug", "earrings", "earth", "earthquake", "earthworm", "ease", "easel", "east", "eave", "eavesdropper", "e-book", "ecclesia", "eclipse", "ecliptic", "economics", "ecumenist", "eddy", "edge", "edger", "editor", "editorial", "education", "edward", "eel", "effacement", "effect", "effective", "efficacy", "efficiency", "effort", "egg", "egghead", "eggnog", "eggplant", "egypt", "eight", "ejector", "elbow", "election", "electrocardiogram", "element", "elephant", "elevator", "elixir", "elizabeth", "elk", "ellipse", "elm", "elongation", "embossing", "emergence", "emergent", "emery", "emotion", "emphasis", "employ", "employee", "employer", "employment", "empowerment", "emu", "encirclement", "encyclopedia", "end", "endothelium", "enemy", "energy", "engine", "engineer", "engineering", "english", "enigma", "enquiry", "entertainment", "enthusiasm", "entrance", "entry", "environment", "epauliere", "epee", "ephemera", "ephemeris", "epoch", "eponym", "epoxy", "equinox", "equipment", "era", "e-reader", "error", "escape", "espadrille", "espalier", "establishment", "estate", "estimate", "estrogen", "estuary", "ethernet", "ethiopia", "euphonium", "eurocentrism", "europe", "evaluator", "evening", "evening-wear", "event", "eviction", "evidence", "evocation", "exam", "examination", "examiner", "example", "exchange", "excitement", "exclamation", "excuse", "executor", "exhaust", "ex-husband", "exile", "existence", "exit", "expansion", "expansionism", "experience", "expert", "explanation", "exposition", "expression", "extension", "extent", "extreme", "ex-wife", "eye", "eyeball", "eyebrow", "eyebrows", "eyeglasses", "eyelash", "eyelashes", "eyelid", "eyelids", "eyeliner", "eyestrain", "face", "facelift", "facet", "facilities", "facsimile", "fact", "factor", "factory", "faculty", "fahrenheit", "failure", "fairies", "fairy", "fall", "falling-out", "familiar", "family", "fan", "fang", "fanlight", "fanny", "fanny-pack", "farm", "farmer", "fascia", "fat", "father", "father-in-law", "fatigues", "faucet", "fault", "fawn", "fax", "fear", "feast", "feather", "feature", "february", "fedelini", "fedora", "feed", "feedback", "feeling", "feet", "felony", "female", "fen", "fence", "fencing", "fender", "ferry", "ferryboat", "fertilizer", "few", "fiber", "fiberglass", "fibre", "fiction", "fiddle", "field", "fifth", "fight", "fighter", "figurine", "file", "fill", "filly", "filth", "final", "finance", "find", "finding", "fine", "finger", "fingernail", "finisher", "fir", "fire", "fireman", "fireplace", "firewall", "fish", "fishbone", "fisherman", "fishery", "fishing", "fishmonger", "fishnet", "fisting", "fix", "fixture", "flag", "flame", "flanker", "flare", "flash", "flat", "flatboat", "flavor", "flax", "fleck", "fleece", "flesh", "flight", "flintlock", "flip-flops", "flock", "flood", "floor", "floozie", "flower", "flu", "flugelhorn", "fluke", "flute", "fly", "flytrap", "foam", "fob", "focus", "fog", "fold", "folder", "fondue", "font", "food", "foot", "football", "footnote", "footrest", "foot-rest", "footstool", "foray", "force", "forearm", "forebear", "forecast", "forehead", "forest", "forestry", "forgery", "fork", "form", "formal", "format", "former", "fort", "fortnight", "fortress", "fortune", "forum", "foundation", "fountain", "fowl", "fox", "foxglove", "fragrance", "frame", "france", "fratricide", "fraudster", "frazzle", "freckle", "freedom", "freeplay", "freeze", "freezer", "freight", "freighter", "french", "freon", "fresco", "friction", "friday", 
"fridge", "friend", "friendship", "frigate", "fringe", "frock", "frog", "front", "frost", "frown", "fruit", "frustration", "fuel", "fulfillment", "full", "function", "fundraising", "funeral", "funny", "fur", "furnace", "furniture", "fusarium", "futon", "future", "gaffer", "gaiters", "gale", "gall-bladder", "galleon", "gallery", "galley", "gallon", "galoshes", "game", "gamebird", "gamma-ray", "gander", "gap", "garage", "garb", "garbage", "garden", "garlic", "garment", "garter", "gas", "gasoline", "gastropod", "gate", "gateway", "gather", "gauge", "gauntlet", "gazebo", "gazelle", "gear", "gearshift", "geese", "gelding", "gem", "gemini", "gemsbok", "gender", "gene", "general", "genetics", "geography", "geology", "geometry", "george", "geranium", "gerbil", "geriatrician", "german", "germany", "geyser", "ghana", "gherkin", "ghost", "giant", "gigantism", "ginseng", "giraffe", "girdle", "girl", "girlfriend", "git", "glad", "gladiolus", "gland", "glass", "glasses", "glen", "glider", "gliding", "glockenspiel", "glove", "gloves", "glue", "glut", "goal", "goat", "gobbler", "godmother", "goggles", "go-kart", "gold", "goldfish", "golf", "gondola", "gong", "good", "goodbye", "good-bye", "goodie", "goose", "gopher", "gore-tex", "gorilla", "gosling", "governance", "government", "governor", "gown", "grab-bag", "grade", "grain", "gram", "granddaughter", "grandfather", "grandmom", "grandmother", "grandson", "granny", "grape", "grapefruit", "graph", "graphic", "grass", "grasshopper", "grassland", "gray", "grease", "great", "great-grandfather", "great-grandmother", "greece", "greek", "green", "greenhouse", "grenade", "grey", "grief", "grill", "grip", "grit", "grocery", "ground", "group", "grouper", "grouse", "growth", "guarantee", "guatemalan", "guest", "guestbook", "guidance", "guide", "guilty", "guitar", "guitarist", "gum", "gumshoes", "gun", "gutter", "guy", "gym", "gymnast", "gynaecology", "gyro", "hacienda", "hacksaw", "hackwork", "hail", "hair", "haircut", "half", "half-brother", "half-sister", "halibut", "hall", "hallway", "hamaki", "hamburger", "hammer", "hammock", "hamster", "hand", "handball", "hand-holding", "handicap", "handle", "handlebar", "handmaiden", "handsaw", "hang", "harbor", "harbour", "hardboard", "hardcover", "hardening", "hardhat", "hard-hat", "hardware", "harm", "harmonica", "harmony", "harp", "harpooner", "harpsichord", "hassock", "hat", "hatbox", "hatchet", "hate", "haunt", "haversack", "hawk", "hay", "head", "headlight", "headline", "headrest", "health", "hearing", "heart", "heartache", "hearth", "hearthside", "heart-throb", "heartwood", "heat", "heater", "heaven", "heavy", "hedge", "hedgehog", "heel", "height", "heirloom", "helen", "helicopter", "helium", "hell", "hellcat", "helmet", "helo", "help", "hemp", "hen", "herb", "heron", "herring", "hexagon", "heyday", "hide", "high", "highlight", "high-rise", "highway", "hill", "himalayan", "hip", "hippodrome", "hippopotamus", "historian", "history", "hit", "hive", "hobbies", "hobbit", "hobby", "hockey", "hoe", "hog", "hold", "hole", "holiday", "home", "homework", "homogenate", "homonym", "honey", "honeybee", "honoree", "hood", "hoof", "hook", "hope", "hops", "horn", "hornet", "horse", "hose", "hosiery", "hospice", "hospital", "host", "hostel", "hostess", "hot", "hot-dog", "hotel", "hour", "hourglass", "house", "houseboat", "housing", "hovel", "hovercraft", "howitzer", "hub", "hubcap", "hugger", "human", "humidity", "humor", "hunger", "hurdler", "hurricane", "hurry", "hurt", "husband", "hut", "hutch", "hyacinth", "hybridisation", 
"hydrant", "hydraulics", "hydrofoil", "hydrogen", "hyena", "hygienic", "hyphenation", "hypochondria", "hypothermia", "ice", "icebreaker", "icecream", "ice-cream", "icicle", "icon", "idea", "ideal", "igloo", "ikebana", "illegal", "image", "imagination", "impact", "implement", "importance", "impress", "impression", "imprisonment", "improvement", "impudence", "impulse", "inbox", "incandescence", "inch", "income", "increase", "independence", "independent", "index", "india", "indication", "indigence", "indonesia", "industry", "infancy", "inflammation", "inflation", "information", "infusion", "inglenook", "ingrate", "initial", "initiative", "in-joke", "injury", "ink", "in-laws", "inlay", "inn", "innervation", "innocent", "input", "inquiry", "inscription", "insect", "inside", "insolence", "inspection", "inspector", "instance", "instruction", "instrument", "instrumentalist", "instrumentation", "insulation", "insurance", "insurgence", "intelligence", "intention", "interaction", "interactive", "interest", "interferometer", "interior", "interloper", "internal", "internet", "interpreter", "intervenor", "interview", "interviewer", "intestine", "intestines", "introduction", "invention", "inventor", "inventory", "investment", "invite", "invoice", "iPad", "iran", "iraq", "iridescence", "iris", "iron", "ironclad", "island", "israel", "issue", "italy", "jackal", "jacket", "jaguar", "jail", "jailhouse", "jam", "james", "january", "japan", "japanese", "jar", "jasmine", "jason", "jaw", "jeans", "jeep", "jeff", "jelly", "jellyfish", "jennifer", "jet", "jewel", "jewelry", "jiffy", "job", "jockey", "jodhpurs", "joey", "jogging", "john", "join", "joke", "joseph", "jot", "journey", "judge", "judgment", "judo", "juggernaut", "juice", "july", "jumbo", "jump", "jumper", "jumpsuit", "june", "junior", "junk", "junker", "junket", "jury", "justice", "jute", "kale", "kamikaze", "kangaroo", "karate", "karen", "kayak", "kazoo", "kendo", "kenneth", "kenya", "ketch", "ketchup", "kettle", "kettledrum", "kevin", "key", "keyboard", "keyboarding", "keystone", "kick", "kick-off", "kid", "kidney", "kidneys", "kielbasa", "kill", "kilogram", "kilometer", "kilt", "kimberly", "kimono", "kind", "king", "kingfish", "kiosk", "kiss", "kitchen", "kite", "kitten", "kitty", "kleenex", "klomps", "knee", "kneejerk", "knickers", "knife", "knife-edge", "knight", "knitting", "knot", "knowledge", "knuckle", "koala", "kohlrabi", "korean", "lab", "laborer", "lace", "lacquerware", "ladder", "lady", "ladybug", "lake", "lamb", "lamp", "lan", "lanai", "land", "landform", "landmine", "language", "lantern", "lap", "laparoscope", "lapdog", "laptop", "larch", "larder", "lark", "laryngitis", "lasagna", "latency", "latex", "lathe", "latte", "laugh", "laundry", "laura", "law", "lawn", "lawsuit", "lawyer", "layer", "lead", "leader", "leadership", "leaf", "league", "leaker", "learning", "leash", "leather", "leaver", "lecture", "leek", "leg", "legal", "legging", "legume", "lei", "lemon", "lemonade", "lemur", "length", "lentil", "leo", "leopard", "leotard", "leprosy", "let", "letter", "lettuce", "level", "lever", "leverage", "libra", "librarian", "library", "license", "lier", "life", "lift", "light", "lighting", "lightning", "lilac", "lily", "limit", "limo", "line", "linen", "liner", "link", "linseed", "lion", "lip", "lipstick", "liquid", "liquor", "lisa", "list", "literature", "litigation", "litter", "liver", "living", "lizard", "llama", "loaf", "loafer", "loan", "lobotomy", "lobster", "location", "lock", "locker", "locket", "locomotive", "locust", "loft", "log", 
"loggia", "loincloth", "look", "loss", "lot", "lotion", "lounge", "lout", "love", "low", "loyalty", "luck", "luggage", "lumber", "lumberman", "lunch", "luncheonette", "lunchroom", "lung", "lunge", "lute", "luttuce", "lycra", "lye", "lymphocyte", "lynx", "lyocell", "lyre", "lyric", "macadamia", "macaroni", "machine", "macrame", "macrofauna", "maelstrom", "maestro", "magazine", "magic", "magician", "maid", "maiden", "mail", "mailbox", "mailman", "maintenance", "major", "major-league", "makeup", "malaysia", "male", "mall", "mallet", "mambo", "mammoth", "man", "management", "manager", "mandarin", "mandolin", "mangrove", "manhunt", "maniac", "manicure", "manner", "manor", "mansard", "manservant", "mansion", "mantel", "mantle", "mantua", "manufacturer", "manx", "map", "maple", "maraca", "maracas", "marble", "march", "mare", "margaret", "margin", "maria", "mariachi", "marimba", "mark", "market", "marketing", "marksman", "marriage", "marsh", "marshland", "marxism", "mary", "mascara", "mask", "mass", "massage", "master", "mastication", "mastoid", "mat", "match", "material", "math", "mattock", "mattress", "maximum", "may", "maybe", "mayonnaise", "mayor", "meal", "meaning", "measure", "measurement", "meat", "mechanic", "media", "medicine", "medium", "meet", "meeting", "megalomaniac", "melody", "member", "membership", "memory", "men", "menorah", "mention", "menu", "mercury", "mess", "message", "metal", "metallurgist", "meteor", "meteorology", "meter", "methane", "method", "methodology", "metro", "metronome", "mexican", "mexico", "mezzanine", "mice", "michael", "michelle", "microlending", "microwave", "mid-course", "middle", "middleman", "midi", "midline", "midnight", "midwife", "might", "migrant", "mile", "milk", "milkshake", "millennium", "millimeter", "millisecond", "mime", "mimosa", "mind", "mine", "mini", "minibus", "minion", "mini-skirt", "minister", "minor", "minor-league", "mint", "minute", "mirror", "miscarriage", "miscommunication", "misfit", "misogyny", "misplacement", "misreading", "missile", "mission", "mist", "mistake", "mister", "miter", "mitten", "mix", "mixer", "mixture", "moat", "mobile", "moccasins", "mocha", "mode", "model", "modem", "mole", "mom", "moment", "monastery", "monasticism", "monday", "money", "monger", "monitor", "monkey", "monocle", "monotheism", "monsoon", "monster", "month", "mood", "moon", "moonscape", "moonshine", "mop", "Mormon", "morning", "morocco", "morsel", "mortise", "mosque", "mosquito", "most", "motel", "moth", "mother", "mother-in-law", "motion", "motor", "motorboat", "motorcar", "motorcycle", "mound", "mountain", "mouse", "mouser", "mousse", "moustache", "mouth", "mouton", "move", "mover", "movie", "mower", "mud", "mug", "mukluk", "mule", "multimedia", "muscle", "musculature", "museum", "music", "music-box", "musician", "music-making", "mustache", "mustard", "mutt", "myanmar", "mycoplasma", "nail", "name", "naming", "nancy", "nanoparticle", "napkin", "narcissus", "nation", "naturalisation", "nature", "neat", "neck", "necklace", "necktie", "necromancer", "need", "needle", "negligee", "negotiation", "neologism", "neon", "nepal", "nephew", "nerve", "nest", "net", "netball", "netbook", "netsuke", "network", "neurobiologist", "neuropathologist", "neuropsychiatry", "news", "newspaper", "newsprint", "newsstand", "nexus", "nic", "nicety", "niche", "nickel", "niece", "nigeria", "night", "nightclub", "nightgown", "nightingale", "nightlight", "nitrogen", "node", "noise", "nonbeliever", "nonconformist", "nondisclosure", "noodle", "normal", "norse", "north", "north 
america", "north korea", "nose", "note", "notebook", "notice", "notify", "notoriety", "nougat", "novel", "november", "nudge", "number", "numeracy", "numeric", "numismatist", "nurse", "nursery", "nurture", "nut", "nylon", "oak", "oar", "oasis", "oatmeal", "obi", "objective", "obligation", "oboe", "observation", "observatory", "occasion", "occupation", "ocean", "ocelot", "octagon", "octave", "octavo", "octet", "october", "octopus", "odometer", "oeuvre", "offence", "offer", "office", "official", "off-ramp", "oil", "okra", "oldie", "olive", "omega", "omelet", "oncology", "one", "onion", "open", "opening", "opera", "operation", "ophthalmologist", "opinion", "opium", "opossum", "opportunist", "opportunity", "opposite", "option", "orange", "orangutan", "orator", "orchard", "orchestra", "orchid", "order", "ordinary", "ordination", "organ", "organisation", "organization", "original", "ornament", "osmosis", "osprey", "ostrich", "others", "otter", "ottoman", "ounce", "outback", "outcome", "outfit", "outhouse", "outlay", "output", "outrigger", "outset", "outside", "oval", "ovary", "oven", "overcharge", "overclocking", "overcoat", "overexertion", "overflight", "overnighter", "overshoot", "owl", "owner", "ox", "oxen", "oxford", "oxygen", "oyster", "pacemaker", "pack", "package", "packet", "pad", "paddle", "paddock", "page", "pagoda", "pail", "pain", "paint", "painter", "painting", "paintwork", "pair", "pajama", "pajamas", "pakistan", "paleontologist", "paleontology", "palm", "pamphlet", "pan", "pancake", "pancreas", "panda", "panic", "pannier", "panpipe", "pansy", "panther", "panties", "pantry", "pants", "pantsuit", "panty", "pantyhose", "paper", "paperback", "parable", "parachute", "parade", "parallelogram", "paramedic", "parcel", "parchment", "parent", "parentheses", "park", "parka", "parrot", "parsnip", "part", "participant", "particle", "particular", "partner", "partridge", "party", "passage", "passbook", "passenger", "passion", "passive", "pasta", "paste", "pastor", "pastoralist", "pastry", "patch", "path", "patience", "patient", "patina", "patio", "patriarch", "patricia", "patrimony", "patriot", "patrol", "pattern", "paul", "pavement", "pavilion", "paw", "pawnshop", "payee", "payment", "pea", "peace", "peach", "peacoat", "peacock", "peak", "peanut", "pear", "pearl", "pedal", "pedestrian", "pediatrician", "peen", "peer", "peer-to-peer", "pegboard", "pelican", "pelt", "pen", "penalty", "pencil", "pendant", "pendulum", "penicillin", "pension", "pentagon", "peony", "people", "pepper", "percentage", "perception", "perch", "performance", "perfume", "period", "periodical", "peripheral", "permafrost", "permission", "permit", "perp", "person", "personality", "perspective", "peru", "pest", "pet", "petal", "petticoat", "pew", "pharmacist", "pharmacopoeia", "phase", "pheasant", "philippines", "philosopher", "philosophy", "phone", "photo", "photographer", "phrase", "physical", "physician", "physics", "pianist", "piano", "piccolo", "pick", "pickax", "picket", "pickle", "picture", "pie", "piece", "pier", "piety", "pig", "pigeon", "pike", "pile", "pilgrimage", "pillbox", "pillow", "pilot", "pimp", "pimple", "pin", "pinafore", "pince-nez", "pine", "pineapple", "pinecone", "ping", "pink", "pinkie", "pinstripe", "pint", "pinto", "pinworm", "pioneer", "pipe", "piracy", "piranha", "pisces", "piss", "pitch", "pitching", "pith", "pizza", "place", "plain", "plane", "planet", "plant", "plantation", "planter", "plaster", "plasterboard", "plastic", "plate", "platform", "platinum", "platypus", "play", "player", "playground", 
"playroom", "pleasure", "pleated", "plier", "plot", "plough", "plover", "plow", "plowman", "plume", "plunger", "plywood", "pneumonia", "pocket", "pocketbook", "pocket-watch", "poem", "poet", "poetry", "poignance", "point", "poison", "poisoning", "poland", "pole", "polenta", "police", "policeman", "policy", "polish", "politician", "politics", "pollution", "polo", "polyester", "pompom", "poncho", "pond", "pony", "poof", "pool", "popcorn", "poppy", "popsicle", "population", "populist", "porch", "porcupine", "port", "porter", "portfolio", "porthole", "position", "positive", "possession", "possibility", "postage", "postbox", "poster", "pot", "potato", "potential", "potty", "pouch", "poultry", "pound", "pounding", "powder", "power", "precedent", "precipitation", "preface", "preference", "prelude", "premeditation", "premier", "preoccupation", "preparation", "presence", "presentation", "president", "pressroom", "pressure", "pressurisation", "price", "pride", "priest", "priesthood", "primary", "primate", "prince", "princess", "principal", "print", "printer", "priority", "prison", "prize", "prizefight", "probation", "problem", "procedure", "process", "processing", "produce", "producer", "product", "production", "profession", "professional", "professor", "profit", "program", "project", "promotion", "prompt", "proof-reader", "propane", "property", "proposal", "prose", "prosecution", "protection", "protest", "protocol", "prow", "pruner", "pseudoscience", "psychiatrist", "psychoanalyst", "psychologist", "psychology", "ptarmigan", "publisher", "pudding", "puddle", "puffin", "pull", "pulley", "puma", "pump", "pumpkin", "pumpkinseed", "punch", "punishment", "pupa", "pupil", "puppy", "purchase", "puritan", "purple", "purpose", "purse", "push", "pusher", "put", "pvc", "pyjama", "pyramid", "quadrant", "quail", "quality", "quantity", "quart", "quarter", "quartz", "queen", "question", "quicksand", "quiet", "quill", "quilt", "quince", "quit", "quiver", "quotation", "rabbi", "rabbit", "raccoon", "race", "racer", "racing", "racist", "rack", "radar", "radiator", "radio", "radiosonde", "radish", "raffle", "raft", "rag", "rage", "rail", "railway", "raiment", "rain", "rainbow", "raincoat", "rainmaker", "rainstorm", "raise", "rake", "ram", "rambler", "ramie", "ranch", "random", "randomisation", "range", "rank", "raspberry", "rat", "rate", "ratio", "raven", "ravioli", "raw", "rawhide", "ray", "rayon", "reactant", "reaction", "read", "reading", "reality", "reamer", "rear", "reason", "receipt", "reception", "recess", "recipe", "recliner", "recognition", "recommendation", "record", "recorder", "recording", "recover", "recruit", "rectangle", "red", "redesign", "rediscovery", "reduction", "reef", "refectory", "reflection", "refrigerator", "refund", "refuse", "region", "register", "regret", "regular", "regulation", "reindeer", "reinscription", "reject", "relation", "relationship", "relative", "religion", "relish", "reminder", "rent", "repair", "reparation", "repeat", "replace", "replacement", "replication", "reply", "report", "representative", "reprocessing", "republic", "reputation", "request", "requirement", "resale", "research", "resident", "resist", "resolution", "resource", "respect", "respite", "response", "responsibility", "rest", "restaurant", "result", "retailer", "rethinking", "retina", "retouch", "return", "reveal", "revenant", "revenue", "review", "revolution", "revolve", "revolver", "reward", "rheumatism", "rhinoceros", "rhyme", "rhythm", "rice", "richard", "riddle", "ride", "rider", "ridge", "rifle", "right", 
"rim", "ring", "ringworm", "ripple", "rise", "riser", "risk", "river", "riverbed", "rivulet", "road", "roadway", "roast", "robe", "robert", "robin", "rock", "rocker", "rocket", "rocket-ship", "rod", "role", "roll", "roller", "romania", "ronald", "roof", "room", "rooster", "root", "rope", "rose", "rostrum", "rotate", "roundabout", "route", "router", "routine", "row", "rowboat", "royal", "rub", "rubber", "rubric", "ruckus", "ruffle", "rugby", "rule", "run", "runaway", "runner", "russia", "rutabaga", "ruth", "sabre", "sack", "sad", "saddle", "safe", "safety", "sage", "sagittarius", "sail", "sailboat", "sailor", "salad", "salary", "sale", "salesman", "salmon", "salon", "saloon", "salt", "samovar", "sampan", "sample", "samurai", "sand", "sandals", "sandbar", "sandra", "sandwich", "santa", "sarah", "sardine", "sari", "sarong", "sash", "satellite", "satin", "satire", "satisfaction", "saturday", "sauce", "saudi arabia", "sausage", "save", "saving", "savior", "saviour", "saw", "saxophone", "scale", "scallion", "scanner", "scarecrow", "scarf", "scarification", "scene", "scent", "schedule", "scheme", "schizophrenic", "schnitzel", "school", "schoolhouse", "schooner", "science", "scimitar", "scissors", "scooter", "score", "scorn", "scorpio", "scorpion", "scow", "scraper", "screamer", "screen", "screenwriting", "screw", "screwdriver", "screw-up", "scrim", "scrip", "sculpting", "sculpture", "sea", "seagull", "seal", "seaplane", "search", "seashore", "season", "seat", "second", "secretariat", "secretary", "section", "sectional", "sector", "secure", "security", "seed", "seeder", "segment", "select", "selection", "self", "sell", "semicircle", "semicolon", "senator", "sense", "sentence", "sepal", "september", "septicaemia", "series", "servant", "server", "service", "session", "set", "setting", "settler", "sewer", "sex", "shack", "shade", "shadow", "shadowbox", "shake", "shakedown", "shaker", "shallot", "shame", "shampoo", "shanty", "shape", "share", "shark", "sharon", "shawl", "shearling", "shears", "sheath", "shed", "sheep", "sheet", "shelf", "shell", "sherry", "shield", "shift", "shin", "shine", "shingle", "ship", "shirt", "shirtdress", "shoat", "shock", "shoe", "shoehorn", "shoe-horn", "shoelace", "shoemaker", "shoes", "shoestring", "shofar", "shoot", "shootdown", "shop", "shopper", "shopping", "shore", "shortage", "shorts", "shortwave", "shot", "shoulder", "shovel", "show", "shower", "show-stopper", "shred", "shrimp", "shrine", "siamese", "sibling", "sick", "side", "sideboard", "sideburns", "sidecar", "sidestream", "sidewalk", "siding", "sign", "signature", "signet", "significance", "signup", "silica", "silk", "silkworm", "sill", "silo", "silver", "simple", "sing", "singer", "single", "sink", "sir", "sister", "sister-in-law", "sit", "sitar", "situation", "size", "skate", "skiing", "skill", "skin", "skirt", "skulduggery", "skull", "skullcap", "skullduggery", "skunk", "sky", "skylight", "skyscraper", "skywalk", "slapstick", "slash", "slave", "sled", "sledge", "sleep", "sleet", "sleuth", "slice", "slider", "slime", "slip", "slipper", "slippers", "slope", "sloth", "smash", "smell", "smelting", "smile", "smock", "smog", "smoke", "smuggling", "snail", "snake", "snakebite", "sneakers", "sneeze", "snob", "snorer", "snow", "snowboarding", "snowflake", "snowman", "snowmobiling", "snowplow", "snowstorm", "snowsuit", "snuggle", "soap", "soccer", "society", "sociology", "sock", "socks", "soda", "sofa", "softball", "softdrink", "softening", "software", "soil", "soldier", "solid", "solitaire", "solution", "sombrero", 
"somersault", "somewhere", "son", "song", "songbird", "sonnet", "soot", "soprano", "sorbet", "sort", "soulmate", "sound", "soup", "source", "sourwood", "sousaphone", "south", "south africa", "south america", "south korea", "sow", "soy", "soybean", "space", "spacing", "spade", "spaghetti", "spain", "spandex", "spank", "spark", "sparrow", "spasm", "speaker", "speakerphone", "spear", "special", "specialist", "specific", "spectacle", "spectacles", "spectrograph", "speech", "speedboat", "spend", "sphere", "sphynx", "spider", "spike", "spinach", "spine", "spiral", "spirit", "spiritual", "spite", "spleen", "split", "sponge", "spoon", "sport", "spot", "spotlight", "spray", "spread", "spring", "sprinter", "sprout", "spruce", "spume", "spur", "spy", "square", "squash", "squatter", "squeegee", "squid", "squirrel", "stable", "stack", "stacking", "stadium", "staff", "stag", "stage", "stain", "stair", "staircase", "stallion", "stamen", "stamina", "stamp", "stance", "standoff", "star", "start", "starter", "state", "statement", "station", "station-wagon", "statistic", "statistician", "steak", "steal", "steam", "steamroller", "steel", "steeple", "stem", "stencil", "step", "step-aunt", "step-brother", "stepdaughter", "step-daughter", "step-father", "step-grandfather", "step-grandmother", "stepmother", "step-mother", "stepping-stone", "steps", "step-sister", "stepson", "step-son", "step-uncle", "steven", "stew", "stick", "stiletto", "still", "stinger", "stitch", "stock", "stocking", "stockings", "stock-in-trade", "stole", "stomach", "stone", "stonework", "stool", "stop", "stopsign", "stopwatch", "storage", "store", "storey", "storm", "story", "storyboard", "story-telling", "stove", "strait", "stranger", "strap", "strategy", "straw", "strawberry", "stream", "street", "streetcar", "stress", "stretch", "strike", "string", "strip", "structure", "struggle", "stud", "student", "studio", "study", "stuff", "stumbling", "sturgeon", "style", "styling", "stylus", "subcomponent", "subconscious", "submarine", "subroutine", "subsidence", "substance", "suburb", "subway", "success", "suck", "sudan", "suede", "suffocation", "sugar", "suggestion", "suit", "suitcase", "sultan", "summer", "sun", "sunbeam", "sunbonnet", "sunday", "sundial", "sunflower", "sunglasses", "sunlamp", "sunroom", "sunshine", "supermarket", "supply", "support", "supporter", "suppression", "surface", "surfboard", "surgeon", "surgery", "surname", "surprise", "susan", "sushi", "suspect", "suspenders", "sustainment", "SUV", "swallow", "swamp", "swan", "swath", "sweat", "sweater", "sweats", "sweatshirt", "sweatshop", "sweatsuit", "swedish", "sweets", "swell", "swim", "swimming", "swimsuit", "swing", "swiss", "switch", "switchboard", "swivel", "sword", "swordfish", "sycamore", "sympathy", "syndicate", "synergy", "synod", "syria", "syrup", "system", "tabby", "tabernacle", "table", "tablecloth", "tabletop", "tachometer", "tackle", "tadpole", "tail", "tailor", "tailspin", "taiwan", "tale", "talk", "tam", "tambour", "tambourine", "tam-o'-shanter", "tandem", "tangerine", "tank", "tanker", "tankful", "tank-top", "tanzania", "tap", "target", "tassel", "taste", "tatami", "tattler", "tattoo", "taurus", "tavern", "tax", "taxi", "taxicab", "tea", "teacher", "teaching", "team", "tear", "technician", "technologist", "technology", "teen", "teeth", "telephone", "telescreen", "teletype", "television", "teller", "temp", "temper", "temperature", "temple", "tempo", "temporariness", "temptress", "tendency", "tenement", "tennis", "tenor", "tension", "tent", "tepee", "term", 
"terracotta", "terrapin", "territory", "test", "text", "textbook", "texture", "thailand", "thanks", "thaw", "theater", "theism", "theme", "theoretician", "theory", "therapist", "thermals", "thermometer", "thigh", "thing", "thinking", "thistle", "thomas", "thong", "thongs", "thorn", "thought", "thread", "thrill", "throat", "throne", "thrush", "thumb", "thunder", "thunderbolt", "thunderhead", "thunderstorm", "thursday", "tiara", "tic", "ticket", "tie", "tiger", "tight", "tights", "tile", "till", "timbale", "time", "timeline", "timeout", "timer", "timpani", "tin", "tinderbox", "tinkle", "tintype", "tip", "tire", "tissue", "titanium", "title", "toad", "toast", "toe", "toenail", "toga", "togs", "toilet", "tom", "tomato", "tomography", "tomorrow", "tom-tom", "ton", "tongue", "toot", "tooth", "toothbrush", "toothpaste", "toothpick", "top", "top-hat", "topic", "topsail", "toque", "torchiere", "toreador", "tornado", "torso", "tortellini", "tortoise", "tosser", "total", "tote", "touch", "tough", "tough-guy", "tour", "tourist", "towel", "tower", "town", "townhouse", "tow-truck", "toy", "trachoma", "track", "tracksuit", "tractor", "trade", "tradition", "traditionalism", "traffic", "trail", "trailer", "train", "trainer", "training", "tram", "tramp", "transaction", "translation", "transmission", "transom", "transport", "transportation", "trapdoor", "trapezium", "trapezoid", "trash", "travel", "tray", "treatment", "tree", "trellis", "tremor", "trench", "trial", "triangle", "tribe", "trick", "trigonometry", "trim", "trinket", "trip", "tripod", "trolley", "trombone", "trooper", "trouble", "trousers", "trout", "trove", "trowel", "truck", "truckit", "trumpet", "trunk", "trust", "truth", "try", "t-shirt", "tsunami", "tub", "tuba", "tube", "tuesday", "tugboat", "tulip", "tummy", "tuna", "tune", "tune-up", "tunic", "tunnel", "turban", "turkey", "turkish", "turn", "turnip", "turnover", "turnstile", "turret", "turtle", "tussle", "tutu", "tuxedo", "tv", "twig", "twilight", "twine", "twist", "twister", "two", "typewriter", "typhoon", "tyvek", "uganda", "ukraine", "ukulele", "umbrella", "unblinking", "uncle", "underclothes", "underground", "underneath", "underpants", "underpass", "undershirt", "understanding", "underwear", "underwire", "unibody", "uniform", "union", "unit", "united kingdom", "university", "urn", "use", "user", "usher", "utensil", "uzbekistan", "vacation", "vacuum", "vagrant", "valance", "valley", "valuable", "value", "van", "vane", "vanity", "variation", "variety", "vase", "vast", "vault", "vaulting", "veal", "vegetable", "vegetarian", "vehicle", "veil", "vein", "veldt", "vellum", "velodrome", "velvet", "venezuela", "venezuelan", "venom", "veranda", "verdict", "vermicelli", "verse", "version", "vertigo", "verve", "vessel", "vest", "vestment", "vibe", "vibraphone", "vibration", "video", "vietnam", "view", "villa", "village", "vineyard", "vinyl", "viola", "violet", "violin", "virginal", "virgo", "virtue", "virus", "viscose", "vise", "vision", "visit", "visitor", "visor", "vixen", "voice", "volcano", "volleyball", "volume", "voyage", "vulture", "wad", "wafer", "waffle", "waist", "waistband", "waiter", "waitress", "walk", "walker", "walkway", "wall", "wallaby", "wallet", "walnut", "walrus", "wampum", "wannabe", "war", "warden", "warlock", "warm-up", "warning", "wash", "washbasin", "washcloth", "washer", "washtub", "wasp", "waste", "wastebasket", "watch", "watchmaker", "water", "waterbed", "waterfall", "waterskiing", "waterspout", "wave", "wax", "way", "weakness", "wealth", "weapon", "weasel", "weather", 
"web", "wedding", "wedge", "wednesday", "weed", "weeder", "weedkiller", "week", "weekend", "weekender", "weight", "weird", "well", "west", "western", "wet-bar", "wetsuit", "whale", "wharf", "wheel", "whip", "whirlpool", "whirlwind", "whisker", "whiskey", "whistle", "white", "whole", "wholesale", "wholesaler", "whorl", "wife", "wilderness", "will", "william", "willow", "wind", "windage", "wind-chime", "window", "windscreen", "windshield", "wine", "wing", "wingman", "wingtip", "winner", "winter", "wire", "wiseguy", "wish", "wisteria", "witch", "witch-hunt", "withdrawal", "witness", "wolf", "woman", "wombat", "women", "wood", "woodland", "woodshed", "woodwind", "wool", "woolen", "word", "work", "workbench", "worker", "workhorse", "worklife", "workshop", "world", "worm", "worthy", "wound", "wrap", "wraparound", "wrecker", "wren", "wrench", "wrestler", "wrinkle", "wrist", "writer", "writing", "wrong", "xylophone", "yacht", "yak", "yam", "yard", "yarmulke", "yarn", "yawl", "year", "yellow", "yesterday", "yew", "yin", "yogurt", "yoke", "young", "youth", "yurt", "zampone", "zebra", "zebrafish", "zephyr", "ziggurat", "zinc", "zipper", "zither", "zone", "zoo", "zoologist", "zoology", "zoot-suit", "zucchinis" ]
15,801
47,391
0.625973
[ "MIT" ]
flapperleenie/thevocals
python/words.py
47,403
Python
""" This file offers the methods to automatically retrieve the graph Elusimicrobia bacterium RIFOXYC2_FULL_34_12. The graph is automatically retrieved from the STRING repository. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen import Graph # pylint: disable=import-error def ElusimicrobiaBacteriumRifoxyc2Full3412( directed: bool = False, preprocess: bool = True, load_nodes: bool = True, verbose: int = 2, cache: bool = True, cache_path: str = "graphs/string", version: str = "links.v11.5", **additional_graph_kwargs: Dict ) -> Graph: """Return new instance of the Elusimicrobia bacterium RIFOXYC2_FULL_34_12 graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False Wether to load the graph as directed or undirected. By default false. preprocess: bool = True Whether to preprocess the graph to be loaded in optimal time and memory. load_nodes: bool = True, Whether to load the nodes vocabulary or treat the nodes simply as a numeric range. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache: bool = True Whether to use cache, i.e. download files only once and preprocess them only once. cache_path: str = "graphs" Where to store the downloaded graphs. version: str = "links.v11.5" The version of the graph to retrieve. The available versions are: - homology.v11.5 - physical.links.v11.5 - links.v11.5 additional_graph_kwargs: Dict Additional graph kwargs. Returns ----------------------- Instace of Elusimicrobia bacterium RIFOXYC2_FULL_34_12 graph. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ return AutomaticallyRetrievedGraph( graph_name="ElusimicrobiaBacteriumRifoxyc2Full3412", repository="string", version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
34.12381
223
0.686855
[ "MIT" ]
AnacletoLAB/ensmallen
bindings/python/ensmallen/datasets/string/elusimicrobiabacteriumrifoxyc2full3412.py
3,583
Python
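The record above is one of the auto-generated STRING retrieval modules shipped with ensmallen. As a minimal, hedged usage sketch only: the import path below is inferred from the record's `path` field (`bindings/python/ensmallen/datasets/string/...`), and no method of the returned `Graph` is assumed beyond printing it.

```python
# Illustrative only; the module path is inferred from the record's metadata,
# not stated in the code itself.
from ensmallen.datasets.string import ElusimicrobiaBacteriumRifoxyc2Full3412

# Download (or reuse the cached copy of) the undirected, preprocessed graph.
graph = ElusimicrobiaBacteriumRifoxyc2Full3412(
    directed=False,
    preprocess=True,
    version="links.v11.5",
)
print(graph)  # an ensmallen Graph instance
```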
import os import math import argparse import pickle import torch import torch.distributed as dist from torch.nn import Parameter import torch.nn.functional as F from sparse_coo_tensor_cpp import sparse_coo_tensor_gpu, spmm_gpu import utils from dist_data import DistData run = 0 def outer_product2(inputs, ag): torch.cuda.synchronize() g_timer.start(f'gcn_mm_ep{cur_epoch}') grad_weight = torch.mm(inputs, ag) # (H^(l-1))^T * (A * G^l) torch.cuda.synchronize() g_timer.stop(f'gcn_mm_ep{cur_epoch}')#, 'comp') g_timer.start(f'gcn_allreduce_ep{cur_epoch}') dist.all_reduce(grad_weight, op=dist.ReduceOp.SUM, group=g_env.world_group) torch.cuda.synchronize() g_timer.stop(f'gcn_allreduce_ep{cur_epoch}')#, 'comm') return grad_weight def p2p_broadcast(t, src): for dst in range(g_env.world_size): if src == dst or g_env.rank not in (src, dst): # g_logger.log('p2p bcast skip', src, dst) continue dst_adj_nz_col = g_data.nz_col_dict[(dst, src)] # non zero needed_rows_idx = dst_adj_nz_col if g_env.rank == src: p2p_buf = t[needed_rows_idx] elif g_env.rank == dst: p2p_buf = torch.zeros((needed_rows_idx.size(0), t.size(1)), device=g_env.device) # g_logger.log('p2p data ready', src, dst, 'needed size',p2p_buf.size(0), 'full size', t.size(0)) dist.broadcast(p2p_buf, src, group=g_env.p2p_group_dict[(src, dst)]) # g_logger.log('p2p bcast done', src, dst) if g_env.rank == dst: t[needed_rows_idx] = p2p_buf # g_logger.log('p2p dst done', src, dst) return cur_epoch = 0 cache_features = [None]*8 cache_layer2 = [None]*8 cache_backward_layer1 = [None]*8 cache_backward_layer2 = [None]*8 cache_enabled = True def broad_func(node_count, am_partitions, inputs, btype=None): global cache_features device = g_env.device n_per_proc = math.ceil(float(node_count) / g_env.world_size) z_loc = torch.zeros((am_partitions[0].size(0), inputs.size(1)), device=device) inputs_recv = torch.zeros((n_per_proc, inputs.size(1)), device=device) for i in range(g_env.world_size): layer1_use_cache = cur_epoch >= 1 layer2_use_cache = cur_epoch >= 50 and cur_epoch % 5 != 0 # layer2_use_cache = False backward_layer2_use_cache = cur_epoch >= 50 and cur_epoch % 5 != 0 backward_layer2_use_cache = False backward_layer1_use_cache = cur_epoch >= 50 and cur_epoch % 5 != 0 backward_layer1_use_cache = False if i == g_env.rank: inputs_recv = inputs.clone() elif i == g_env.world_size - 1: inputs_recv = torch.zeros((am_partitions[i].size(1), inputs.size(1)), device=device) g_timer.barrier_all() torch.cuda.synchronize() g_timer.start(f'gcn_broadcast_{btype}_ep{cur_epoch}') if not cache_enabled: dist.broadcast(inputs_recv, src=i, group=g_env.world_group) else: if btype == 'layer1': if layer1_use_cache: # g_logger.log(cur_epoch, i, 'use cache', btype) if g_env.rank != i: # g_logger.log(cur_epoch, i, 'no copy', btype, type(inputs_recv), inputs_recv.size()) inputs_recv = cache_features[i] else: # g_logger.log(cur_epoch, i, 'do nothing', btype, type(inputs_recv), inputs_recv.size()) pass else: dist.broadcast(inputs_recv, src=i, group=g_env.world_group) # if cache_features[i] is not None: # g_logger.log(cur_epoch, i,'normal broadcast', torch.sum(inputs_recv), torch.sum(cache_features[i] )) cache_features[i] = inputs_recv.clone() elif btype == 'layer2': if layer2_use_cache: if g_env.rank != i: inputs_recv = cache_layer2[i] else: dist.broadcast(inputs_recv, src=i, group=g_env.world_group) # if cache_layer2[i] is not None: # g_logger.log(cur_epoch, i,'normal broadcast', torch.sum(inputs_recv), torch.sum(cache_layer2[i] )) cache_layer2[i] = inputs_recv.clone() elif btype == 
'backward_layer2': if backward_layer2_use_cache: if g_env.rank != i: inputs_recv = cache_backward_layer2[i] else: dist.broadcast(inputs_recv, src=i, group=g_env.world_group) cache_backward_layer2[i] = inputs_recv.clone() elif btype == 'backward_layer1': if backward_layer1_use_cache: if g_env.rank != i: inputs_recv = cache_backward_layer1[i] else: dist.broadcast(inputs_recv, src=i, group=g_env.world_group) cache_backward_layer1[i] = inputs_recv.clone() else: dist.broadcast(inputs_recv, src=i, group=g_env.world_group) # p2p_broadcast(inputs_recv, i) torch.cuda.synchronize() # comm or comp? g_timer.stop(f'gcn_broadcast_{btype}_ep{cur_epoch}')#,'comm') g_logger.log(f'[gcn_broadcast_{btype}_ep{cur_epoch}] size: {utils.mem_report(inputs_recv)} MBytes') g_timer.barrier_all() torch.cuda.synchronize() g_timer.start(f'gcn_spmm_ep{cur_epoch}') spmm_gpu(am_partitions[i].indices()[0].int(), am_partitions[i].indices()[1].int(), am_partitions[i].values(), am_partitions[i].size(0), am_partitions[i].size(1), inputs_recv, z_loc) torch.cuda.synchronize() g_timer.stop(f'gcn_spmm_ep{cur_epoch}')#, 'comp') g_timer.barrier_all() return z_loc class GCNFunc(torch.autograd.Function): @staticmethod def forward(ctx, inputs, weight, adj_matrix, am_partitions, activation_func, btype): ctx.save_for_backward(inputs, weight, adj_matrix) ctx.am_partitions = am_partitions ctx.activation_func = activation_func ctx.btype = btype z = broad_func(adj_matrix.size(0), am_partitions, inputs, btype=btype) torch.cuda.synchronize() g_timer.start(f'gcn_mm_ep{cur_epoch}') z = torch.mm(z, weight) torch.cuda.synchronize() g_timer.stop(f'gcn_mm_ep{cur_epoch}') #, 'comp') z.requires_grad = True ctx.z = z return activation_func(z) @staticmethod def backward(ctx, grad_output): inputs, weight, adj_matrix = ctx.saved_tensors btype = ctx.btype am_partitions = ctx.am_partitions with torch.set_grad_enabled(True): func_eval = ctx.activation_func(ctx.z) sigmap = torch.autograd.grad(outputs=func_eval, inputs=ctx.z, grad_outputs=grad_output)[0] grad_output = sigmap # First backprop equation ag = broad_func(adj_matrix.size(0), am_partitions, grad_output, btype='backward_'+btype) torch.cuda.synchronize() g_timer.start(f'gcn_mm_ep{cur_epoch}') grad_input = torch.mm(ag, weight.t()) torch.cuda.synchronize() g_timer.stop(f'gcn_mm_ep{cur_epoch}')#, 'comp') # Second backprop equation (reuses the A * G^l computation) grad_weight = outer_product2(inputs.t(), ag) return grad_input, grad_weight, None, None, None, None, None, None def train(inputs, weight1, weight2, adj_matrix, am_partitions, optimizer, local_train_mask, local_labels, device): outputs = GCNFunc.apply(inputs, weight1, adj_matrix, am_partitions, F.relu, 'layer1') outputs = GCNFunc.apply(outputs, weight2, adj_matrix, am_partitions, lambda x:F.log_softmax(x, dim=1), 'layer2') optimizer.zero_grad() if list(local_labels[local_train_mask].size())[0] > 0: loss = F.nll_loss(outputs[local_train_mask], local_labels[local_train_mask]) loss.backward() else: fake_loss = (outputs * torch.cuda.FloatTensor(outputs.size(), device=device).fill_(0)).sum() fake_loss.backward() optimizer.step() return outputs def test(outputs, vertex_count): logits, accs = outputs, [] for mask in [g_data.g.train_mask, g_data.g.val_mask, g_data.g.test_mask]: pred = logits[mask].max(1)[1] acc = pred.eq(g_data.g.labels[mask]).sum().item() / mask.sum().item() accs.append(acc) return accs def main(): global run global cur_epoch inputs_loc, adj_matrix_loc, am_pbyp = g_data.local_features, g_data.local_adj, g_data.local_adj_parts # 
am_partition: adjacency matrix partition device = g_env.device torch.cuda.synchronize() for i in range(args.run_count): run = i torch.manual_seed(0) weight1_nonleaf = torch.rand(g_data.g.num_features, args.mid_layer, requires_grad=True, device=device) weight1_nonleaf.retain_grad() weight2_nonleaf = torch.rand(args.mid_layer, g_data.g.num_classes, requires_grad=True, device=device) weight2_nonleaf.retain_grad() weight1 = Parameter(weight1_nonleaf) weight2 = Parameter(weight2_nonleaf) optimizer = torch.optim.Adam([weight1, weight2], lr=0.01) local_train_mask = torch.split(g_data.g.train_mask.bool(), am_pbyp[0].size(0), dim=0)[g_env.rank] local_labels = torch.split(g_data.g.labels, am_pbyp[0].size(0), dim=0)[g_env.rank] for epoch in range(args.epochs): cur_epoch = epoch g_timer.start('train') outputs = train(inputs_loc, weight1, weight2, adj_matrix_loc, am_pbyp, optimizer, local_train_mask, local_labels, device) g_timer.stop('train') # if epoch%10==0: # g_logger.log("Epoch: {:03d}".format(epoch), oneline=True) if (epoch+1)%5==0: n_per_proc = math.ceil(g_data.g.features.size(0) / g_env.world_size) output_parts = [torch.zeros(n_per_proc, g_data.g.num_classes, device=g_env.device) for i in range(g_env.world_size)] if outputs.size(0) != n_per_proc: pad_row = n_per_proc - outputs.size(0) outputs = torch.cat((outputs, torch.cuda.FloatTensor(pad_row, g_data.g.num_classes, device=g_env.device)), dim=0) dist.all_gather(output_parts, outputs) # output_parts[g_env.rank] = outputs padding = g_data.g.features.size(0) - n_per_proc * (g_env.world_size - 1) output_parts[g_env.world_size - 1] = output_parts[g_env.world_size - 1][:padding,:] outputs = torch.cat(output_parts, dim=0) train_acc, val_acc, test_acc = test(outputs, am_pbyp[0].size(1)) g_logger.log( 'Epoch: {:03d}/{:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'.format(epoch+1, args.epochs, train_acc, val_acc, test_acc), rank=0) # g_logger.log(g_timer.summary_all(), rank=0) return outputs if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--local_rank", type=int) parser.add_argument("--world_size", type=int, default=8) parser.add_argument("--backend", type=str, default="nccl") parser.add_argument("--epochs", type=int, default=200) parser.add_argument("--run_count", type=int, default=1) parser.add_argument("--graphname", type=str, default="SmallerReddit") parser.add_argument("--timing", type=bool, default=True) parser.add_argument("--mid_layer", type=int, default=16) args = parser.parse_args() print(args) g_env = utils.DistEnv(args.local_rank, args.world_size, args.backend) g_timer = utils.DistTimer(g_env) g_logger = utils.DistLogger(g_env) g_logger.log(f'dist env inited: {g_env.backend} {g_env.world_size}') g_data = DistData(g_env, args.graphname) g_logger.log(f'dist data inited: {args.graphname}') main() timer_log = g_timer.sync_duration_dicts() mem_log = g_logger.sync_duration_dicts() if args.local_rank == 0: with open('./timer.log', 'wb') as f: pickle.dump(timer_log, f) with open('./mem.log', 'wb') as f: pickle.dump(mem_log, f)
39.9
162
0.627051
[ "MIT" ]
chineshboy/dist-gnn
1D_CAGNET/dist_1d.py
12,369
Python
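The dist_1d.py record above implements 1D-partitioned GCN training: both the adjacency matrix and the feature matrix are split by rows across ranks, and inside `broad_func` each rank broadcasts its feature block in turn while every rank multiplies its matching local adjacency block against the received block and accumulates the result. The following single-process NumPy sketch (dense matrices, made-up helper name, no `torch.distributed`) only illustrates that arithmetic; it is not the project's code.

```python
import numpy as np

def partitioned_spmm(adj_parts, feat_parts):
    """adj_parts[p][q] is the block of A owned by rank p whose columns match
    partition q; feat_parts[q] is the feature row-block owned by rank q.
    Returns the per-rank results z_p = sum_q A[p][q] @ H[q]."""
    world_size = len(feat_parts)
    results = []
    for p in range(world_size):                 # what rank p would compute
        z_loc = np.zeros((adj_parts[p][0].shape[0], feat_parts[0].shape[1]))
        for q in range(world_size):             # rank q "broadcasts" H[q]
            z_loc += adj_parts[p][q] @ feat_parts[q]
        results.append(z_loc)
    return results

# Tiny check: 4 nodes, 2 "ranks", 3 features; the stacked blocks equal A @ H.
A = np.array([[0, 1, 0, 1],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [1, 0, 1, 0]], dtype=float)
H = np.arange(12, dtype=float).reshape(4, 3)
adj_parts = [[A[:2, :2], A[:2, 2:]], [A[2:, :2], A[2:, 2:]]]
feat_parts = [H[:2], H[2:]]
assert np.allclose(np.vstack(partitioned_spmm(adj_parts, feat_parts)), A @ H)
```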
# This code aims to return the "n"-th result of the Fibonacci sequence.
# Below are two functions, each of which returns the same result by following a different algorithm.

def getFibNExt(n):
    # Iterative approach: fibAr[0] and fibAr[1] hold the two most recent values,
    # while fibAr[2] is only used as a temporary slot when rolling them forward.
    fibAr = [0, 1, 0]
    for i in range(n - 1):
        fibAr[2] = fibAr[0]
        fibAr[0] += fibAr[1]
        fibAr[1] = fibAr[2]
    return fibAr[0]


def getFibNSimp(n):
    if n == 2:
        return 1
    elif n <= 1:
        return 0
    else:
        # Each Fibonacci number is the sum of the two numbers before it,
        # so we can rely on recursion to get the value at position n.
        return getFibNSimp(n - 1) + getFibNSimp(n - 2)


def outputFibN():
    validSelection = False
    fibSelect = int(input("Enter a position in the Fibonacci series to extract: "))
    while not validSelection:
        select = input("For the simple recursive algorithm enter (s), for the extended iterative algorithm enter (e): ")
        if select == "e":
            print("Fibonacci number", fibSelect, "is:", getFibNExt(fibSelect))
            validSelection = True
        elif select == "s":
            print("Fibonacci number", fibSelect, "is:", getFibNSimp(fibSelect))
            validSelection = True
        else:
            validSelection = False
            print("Invalid selection, please press (e) for the extended algorithm or (s) for the simple algorithm.")


outputFibN()
39.694444
155
0.627012
[ "MIT" ]
DAM-96/Python-CodeSamples
fibonnachiSequence.py
1,431
Python
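The fibonnachiSequence.py record pairs an O(n) iterative routine with a naive recursive one whose running time grows exponentially. As a hedged addition that is not part of the original file, memoization keeps the recursive formulation (same position-1-is-0 convention) while doing linear work:

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def get_fib_n_memo(n):
    # Same convention as getFibNSimp: position 1 (and below) -> 0, position 2 -> 1.
    if n <= 1:
        return 0
    if n == 2:
        return 1
    return get_fib_n_memo(n - 1) + get_fib_n_memo(n - 2)

assert [get_fib_n_memo(i) for i in range(1, 8)] == [0, 1, 1, 2, 3, 5, 8]
```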
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['GroupMembershipsArgs', 'GroupMemberships'] @pulumi.input_type class GroupMembershipsArgs: def __init__(__self__, *, group_id: pulumi.Input[str], users: pulumi.Input[Sequence[pulumi.Input[str]]]): """ The set of arguments for constructing a GroupMemberships resource. :param pulumi.Input[str] group_id: ID of a Okta group. :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for. """ pulumi.set(__self__, "group_id", group_id) pulumi.set(__self__, "users", users) @property @pulumi.getter(name="groupId") def group_id(self) -> pulumi.Input[str]: """ ID of a Okta group. """ return pulumi.get(self, "group_id") @group_id.setter def group_id(self, value: pulumi.Input[str]): pulumi.set(self, "group_id", value) @property @pulumi.getter def users(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: """ The list of Okta user IDs which the group should have membership managed for. """ return pulumi.get(self, "users") @users.setter def users(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): pulumi.set(self, "users", value) @pulumi.input_type class _GroupMembershipsState: def __init__(__self__, *, group_id: Optional[pulumi.Input[str]] = None, users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ Input properties used for looking up and filtering GroupMemberships resources. :param pulumi.Input[str] group_id: ID of a Okta group. :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for. """ if group_id is not None: pulumi.set(__self__, "group_id", group_id) if users is not None: pulumi.set(__self__, "users", users) @property @pulumi.getter(name="groupId") def group_id(self) -> Optional[pulumi.Input[str]]: """ ID of a Okta group. """ return pulumi.get(self, "group_id") @group_id.setter def group_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "group_id", value) @property @pulumi.getter def users(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The list of Okta user IDs which the group should have membership managed for. """ return pulumi.get(self, "users") @users.setter def users(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "users", value) class GroupMemberships(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, group_id: Optional[pulumi.Input[str]] = None, users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None): """ Resource to manage a set of memberships for a specific group. This resource will allow you to bulk manage group membership in Okta for a given group. This offers an interface to pass multiple users into a single resource call, for better API resource usage. Effectively this is the same as using the `group.Membership` resource several times with a single group and many different users. If you need a relationship of a single user to many groups, please use the `UserGroupMemberships` resource. 
When using this with a `user.User` resource, you should add a lifecycle ignore for group memberships to avoid conflicts in desired state. ## Example Usage ```python import pulumi import pulumi_okta as okta test_group = okta.group.Group("testGroup", description="testing, testing") test_group_memberships = okta.GroupMemberships("testGroupMemberships", group_id=test_group.id, users=[ okta_user["test1"]["id"], okta_user["test2"]["id"], ]) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] group_id: ID of a Okta group. :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for. """ ... @overload def __init__(__self__, resource_name: str, args: GroupMembershipsArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Resource to manage a set of memberships for a specific group. This resource will allow you to bulk manage group membership in Okta for a given group. This offers an interface to pass multiple users into a single resource call, for better API resource usage. Effectively this is the same as using the `group.Membership` resource several times with a single group and many different users. If you need a relationship of a single user to many groups, please use the `UserGroupMemberships` resource. When using this with a `user.User` resource, you should add a lifecycle ignore for group memberships to avoid conflicts in desired state. ## Example Usage ```python import pulumi import pulumi_okta as okta test_group = okta.group.Group("testGroup", description="testing, testing") test_group_memberships = okta.GroupMemberships("testGroupMemberships", group_id=test_group.id, users=[ okta_user["test1"]["id"], okta_user["test2"]["id"], ]) ``` :param str resource_name: The name of the resource. :param GroupMembershipsArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(GroupMembershipsArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, group_id: Optional[pulumi.Input[str]] = None, users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = GroupMembershipsArgs.__new__(GroupMembershipsArgs) if group_id is None and not opts.urn: raise TypeError("Missing required property 'group_id'") __props__.__dict__["group_id"] = group_id if users is None and not opts.urn: raise TypeError("Missing required property 'users'") __props__.__dict__["users"] = users super(GroupMemberships, __self__).__init__( 'okta:index/groupMemberships:GroupMemberships', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, group_id: Optional[pulumi.Input[str]] = None, users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'GroupMemberships': """ Get an existing GroupMemberships resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] group_id: ID of a Okta group. :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _GroupMembershipsState.__new__(_GroupMembershipsState) __props__.__dict__["group_id"] = group_id __props__.__dict__["users"] = users return GroupMemberships(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="groupId") def group_id(self) -> pulumi.Output[str]: """ ID of a Okta group. """ return pulumi.get(self, "group_id") @property @pulumi.getter def users(self) -> pulumi.Output[Sequence[str]]: """ The list of Okta user IDs which the group should have membership managed for. """ return pulumi.get(self, "users")
42.485106
441
0.644531
[ "ECL-2.0", "Apache-2.0" ]
pulumi/pulumi-okta
sdk/python/pulumi_okta/group_memberships.py
9,984
Python
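The `GroupMemberships` docstring in the generated SDK file above advises adding a lifecycle ignore on the companion `user.User` resource so the two resources do not fight over membership state. A minimal sketch of that pattern, assuming the user resource exposes its memberships under a `group_memberships` property (that property name, and the user fields, are illustrative assumptions):

```python
import pulumi
import pulumi_okta as okta

test_user = okta.user.User(
    "testUser",
    first_name="Test",
    last_name="User",
    login="test.user@example.com",
    email="test.user@example.com",
    # Assumption: ignoring the user-side membership property keeps
    # GroupMemberships the single source of truth for this group's members.
    opts=pulumi.ResourceOptions(ignore_changes=["group_memberships"]),
)
```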
#-*- coding: utf-8 -*- from __future__ import unicode_literals import datetime import logging import os import posixpath import warnings import django from django.core.exceptions import ValidationError from django.core.files.uploadedfile import UploadedFile from django.db import models from django.template.defaultfilters import filesizeformat from django.utils.encoding import force_str, force_text from django.utils.six import string_types from django.utils.translation import ugettext_lazy as _ from .storage import private_storage logger = logging.getLogger(__name__) class PrivateFileField(models.FileField): """ Filefield with private storage, custom filename and size checks. Extra settings: - ``upload_subfolder``: a lambda to find the subfolder, depending on the instance. - ``content_types``: list of allowed content types. - ``max_file_size``: maximum file size. """ default_error_messages = { 'invalid_file_type': _('File type not supported.'), 'file_too_large': _('The file may not be larger than {max_size}.'), } def __init__(self, *args, **kwargs): self.upload_subfolder = kwargs.pop('upload_subfolder', None) self.content_types = kwargs.pop("content_types", None) or () self.max_file_size = kwargs.pop("max_file_size", None) kwargs.setdefault('storage', private_storage) super(PrivateFileField, self).__init__(*args, **kwargs) def clean(self, *args, **kwargs): data = super(PrivateFileField, self).clean(*args, **kwargs) file = data.file if isinstance(file, UploadedFile): # content_type is only available for uploaded files, # and not for files which are already stored in the model. content_type = file.content_type if self.content_types and content_type not in self.content_types: logger.debug('Rejected uploaded file type: %s', content_type) raise ValidationError(self.error_messages['invalid_file_type']) if self.max_file_size and file.size > self.max_file_size: raise ValidationError(self.error_messages['file_too_large'].format( max_size=filesizeformat(self.max_file_size), size=filesizeformat(file.size) )) return data def generate_filename(self, instance, filename): path_parts = [] if self.upload_to: # Support the upload_to callable that Django provides if callable(self.upload_to): dirname, filename = os.path.split(self.upload_to(instance, filename)) path_parts.append(dirname) else: dirname = force_text(datetime.datetime.now().strftime(force_str(self.upload_to))) path_parts.append(dirname) # Add our custom subdir function. upload_subfolder = self.upload_subfolder if upload_subfolder: # Should return a list, so joining can be done in a storage-specific manner. extra_dirs = upload_subfolder(instance) # Avoid mistakes by developers, no "s/u/b/p/a/t/h/" if isinstance(extra_dirs, string_types): warnings.warn("{}.{}.upload_subfolder should return a list" " to avoid path-separator issues.".format( instance.__class__.__name__, self.name), UserWarning) extra_dirs = os.path.split(extra_dirs) path_parts.extend([self.storage.get_valid_name(dir) for dir in extra_dirs]) path_parts.append(self._get_clean_filename(filename)) if django.VERSION >= (1, 10): filename = posixpath.join(*path_parts) return self.storage.generate_filename(filename) else: return os.path.join(*path_parts) def _get_clean_filename(self, filename): # As of Django 1.10+, file names are no longer cleaned locally, but cleaned by the storage. # This compatibility function makes sure all Django versions generate a safe filename. return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
40.403846
99
0.666111
[ "Apache-2.0" ]
OrsoBruno96/django-private-storage
private_storage/fields.py
4,202
Python
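A minimal sketch of how the `PrivateFileField` from private_storage/fields.py above might be declared on a model, exercising the three extra settings its docstring documents (the `Report` model and the subfolder layout are hypothetical):

```python
from django.db import models

from private_storage.fields import PrivateFileField


class Report(models.Model):
    owner_id = models.IntegerField()
    document = PrivateFileField(
        upload_to="reports",
        # upload_subfolder should return a list of path parts, per the field's warning.
        upload_subfolder=lambda instance: ["user-{}".format(instance.owner_id)],
        content_types=["application/pdf"],
        max_file_size=5 * 1024 * 1024,  # 5 MiB
    )
```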
import torch.nn as nn import math import torch import torch.nn.functional as F def conv_bn(inp, oup, stride, k_size=3): return nn.Sequential( nn.Conv2d(inp, oup, k_size, stride, 1, bias=False), nn.BatchNorm2d(oup), nn.PReLU() ) def conv_1x1_bn(inp, oup): return nn.Sequential( nn.Conv2d(inp, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), nn.PReLU() ) class DWC(nn.Module): def __init__(self, in_channels, out_channels): super(DWC, self).__init__() #self.depthwise = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=(7,6), #stride=1, padding=0, groups=in_channels, bias=False) self.batch_norm_in = nn.BatchNorm2d(in_channels) self.depthwise = nn.AvgPool2d((7, 6), stride=1, padding=0) self.pointwise = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, bias=False) def forward(self, x): x = self.depthwise(x) #x = self.batch_norm_in(x) x = self.pointwise(x) return x class Max_AvgPool(nn.Module): def __init__(self, kernel_size=(3,3), stride=2, padding=1, dim=128): super(Max_AvgPool, self).__init__() self.Maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) self.Avgpool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) def forward(self, x): x = self.Maxpool(x) + self.Avgpool(x) # add some channelwise gating? return x class Max_AvgPool(nn.Module): def __init__(self, kernel_size=(3,3), stride=2, padding=1, dim=128): super(Max_AvgPool, self).__init__() self.Maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) self.Avgpool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) def forward(self, x): x = self.Maxpool(x) + self.Avgpool(x) # add some channelwise gating? return x class gated_conv1x1(nn.Module): def __init__(self, inc=128, outc=128): super(gated_conv1x1, self).__init__() self.inp = int(inc/2) self.oup = int(outc/2) self.conv1x1_1 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=False) self.gate_1 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=True) self.conv1x1_2 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=False) self.gate_2 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=True) def forward(self, x): x_1 = x[:, :self.inp, :, :] x_2 = x[:, self.inp:, :, :] a_1 = self.conv1x1_1(x_1) g_1 = F.sigmoid(self.gate_1(x_1)) a_2 = self.conv1x1_2(x_2) g_2 = F.sigmoid(self.gate_2(x_2)) ret = torch.cat((a_1*g_1, a_2*g_2), 1) return ret class InvertedResidual_dwc(nn.Module): def __init__(self, inp, oup, stride, expand_ratio): super(InvertedResidual_dwc, self).__init__() self.stride = stride assert stride in [1, 2] hidden_dim = int(round(inp * expand_ratio)) self.use_res_connect = self.stride == 1 and inp == oup self.conv = [] if expand_ratio == 1: self.conv.append(nn.Conv2d(inp, hidden_dim, kernel_size=(3, 3), stride=stride, padding=1, groups=hidden_dim)) self.conv.append(nn.BatchNorm2d(hidden_dim)) self.conv.append(nn.PReLU()) #self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1)) #self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup)) self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(oup)) else: #self.conv.append(gated_conv1x1(inc=inp,outc=hidden_dim)) self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(hidden_dim)) self.conv.append(nn.PReLU()) self.conv.append(nn.Conv2d(hidden_dim, hidden_dim, kernel_size=(3, 3), stride=stride, padding=1, groups=hidden_dim)) self.conv.append(nn.BatchNorm2d(hidden_dim)) self.conv.append(nn.PReLU()) 
#self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup)) self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(oup)) self.conv = nn.Sequential(*self.conv) def forward(self, x): if self.use_res_connect: return x + self.conv(x) else: return self.conv(x) class InvertedResidual(nn.Module): def __init__(self, inp, oup, stride, expand_ratio): super(InvertedResidual, self).__init__() self.stride = stride assert stride in [1, 2] hidden_dim = int(round(inp * expand_ratio)) self.use_res_connect = self.stride == 1 and inp == oup self.conv = [] if expand_ratio == 1: self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1)) #self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup)) self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(oup)) else: #self.conv.append(gated_conv1x1(inc=inp,outc=hidden_dim)) self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(hidden_dim)) self.conv.append(nn.PReLU()) self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1)) #self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup)) self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(oup)) self.conv = nn.Sequential(*self.conv) def forward(self, x): if self.use_res_connect: return x + self.conv(x) else: return self.conv(x) class Net(nn.Module): #mobileNet v2 def __init__(self, embedding_size=128, input_size=224, width_mult=1.): super(Net, self).__init__() block = InvertedResidual block_dwc = InvertedResidual_dwc input_channel = 64 last_channel = 256 interverted_residual_setting = [ # t, c, n, s [1, 48, 1, 1], # depthwise conv for first row [2, 48, 2, 1], [4, 48, 2, 2], [2, 48, 2, 1], [4, 48, 5, 1], [2, 48, 2, 2], [2, 48, 6, 2], ] # building first layer input_channel = int(input_channel * width_mult) self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel self.features = [conv_bn(3, input_channel, 2)] # building inverted residual cnt = 0 for t, c, n, s in interverted_residual_setting: output_channel = int(c * width_mult) for i in range(n): if cnt>1: if i == n - 1: # reduce the featuremap in the last. self.features.append(block_dwc(input_channel, output_channel, s, expand_ratio=t)) else: self.features.append(block_dwc(input_channel, output_channel, 1, expand_ratio=t)) input_channel = output_channel else: if i == n - 1: # reduce the featuremap in the last. self.features.append(block_dwc(input_channel, output_channel, s, expand_ratio=t)) else: self.features.append(block_dwc(input_channel, output_channel, 1, expand_ratio=t)) input_channel = output_channel cnt+=1 # building last several layers self.features.append(gated_conv1x1(input_channel, self.last_channel)) # make it nn.Sequential self.features_sequential = nn.Sequential(*self.features) # Global depthwise conv #self.GDCconv = DWC(self.last_channel, embedding_size) self._initialize_weights() def forward(self, x): x = self.features_sequential(x).view(-1, 256*4) return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): n = m.weight.size(1) m.weight.data.normal_(0, 0.01) m.bias.data.zero_()
37.769231
128
0.594705
[ "MIT" ]
BOGUENSONG/EXTD_Pytorch
mobileFacenet_48_PReLU.py
8,838
Python
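As a quick sanity check of the blocks defined in the network file above, a dummy tensor can be pushed through an `InvertedResidual_dwc`; with stride 1 and equal input/output channels the residual connection is taken and the spatial size is preserved (the shapes below are illustrative only):

```python
import torch

# With the classes above in scope:
block = InvertedResidual_dwc(inp=64, oup=64, stride=1, expand_ratio=2)
x = torch.randn(1, 64, 56, 56)   # N, C, H, W
y = block(x)                     # residual path: y = x + conv(x)
print(y.shape)                   # torch.Size([1, 64, 56, 56])
```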
# Databricks notebook source # COMMAND ---------- # Instrument for unit tests. This is only executed in local unit tests, not in Databricks. if 'dbutils' not in locals(): import databricks_test databricks_test.inject_variables() # COMMAND ---------- assert dbutils.widgets.get("input") == "input_value"
24.230769
90
0.698413
[ "MIT" ]
Bhaskers-Blu-Org2/DataOps
Python/packages/databricks-test/tests/patch_notebook.py
315
Python
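The `if 'dbutils' not in locals()` guard above is what lets this notebook run unmodified both on Databricks and under local unit tests. The matching test would, roughly, stub the widget value the notebook asserts on and then execute it; this sketch assumes the `session()`/`run_notebook()` helpers that `databricks_test` advertises, so treat the exact calls as assumptions:

```python
import databricks_test


def test_patch_notebook():
    with databricks_test.session() as dbrickstest:
        # dbutils is injected as a mock, so the widget lookup can be stubbed.
        dbrickstest.dbutils.widgets.get.return_value = "input_value"
        # Assumed signature: (notebook directory, notebook name without .py).
        dbrickstest.run_notebook(".", "patch_notebook")
```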
import subprocess from collections import OrderedDict from io import StringIO from itertools import product def _parse_categorical(line): # Categorical Lines consist of: # # <name><w*>{<values>}<w*>[<default>]<*w>#Comment # where: # <name> - name of parameter. # <values> - comma seperated list of values (i.e. a,b,c,d...,z) # <default> - default value enclosed in braces. # <w*> - zero or more whitespace characters # has_comment = False # comment = "" if '#' in line: comment_begins = line.find('#') line = line[:comment_begins] # comment = line[comment_begins:] # has_comment = True if line.count('{') != 1 or line.count('}') != 1: raise ValueError('Malformed parameter line %s' % line) first_bracket = line.find('{') second_bracket = line.find('}') domain_values = line[first_bracket + 1:second_bracket] cat_values = domain_values.split(',') if len(cat_values) < 1: raise ValueError('Expected at least one value in %s' % line) name = line[:first_bracket].strip() values = [value.strip() for value in cat_values] # Stripped the code for the default value since we don't need it here od = OrderedDict() od['name'] = name od['values'] = values return od def parse_hyperparameter_string(param_string): params = OrderedDict() lines = param_string.split('\n') for line in lines: # Logic kind of copied from SMAC ACLIB version 2.06.01-dev, # but a little bit more restrictive # file: ca.ubc.cs.beta.aclib.configspace.ParamConfigurationSpace.java # line 497-546 # type = "" if not line.strip(): continue elif line.count('|') == 1: pass # print "WARNING: Conditionality is not parsed yet." # od = _parse_conditional(line) elif line.strip()[0] == '{': continue elif line.count('[') == 2: continue elif line.count('{') == 1 and line.count('}') == 1: od = _parse_categorical(line) else: raise ValueError('Cannot parse the following line %s' % line) params[od['name']] = od['values'] return params def construct_cli_call(cli_target, params): cli_call = StringIO() cli_call.write(cli_target) params = OrderedDict(sorted(params.items(), key=lambda t: t[0])) for param in params: cli_call.write(' -' + param + " \"'" + str(params[param]) + "'\"") return cli_call.getvalue() def command_line_function(cli_target, params): call = construct_cli_call(cli_target, params) ret = subprocess.check_output(call, shell=True) return ret def build_grid(hyperparameters): """Build a grid represented as a list of parameter dictionaries.""" parameter_dicts = [] for parameters in product(*hyperparameters.values()): parameter_tuples = zip(hyperparameters.keys(), parameters) parameter_dict = dict(parameter_tuples) parameter_dicts.append(parameter_dict) return parameter_dicts
32.946809
77
0.624475
[ "Apache-2.0" ]
Fanxingye/Autotabular
autotabular/metalearning/optimizers/optimizer_base.py
3,097
Python
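A short illustration of the two public helpers above, using the categorical line format described in `_parse_categorical` (the parameter names and values are made up):

```python
param_string = """
kernel {linear, rbf} [rbf]
C {0.1, 1, 10} [1]
"""

params = parse_hyperparameter_string(param_string)
# OrderedDict([('kernel', ['linear', 'rbf']), ('C', ['0.1', '1', '10'])])

grid = build_grid(params)
# 6 dicts: {'kernel': 'linear', 'C': '0.1'}, {'kernel': 'linear', 'C': '1'}, ...
print(len(grid))  # 6
```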
import transmission_rpc

from bgmi.config import (
    TRANSMISSION_RPC_PASSWORD,
    TRANSMISSION_RPC_PORT,
    TRANSMISSION_RPC_URL,
    TRANSMISSION_RPC_USERNAME,
)
from bgmi.downloader.base import BaseDownloadService
from bgmi.utils import print_info, print_warning


class TransmissionRPC(BaseDownloadService):
    @staticmethod
    def get_client():
        return transmission_rpc.Client(
            host=TRANSMISSION_RPC_URL,
            port=TRANSMISSION_RPC_PORT,
            username=TRANSMISSION_RPC_USERNAME,
            password=TRANSMISSION_RPC_PASSWORD,
        )

    def download(self):
        tc = self.get_client()
        tc.add_torrent(self.torrent, download_dir=self.save_path)
        print_info(
            "Add torrent into the download queue, the file will be saved at {}".format(
                self.save_path
            )
        )

    def check_download(self, name):
        pass

    @classmethod
    def download_status(cls, status=None):
        print_info("Print download status in database")
        BaseDownloadService.download_status(status=status)
        print("")
        print_info("Print download status in transmission-rpc")
        tc = cls.get_client()
        for torrent in tc.get_torrents():
            print_info(f" * {torrent.status}: {torrent}", indicator=False)

    @staticmethod
    def install():
        try:
            __import__("transmission_rpc")
        except ImportError:
            print_warning("Please run `pip install transmission-rpc`")
29.169811
87
0.655886
[ "MIT" ]
Sg4Dylan/BGmi
bgmi/downloader/transmission.py
1,546
Python
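The downloader above is a thin wrapper over `transmission_rpc`; the same client calls it makes (`add_torrent`, `get_torrents`) can be tried directly against a local Transmission daemon. The host, port and magnet link below are placeholders/assumptions:

```python
import transmission_rpc

client = transmission_rpc.Client(host="127.0.0.1", port=9091)
client.add_torrent(
    "magnet:?xt=urn:btih:...",      # any torrent URL or magnet link
    download_dir="/tmp/downloads",
)
for torrent in client.get_torrents():
    print(torrent.status, torrent)
```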
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import absolute_import, division, print_function, unicode_literals import os from builtins import str from collections import namedtuple from textwrap import dedent from future.utils import PY3 from pants.base.build_file import BuildFile from pants.base.file_system_project_tree import FileSystemProjectTree from pants.build_graph.address import BuildFileAddress from pants.build_graph.build_configuration import BuildConfiguration from pants.build_graph.build_file_aliases import BuildFileAliases from pants.build_graph.build_file_parser import BuildFileParser from pants.build_graph.target import Target from pants_test.base_test import BaseTest class ErrorTarget(Target): def __init__(self, *args, **kwargs): assert False, "This fake target should never be initialized in this test!" class BaseTestWithParser(BaseTest): def setUp(self): super(BaseTestWithParser, self).setUp() build_configuration = BuildConfiguration() build_configuration.register_aliases(self.alias_groups) self.build_file_parser = BuildFileParser(build_configuration, self.build_root) class BuildFileParserBasicsTest(BaseTestWithParser): @property def alias_groups(self): return BuildFileAliases(targets={'jvm_binary': ErrorTarget, 'java_library': ErrorTarget}) def create_buildfile(self, path): return BuildFile(FileSystemProjectTree(self.build_root), path) def assert_parser_error(self, build_file, err_string): with self.assertRaises(BuildFileParser.BuildFileParserError) as ctx: self.build_file_parser.parse_build_file(build_file) self.assertIn(err_string, str(ctx.exception)) def test_name_injection(self): # Target with no name is fine: target gets name of directory. self.add_to_build_file('foo/bar/BUILD', '\njava_library()\n') build_file = self.create_buildfile('foo/bar/BUILD') addresses = list(self.build_file_parser.parse_build_file(build_file).keys()) self.assertEqual(1, len(addresses)) self.assertEqual('bar', addresses[0].target_name) # Two targets with no name in the same BUILD file cause an error. self.add_to_build_file('foo/bar/BUILD', '\njvm_binary()\n') self.assert_parser_error(build_file, "defines address 'bar' more than once") # Test that omitting the name in a target at the build root is disallowed. 
self.add_to_build_file('BUILD', '\njava_library()\n') build_file = self.create_buildfile('BUILD') self.assert_parser_error(build_file, "Targets in root-level BUILD files must be named explicitly") def test_addressable_exceptions(self): self.add_to_build_file('b/BUILD', 'java_library(name="foo", "bad_arg")') build_file_b = self.create_buildfile('b/BUILD') expected_msg = ('positional argument follows keyword argument' if PY3 else 'non-keyword arg after keyword arg') self.assert_parser_error(build_file_b, expected_msg) self.add_to_build_file('d/BUILD', dedent( """ java_library( name="foo", dependencies=[ object(), ] ) """ )) build_file_d = self.create_buildfile('d/BUILD') self.assert_parser_error(build_file_d, 'dependencies passed to Target constructors must be strings') def test_noop_parse(self): self.add_to_build_file('BUILD', '') build_file = self.create_buildfile('BUILD') address_map = set(self.build_file_parser.parse_build_file(build_file)) self.assertEqual(len(address_map), 0) def test_invalid_unicode_in_build_file(self): """Demonstrate that unicode characters causing parse errors raise real parse errors.""" self.add_to_build_file('BUILD', dedent( """ jvm_binary(name = ‘hello’, # Parse error due to smart quotes (non ascii characters) source = 'HelloWorld.java' main = 'foo.HelloWorld', ) """ )) build_file = self.create_buildfile('BUILD') self.assert_parser_error(build_file, 'invalid character' if PY3 else 'invalid syntax') def test_unicode_string_in_build_file(self): """Demonstrates that a string containing unicode should work in a BUILD file.""" self.add_to_build_file('BUILD', dedent( """ java_library( name='foo', sources=['א.java'] ) """ )) build_file = self.create_buildfile('BUILD') self.build_file_parser.parse_build_file(build_file) class BuildFileParserTargetTest(BaseTestWithParser): @property def alias_groups(self): return BuildFileAliases(targets={'fake': ErrorTarget}) def create_buildfile(self, path): return BuildFile(FileSystemProjectTree(self.build_root), path) def test_trivial_target(self): self.add_to_build_file('BUILD', 'fake(name="foozle")') build_file = self.create_buildfile('BUILD') address_map = self.build_file_parser.parse_build_file(build_file) self.assertEqual(len(address_map), 1) address, proxy = address_map.popitem() self.assertEqual(address, BuildFileAddress(build_file=build_file, target_name='foozle')) self.assertEqual(proxy.addressed_name, 'foozle') self.assertEqual(proxy.addressed_type, ErrorTarget) def test_sibling_build_files(self): self.add_to_build_file('BUILD', dedent( """ fake(name="base", dependencies=[ ':foo', ]) """)) self.add_to_build_file('BUILD.foo', dedent( """ fake(name="foo", dependencies=[ ':bat', ]) """)) self.add_to_build_file('./BUILD.bar', dedent( """ fake(name="bat") """)) bar_build_file = self.create_buildfile('BUILD.bar') base_build_file = self.create_buildfile('BUILD') foo_build_file = self.create_buildfile('BUILD.foo') address_map = self.build_file_parser.address_map_from_build_files( BuildFile.get_build_files_family(FileSystemProjectTree(self.build_root), ".")) addresses = address_map.keys() self.assertEqual({bar_build_file.relpath, base_build_file.relpath, foo_build_file.relpath}, set([address.rel_path for address in addresses])) self.assertEqual({'//:base', '//:foo', '//:bat'}, set([address.spec for address in addresses])) def test_build_file_duplicates(self): # This workspace has two targets in the same file with the same name. 
self.add_to_build_file('BUILD', 'fake(name="foo")\n') self.add_to_build_file('BUILD', 'fake(name="foo")\n') with self.assertRaises(BuildFileParser.AddressableConflictException): base_build_file = self.create_buildfile('BUILD') self.build_file_parser.parse_build_file(base_build_file) def test_sibling_build_files_duplicates(self): # This workspace is malformed, you can't shadow a name in a sibling BUILD file self.add_to_build_file('BUILD', dedent( """ fake(name="base", dependencies=[ ':foo', ]) """)) self.add_to_build_file('BUILD.foo', dedent( """ fake(name="foo", dependencies=[ ':bat', ]) """)) self.add_to_build_file('./BUILD.bar', dedent( """ fake(name="base") """)) with self.assertRaises(BuildFileParser.SiblingConflictException): self.build_file_parser.address_map_from_build_files( BuildFile.get_build_files_family(FileSystemProjectTree(self.build_root), '.')) class BuildFileParserExposedObjectTest(BaseTestWithParser): @property def alias_groups(self): return BuildFileAliases(objects={'fake_object': object()}) def test_exposed_object(self): self.add_to_build_file('BUILD', """fake_object""") build_file = BuildFile(FileSystemProjectTree(self.build_root), 'BUILD') address_map = self.build_file_parser.parse_build_file(build_file) self.assertEqual(len(address_map), 0) class BuildFileParserExposedContextAwareObjectFactoryTest(BaseTestWithParser): Jar = namedtuple('Jar', ['org', 'name', 'rev']) Repository = namedtuple('Repository', ['name', 'url', 'push_db_basedir']) Artifact = namedtuple('Artifact', ['org', 'name', 'repo']) class JarLibrary(Target): def __init__(self, jars=None, **kwargs): super(BuildFileParserExposedContextAwareObjectFactoryTest.JarLibrary, self).__init__(**kwargs) self.jars = jars or [] class JvmLibrary(Target): def __init__(self, provides=None, **kwargs): super(BuildFileParserExposedContextAwareObjectFactoryTest.JvmLibrary, self).__init__(**kwargs) self.provides = provides class JavaLibrary(JvmLibrary): pass class ScalaLibrary(JvmLibrary): pass @classmethod def make_lib(cls, parse_context): def real_make_lib(org, name, rev): dep = parse_context.create_object('jar', org=org, name=name, rev=rev) parse_context.create_object('jar_library', name=name, jars=[dep]) return real_make_lib @classmethod def create_java_libraries(cls, parse_context): def real_create_java_libraries(base_name, org='com.twitter', provides_java_name=None, provides_scala_name=None): def provides_artifact(provides_name): if provides_name is None: return None jvm_repo = cls.Repository( name='maven-central', url='http://maven.example.com', push_db_basedir=os.path.join('build-support', 'ivy', 'pushdb'), ) return parse_context.create_object('artifact', org=org, name=provides_name, repo=jvm_repo) parse_context.create_object('java_library', name='{}-java'.format(base_name), provides=provides_artifact(provides_java_name)) parse_context.create_object('scala_library', name='{}-scala'.format(base_name), provides=provides_artifact(provides_scala_name)) return real_create_java_libraries def setUp(self): super(BuildFileParserExposedContextAwareObjectFactoryTest, self).setUp() self._paths = set() def path_relative_util(self, parse_context): def real_path_relative_util(path): self._paths.add(os.path.join(parse_context.rel_path, path)) return real_path_relative_util @property def alias_groups(self): return BuildFileAliases( targets={ 'jar_library': self.JarLibrary, 'java_library': self.JavaLibrary, 'scala_library': self.ScalaLibrary, }, context_aware_object_factories={ 'make_lib': self.make_lib, 'create_java_libraries': 
self.create_java_libraries, 'path_util': self.path_relative_util, }, objects={ 'artifact': self.Artifact, 'jar': self.Jar, } ) def test_context_aware_object_factories(self): contents = dedent(""" create_java_libraries(base_name="create-java-libraries", provides_java_name="test-java", provides_scala_name="test-scala") make_lib("com.foo.test", "does_not_exists", "1.0") path_util("baz") """) self.create_file('3rdparty/BUILD', contents) build_file = BuildFile(FileSystemProjectTree(self.build_root), '3rdparty/BUILD') address_map = self.build_file_parser.parse_build_file(build_file) registered_proxies = set(address_map.values()) self.assertEqual(len(registered_proxies), 3) targets_created = {} for target_proxy in registered_proxies: targets_created[target_proxy.addressed_name] = target_proxy.addressed_type self.assertEqual({'does_not_exists', 'create-java-libraries-scala', 'create-java-libraries-java'}, set(targets_created.keys())) self.assertEqual(targets_created['does_not_exists'], self.JarLibrary) self.assertEqual(targets_created['create-java-libraries-java'], self.JavaLibrary) self.assertEqual(targets_created['create-java-libraries-scala'], self.ScalaLibrary) self.assertEqual({'3rdparty/baz'}, self._paths) def test_raises_parse_error(self): self.add_to_build_file('BUILD', 'foo(name = = "baz")') build_file = BuildFile(FileSystemProjectTree(self.build_root), 'BUILD') with self.assertRaises(BuildFileParser.ParseError): self.build_file_parser.parse_build_file(build_file) # Test some corner cases for the context printing # Error at beginning of BUILD file build_file = self.add_to_build_file('begin/BUILD', dedent(""" *?&INVALID! = 'foo' target( name='bar', dependencies= [ ':baz', ], ) """)) with self.assertRaises(BuildFileParser.ParseError): self.build_file_parser.parse_build_file(build_file) # Error at end of BUILD file build_file = self.add_to_build_file('end/BUILD', dedent(""" target( name='bar', dependencies= [ ':baz', ], ) *?&INVALID! = 'foo' """)) with self.assertRaises(BuildFileParser.ParseError): self.build_file_parser.parse_build_file(build_file) # Error in the middle of BUILD file > 6 lines build_file = self.add_to_build_file('middle/BUILD', dedent(""" target( name='bar', *?&INVALID! = 'foo' dependencies = [ ':baz', ], ) """)) with self.assertRaises(BuildFileParser.ParseError): self.build_file_parser.parse_build_file(build_file) # Error in very short build file. build_file = self.add_to_build_file('short/BUILD', dedent(""" target(name='bar', dependencies = [':baz'],) *?&INVALID! = 'foo' """)) with self.assertRaises(BuildFileParser.ParseError): self.build_file_parser.parse_build_file(build_file) def test_raises_execute_error(self): self.add_to_build_file('BUILD', 'undefined_alias(name="baz")') build_file = BuildFile(FileSystemProjectTree(self.build_root), 'BUILD') with self.assertRaises(BuildFileParser.ExecuteError): self.build_file_parser.parse_build_file(build_file) def test_build_file_parser_error_hierarcy(self): """Exception handling code depends on the fact that all explicit exceptions from BuildFileParser are subclassed from the BuildFileParserError base class. 
""" def assert_build_file_parser_error(e): self.assertIsInstance(e, BuildFileParser.BuildFileParserError) assert_build_file_parser_error(BuildFileParser.BuildFileScanError()) assert_build_file_parser_error(BuildFileParser.AddressableConflictException()) assert_build_file_parser_error(BuildFileParser.SiblingConflictException()) assert_build_file_parser_error(BuildFileParser.ParseError()) assert_build_file_parser_error(BuildFileParser.ExecuteError())
36.407143
100
0.677523
[ "Apache-2.0" ]
omerzach/pants
tests/python/pants_test/build_graph/test_build_file_parser.py
15,296
Python
from __future__ import absolute_import, division, print_function import six import matplotlib import matplotlib.pyplot as plt from matplotlib.testing.decorators import image_comparison from mpl_toolkits.axes_grid1 import host_subplot from mpl_toolkits.axes_grid1 import make_axes_locatable from mpl_toolkits.axes_grid1 import AxesGrid from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar from matplotlib.colors import LogNorm from itertools import product import numpy as np @image_comparison(baseline_images=['divider_append_axes']) def test_divider_append_axes(): # the random data np.random.seed(0) x = np.random.randn(1000) y = np.random.randn(1000) fig, axScatter = plt.subplots() # the scatter plot: axScatter.scatter(x, y) # create new axes on the right and on the top of the current axes # The first argument of the new_vertical(new_horizontal) method is # the height (width) of the axes to be created in inches. divider = make_axes_locatable(axScatter) axHistbot = divider.append_axes("bottom", 1.2, pad=0.1, sharex=axScatter) axHistright = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter) axHistleft = divider.append_axes("left", 1.2, pad=0.1, sharey=axScatter) axHisttop = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter) # now determine nice limits by hand: binwidth = 0.25 xymax = max(np.max(np.abs(x)), np.max(np.abs(y))) lim = (int(xymax/binwidth) + 1) * binwidth bins = np.arange(-lim, lim + binwidth, binwidth) axHisttop.hist(x, bins=bins) axHistbot.hist(x, bins=bins) axHistleft.hist(y, bins=bins, orientation='horizontal') axHistright.hist(y, bins=bins, orientation='horizontal') axHistbot.invert_yaxis() axHistleft.invert_xaxis() axHisttop.xaxis.set_ticklabels(()) axHistbot.xaxis.set_ticklabels(()) axHistleft.yaxis.set_ticklabels(()) axHistright.yaxis.set_ticklabels(()) @image_comparison(baseline_images=['twin_axes_empty_and_removed'], extensions=["png"], tol=1) def test_twin_axes_empty_and_removed(): # Purely cosmetic font changes (avoid overlap) matplotlib.rcParams.update({"font.size": 8}) matplotlib.rcParams.update({"xtick.labelsize": 8}) matplotlib.rcParams.update({"ytick.labelsize": 8}) generators = [ "twinx", "twiny", "twin" ] modifiers = [ "", "host invisible", "twin removed", "twin invisible", "twin removed\nhost invisible" ] # Unmodified host subplot at the beginning for reference h = host_subplot(len(modifiers)+1, len(generators), 2) h.text(0.5, 0.5, "host_subplot", horizontalalignment="center", verticalalignment="center") # Host subplots with various modifications (twin*, visibility) applied for i, (mod, gen) in enumerate(product(modifiers, generators), len(generators)+1): h = host_subplot(len(modifiers)+1, len(generators), i) t = getattr(h, gen)() if "twin invisible" in mod: t.axis[:].set_visible(False) if "twin removed" in mod: t.remove() if "host invisible" in mod: h.axis[:].set_visible(False) h.text(0.5, 0.5, gen + ("\n" + mod if mod else ""), horizontalalignment="center", verticalalignment="center") plt.subplots_adjust(wspace=0.5, hspace=1) def test_axesgrid_colorbar_log_smoketest(): fig = plt.figure() grid = AxesGrid(fig, 111, # modified to be only subplot nrows_ncols=(1, 1), label_mode="L", cbar_location="top", cbar_mode="single", ) Z = 10000 * np.random.rand(10, 10) im = grid[0].imshow(Z, interpolation="nearest", norm=LogNorm()) grid.cbar_axes[0].colorbar(im) @image_comparison( baseline_images=['inset_locator'], style='default', extensions=['png'], 
remove_text=True) def test_inset_locator(): def get_demo_image(): from matplotlib.cbook import get_sample_data import numpy as np f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False) z = np.load(f) # z is a numpy array of 15x15 return z, (-3, 4, -4, 3) fig, ax = plt.subplots(figsize=[5, 4]) # prepare the demo image Z, extent = get_demo_image() Z2 = np.zeros([150, 150], dtype="d") ny, nx = Z.shape Z2[30:30 + ny, 30:30 + nx] = Z # extent = [-3, 4, -4, 3] ax.imshow(Z2, extent=extent, interpolation="nearest", origin="lower") axins = zoomed_inset_axes(ax, 6, loc=1) # zoom = 6 axins.imshow(Z2, extent=extent, interpolation="nearest", origin="lower") axins.yaxis.get_major_locator().set_params(nbins=7) axins.xaxis.get_major_locator().set_params(nbins=7) # sub region of the original image x1, x2, y1, y2 = -1.5, -0.9, -2.5, -1.9 axins.set_xlim(x1, x2) axins.set_ylim(y1, y2) plt.xticks(visible=False) plt.yticks(visible=False) # draw a bbox of the region of the inset axes in the parent axes and # connecting lines between the bbox and the inset axes area mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5") asb = AnchoredSizeBar(ax.transData, 0.5, '0.5', loc=8, pad=0.1, borderpad=0.5, sep=5, frameon=False) ax.add_artist(asb) @image_comparison(baseline_images=['zoomed_axes', 'inverted_zoomed_axes'], extensions=['png']) def test_zooming_with_inverted_axes(): fig, ax = plt.subplots() ax.plot([1, 2, 3], [1, 2, 3]) ax.axis([1, 3, 1, 3]) inset_ax = zoomed_inset_axes(ax, zoom=2.5, loc=4) inset_ax.axis([1.1, 1.4, 1.1, 1.4]) fig, ax = plt.subplots() ax.plot([1, 2, 3], [1, 2, 3]) ax.axis([3, 1, 3, 1]) inset_ax = zoomed_inset_axes(ax, zoom=2.5, loc=4) inset_ax.axis([1.4, 1.1, 1.4, 1.1])
35.381503
79
0.639601
[ "Apache-2.0" ]
NalediMadlopha/google-python-exercises
venv/lib/python2.7/site-packages/mpl_toolkits/tests/test_axes_grid1.py
6,121
Python
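The marginal-histogram pattern exercised by `test_divider_append_axes` above boils down to a few `make_axes_locatable`/`append_axes` calls; a pared-down sketch:

```python
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable

x, y = np.random.randn(2, 500)
fig, ax_scatter = plt.subplots()
ax_scatter.scatter(x, y)

divider = make_axes_locatable(ax_scatter)
# 1.2-inch histogram axes that share the scatter plot's x/y limits.
ax_hist_x = divider.append_axes("top", 1.2, pad=0.1, sharex=ax_scatter)
ax_hist_y = divider.append_axes("right", 1.2, pad=0.1, sharey=ax_scatter)
ax_hist_x.hist(x, bins=25)
ax_hist_y.hist(y, bins=25, orientation="horizontal")
plt.show()
```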
""" Off Multipage Cheatsheet https://github.com/daniellewisDL/streamlit-cheat-sheet @daniellewisDL : https://github.com/daniellewisDL """ import streamlit as st from pathlib import Path import base64 from modules.toc import * # Initial page config st.set_page_config( page_title='Code Compendium Intro Page', layout="wide", # initial_sidebar_state="expanded", ) # col2.title("Table of contents") # col2.write("http://localhost:8502/#display-progress-and-status") # toc.header("Header 1") # toc.header("Header 2") # toc.subheader("Subheader 1") # toc.subheader("Subheader 2") # toc.generate() # Thanks to streamlitopedia for the following code snippet def img_to_bytes(img_path): img_bytes = Path(img_path).read_bytes() encoded = base64.b64encode(img_bytes).decode() return encoded # sidebar # def cs_sidebar(): # st.sidebar.markdown('''[<img src='data:image/png;base64,{}' class='img-fluid' width=32 height=32>](https://streamlit.io/)'''.format(img_to_bytes("logomark_website.png")), unsafe_allow_html=True) # st.sidebar.header('Streamlit cheat sheet') # st.sidebar.markdown(''' # <small>Summary of the [docs](https://docs.streamlit.io/en/stable/api.html), as of [Streamlit v1.0.0](https://www.streamlit.io/).</small> # ''', unsafe_allow_html=True) # st.sidebar.markdown('__How to install and import__') # st.sidebar.code('$ pip install streamlit') # st.sidebar.markdown('Import convention') # st.sidebar.code('>>> import streamlit as st') # st.sidebar.markdown('__Add widgets to sidebar__') # st.sidebar.code(''' # st.sidebar.<widget> # >>> a = st.sidebar.radio(\'R:\',[1,2]) # ''') # st.sidebar.markdown('__Command line__') # st.sidebar.code(''' # $ streamlit --help # $ streamlit run your_script.py # $ streamlit hello # $ streamlit config show # $ streamlit cache clear # $ streamlit docs # $ streamlit --version # ''') # st.sidebar.markdown('__Pre-release features__') # st.sidebar.markdown('[Beta and experimental features](https://docs.streamlit.io/en/stable/api.html#beta-and-experimental-features)') # st.sidebar.code(''' # pip uninstall streamlit # pip install streamlit-nightly --upgrade # ''') # st.sidebar.markdown('''<small>[st.cheat_sheet v1.0.0](https://github.com/daniellewisDL/streamlit-cheat-sheet) | Oct 2021</small>''', unsafe_allow_html=True) # return None ########################## # Main body of cheat sheet ########################## def cs_body(): col1 = st.columns(1) col1.header('Ryan Paik') col1.markdown( ''' *“You don't learn to walk by following rules. You learn by doing, and by falling over.”* -Richard Branson ----- ''') col1.subheader("Welcome to my Code Compendium.") col1.markdwon(''' This website/webapp is my personal cheatsheet for of all the code snippets that I have needed over the past 2 years. This ended up being a quick detour into Streamlit that I fell in love with while I was building flask api's. ----- **Programming is only as deep as you want to dive in.** This webapp features the basic code snippets from all the "googling" from programming I have done. I have taken the plunge and have created my own markdown notebooks organizing information from quick solution tidbits to documentation for programming languages. Please visit my github for practical code and my research notebooks: *[rypaik (Ryan Paik) · GitHub](https://github.com/rypaik)* If you would like access to my Gist please email me. 
[email protected] ----- **Bio:** Currently a Sophomore at University of Illinois at Urbana-Champaign Working Nights on my degree from the System Engineering Program **Hobbies:** Trying to become a real guitar hero minus the game system, playing Valorant with the St Mark's crew, getting interesting eats no matter where I am, and playing toss with my baseball field rat of a cousin. The newest hobby is figuring out what I can build with all the new breakthroughs in technology. **Currently Working On** Frameworks and Languages:     - Flask, Django, FastAPI, PyTorch, Streamlit, OpenCV, shell scripting, Python, C++ Databases:     - Postgres, Redis, MongoDB, and applicable ORMs When I can get up for Air:     - React, swift(ios), Rust, GO!!     - Find a team to get a paper In Arxiv **This site will be constantly updated as long as I program. Feel free to pass on the URL.** ''') # col2.subheader('Display interactive widgets') # col2.code(''' # st.button('Hit me') # st.download_button('On the dl', data) # st.checkbox('Check me out') # st.radio('Radio', [1,2,3]) # st.selectbox('Select', [1,2,3]) # st.multiselect('Multiselect', [1,2,3]) # st.slider('Slide me', min_value=0, max_value=10) # st.select_slider('Slide to select', options=[1,'2']) # st.text_input('Enter some text') # st.number_input('Enter a number') # st.text_area('Area for textual entry') # st.date_input('Date input') # st.time_input('Time entry') # st.file_uploader('File uploader') # st.color_picker('Pick a color') # ''') # col2.write('Use widgets\' returned values in variables:') # col2.code(''' # >>> for i in range(int(st.number_input('Num:'))): foo() # >>> if st.sidebar.selectbox('I:',['f']) == 'f': b() # >>> my_slider_val = st.slider('Quinn Mallory', 1, 88) # >>> st.write(slider_val) # ''') # # Control flow # col2.subheader('Control flow') # col2.code(''' # st.stop() # ''') # # Lay out your app # col2.subheader('Lay out your app') # col2.code(''' # st.form('my_form_identifier') # st.form_submit_button('Submit to me') # st.container() # st.columns(spec) # >>> col1, col2 = st.columns(2) # >>> col1.subheader('Columnisation') # st.expander('Expander') # >>> with st.expander('Expand'): # >>> st.write('Juicy deets') # ''') # col2.write('Batch widgets together in a form:') # col2.code(''' # >>> with st.form(key='my_form'): # >>> text_input = st.text_input(label='Enter some text') # >>> submit_button = st.form_submit_button(label='Submit') # ''') # # Display code # col2.subheader('Display code') # col2.code(''' # st.echo() # >>> with st.echo(): # >>> st.write('Code will be executed and printed') # ''') # # Display progress and status # col2.subheader('Display progress and status') # col2.code(''' # st.progress(progress_variable_1_to_100) # st.spinner() # >>> with st.spinner(text='In progress'): # >>> time.sleep(5) # >>> st.success('Done') # st.balloons() # st.error('Error message') # st.warning('Warning message') # st.info('Info message') # st.success('Success message') # st.exception(e) # ''') # # Placeholders, help, and options # col2.subheader('Placeholders, help, and options') # col2.code(''' # st.empty() # >>> my_placeholder = st.empty() # >>> my_placeholder.text('Replaced!') # st.help(pandas.DataFrame) # st.get_option(key) # st.set_option(key, value) # st.set_page_config(layout='wide') # ''') # # Mutate data # col2.subheader('Mutate data') # col2.code(''' # DeltaGenerator.add_rows(data) # >>> my_table = st.table(df1) # >>> my_table.add_rows(df2) # >>> my_chart = st.line_chart(df1) # >>> my_chart.add_rows(df2) # ''') # # Optimize 
performance # col2.subheader('Optimize performance') # col2.code(''' # @st.cache # >>> @st.cache # ... def fetch_and_clean_data(url): # ... # Mutate data at url # ... return data # >>> # Executes d1 as first time # >>> d1 = fetch_and_clean_data(ref1) # >>> # Does not execute d1; returns cached value, d1==d2 # >>> d2 = fetch_and_clean_data(ref1) # >>> # Different arg, so function d1 executes # >>> d3 = fetch_and_clean_data(ref2) # ''') # col2.subheader('Other key parts of the API') # col2.markdown(''' # <small>[State API](https://docs.streamlit.io/en/stable/session_state_api.html)</small><br> # <small>[Theme option reference](https://docs.streamlit.io/en/stable/theme_options.html)</small><br> # <small>[Components API reference](https://docs.streamlit.io/en/stable/develop_streamlit_components.html)</small><br> # <small>[API cheat sheet](https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py)</small><br> # ''', unsafe_allow_html=True) # Column 3 TOC Generator # col3.subheader('test') # toc = Toc(col3) # # col2.title("Table of contents") # col3.write("http://localhost:8502/#display-progress-and-status", unsafe_allow_html=True) # toc.header("Header 1") # toc.header("Header 2") # toc.generate() # toc.subheader("Subheader 1") # toc.subheader("Subheader 2") # toc.generate() # return None # Run main() # if __name__ == '__main__': # main() # def main(): def app(): # cs_sidebar() cs_body() return None
27.259259
228
0.65942
[ "MIT" ]
rypaik/Streamlit_Ref
.history/pages/intro_20220303154534.py
8,853
Python
from setuptools import setup

setup(
    name='geompy',
    version='0.1.0',
    description='Tools for Euclidean Geometry.',
    url='https://github.com/qthequartermasterman/geometry',
    author='Andrew P. Sansom',
    author_email='[email protected]',
    license='MIT',
    packages=['geompy'],
    install_requires=[
        'sympy',
        'numpy',
        'networkx',
        'matplotlib',
        'scikit-image',
        'symengine',
        'methodtools'
    ],
    extras_require={
        'gym_environments': ["gym"]
    },
    # No valid Trove classifiers yet; an empty-string entry is not a valid classifier.
    classifiers=[],
)
20.068966
59
0.558419
[ "MIT" ]
qthequartermasterman/geometry
setup.py
582
Python
import sys
import decimal

from svg.path import Path, Line, Arc, CubicBezier, QuadraticBezier
from svg.path import parse_path

import xml.etree.ElementTree as ET


def frange(x, y, jump):
    while x < y:
        yield x
        x += jump


ns = {'svg': 'http://www.w3.org/2000/svg'}

inputfile = sys.argv[1]

tree = ET.parse(inputfile)
root = tree.getroot()

paths = root.findall('./svg:g/svg:path', ns)

SVG1 = '''<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" width="500" height="500">
<g>
'''

SVG2 = '''
</g>
</svg>
'''

svgpath1 = '''<path style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" d="'''

svgpath2 = ''' Z"/>
'''

print(SVG1)

for p in paths:
    new_path = Path()
    path = parse_path(p.attrib["d"])

    start = None

    # Sample the path at regular intervals and join the samples with straight lines.
    for px in list(frange(0, 1, decimal.Decimal('0.05'))):
        if start is None:
            start = path.point(float(px))
        else:
            end = path.point(float(px))
            new_path.append(Line(start, end))
            start = end

    print(svgpath1 + new_path.d() + svgpath2)

print(SVG2)
30.270833
391
0.659326
[ "MIT" ]
RodrigoD27avila/TCC2
tests/teste1/parse_svg.py
1,453
Python
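The heart of the script above is `svg.path`'s ability to evaluate a parsed path at a parameter between 0 and 1; a small standalone sketch of that sampling step, using a made-up quadratic Bézier:

```python
from svg.path import Line, Path, parse_path

# Quadratic Bezier approximated by 4 straight segments.
path = parse_path("M 0,0 Q 50,100 100,0")
points = [path.point(t / 4) for t in range(5)]   # t = 0, 0.25, ..., 1.0
approx = Path(*[Line(a, b) for a, b in zip(points, points[1:])])
print(approx.d())
```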
"""This module contains common functions-helpers of the client and agents. Copyright (c) 2018 http://reportportal.io . Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import logging from pkg_resources import DistributionNotFound, get_distribution from platform import machine, processor, system logger = logging.getLogger(__name__) def gen_attributes(rp_attributes): """Generate list of attributes for the API request. Example of input list: ['tag_name:tag_value1', 'tag_value2'] Output of the function for the given input list: [{'key': 'tag_name', 'value': 'tag_value1'}, {'value': 'tag_value2'}] :param rp_attributes: List of attributes(tags) :return: Correctly created list of dictionaries to be passed to RP """ attrs = [] for rp_attr in rp_attributes: try: key, value = rp_attr.split(':') attr_dict = {'key': key, 'value': value} except ValueError as exc: logger.debug(str(exc)) attr_dict = {'value': rp_attr} if all(attr_dict.values()): attrs.append(attr_dict) continue logger.debug('Failed to process "{0}" attribute, attribute value' ' should not be empty.'.format(rp_attr)) return attrs def get_launch_sys_attrs(): """Generate attributes for the launch containing system information. :return: dict {'os': 'Windows', 'cpu': 'AMD', 'machine': 'Windows10_pc'} """ return { 'os': system(), 'cpu': processor() or 'unknown', 'machine': machine(), 'system': True # This one is the flag for RP to hide these attributes } def get_package_version(package_name): """Get version of the given package. :param package_name: Name of the package :return: Version of the package """ try: package_version = get_distribution(package_name).version except DistributionNotFound: package_version = 'not found' return package_version
33.285714
78
0.655482
[ "Apache-2.0" ]
jyejare/client-Python
reportportal_client/helpers.py
2,563
Python
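A tiny usage sketch of `gen_attributes` from reportportal_client/helpers.py above, matching the behaviour its docstring describes:

```python
from reportportal_client.helpers import gen_attributes

attrs = gen_attributes(['tag_name:tag_value1', 'tag_value2', 'empty_value:'])
# [{'key': 'tag_name', 'value': 'tag_value1'}, {'value': 'tag_value2'}]
# The third entry is dropped (and logged) because its value is empty.
```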
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2017-09-08 12:24 from __future__ import unicode_literals from django.db import migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [("campusonline", "0010_auto_20171002_1450")] forward = [ """ CREATE FOREIGN TABLE "campusonline"."veranstaltungen" ( PK_LV numeric, REIHUNG numeric, TYP varchar, NUMMER varchar, TITEL varchar, DATUM timestamptz, ZEIT_VON timestamptz, ZEIT_BIS timestamptz, PK_GEB numeric, GEBAEUDE varchar, PK_RAUM numeric, RAUM varchar, RAUM_BEZ varchar, TERMINART varchar, ANZEIGE_BIS timestamptz ) SERVER sqlalchemy OPTIONS ( tablename 'VERANSTALTUNGEN_HEUTE_V', db_url '{}' ); """.format( settings.MULTICORN.get("campusonline") ), """ CREATE MATERIALIZED VIEW "public"."campusonline_event" AS SELECT md5(concat(reihung,typ,titel,datum,zeit_von,zeit_bis,pk_geb,pk_raum)) AS id, pk_lv::integer AS course_id, reihung::integer AS order, typ AS category, titel AS title, datum AS date, zeit_von AS start, zeit_bis AS end, pk_geb::integer AS building_id, pk_raum::integer AS room_id, anzeige_bis AS show_end FROM "campusonline"."veranstaltungen" WITH DATA; """, ] reverse = [ """ DROP MATERIALIZED VIEW IF EXISTS "public"."campusonline_event"; """, """ DROP FOREIGN TABLE IF EXISTS "campusonline"."veranstaltungen"; """, ] operations = [migrations.RunSQL(forward, reverse)]
28.954545
88
0.560963
[ "BSD-2-Clause" ]
medunigraz/outpost.django.campusonline
src/outpost/django/campusonline/migrations/0011_events.py
1,911
Python
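The migration above creates the materialized view `WITH DATA`, but nothing in it refreshes the view afterwards, so newer CAMPUSonline rows only appear after a refresh. A hedged sketch of a follow-up migration that does this (its name and placement are assumptions; a cron job or management command would work just as well):

```python
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [("campusonline", "0011_events")]

    operations = [
        migrations.RunSQL(
            'REFRESH MATERIALIZED VIEW "public"."campusonline_event";',
            migrations.RunSQL.noop,
        )
    ]
```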
import numpy as np


def neighbord_analysis(x_as, column=0):
    """Compute the distances between neighbouring elements of an array, their mean and standard deviation.

    Author: Michele Monti

    Args:
        x_as: the list or data set to analyse.

    Kwargs:
        column: the column of the data set to analyse (used when the input is 2-D).

    Returns:
        diff_neighbor: the differences between first neighbours in the list,
        mean_distance: the mean distance between neighbours,
        std_dev: the standard deviation of the distances between neighbours.
    """
    x_as = np.array(x_as)
    correct_axis = x_as
    if x_as.ndim > 1:
        correct_axis = x_as[:, column]
    # Difference between each element and its predecessor (drop the first, meaningless entry).
    diff_neighbor = [itm - correct_axis[idx - 1] for idx, itm in enumerate(correct_axis)][1:]
    mean_distance = np.mean(diff_neighbor)
    std_dev = np.std(diff_neighbor)
    return (diff_neighbor, mean_distance, std_dev)
29.068966
110
0.742586
[ "BSD-2-Clause" ]
Repythory/Libraries
amolf/numerical_data_analysis/NeighbourAnalysis.py
843
Python
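A quick worked example of the helper above: for the list `[1, 2, 4, 7]` the neighbour differences are `[1, 2, 3]`, so the mean distance is 2.0 and the (population) standard deviation is about 0.816:

```python
diff, mean_distance, std_dev = neighbord_analysis([1, 2, 4, 7])
print(diff)           # [1, 2, 3]
print(mean_distance)  # 2.0
print(std_dev)        # ~0.816
```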
""" Copyright (c) 2018 Doyub Kim I am making my contributions/submissions to this project solely in my personal capacity and am not conveying any rights to any intellectual property of any third parties. """ import pyjet import unittest import numpy as np class ParticleSystemData2Tests(unittest.TestCase): def testInit(self): ps = pyjet.ParticleSystemData2() self.assertEqual(ps.numberOfParticles, 0) ps2 = pyjet.ParticleSystemData2(100) self.assertEqual(ps2.numberOfParticles, 100) def testResize(self): ps = pyjet.ParticleSystemData2() ps.resize(12) self.assertEqual(ps.numberOfParticles, 12) def testAddScalarData(self): ps = pyjet.ParticleSystemData2() ps.resize(12) a0 = ps.addScalarData(2.0) a1 = ps.addScalarData(9.0) self.assertEqual(ps.numberOfParticles, 12) self.assertEqual(a0, 0) self.assertEqual(a1, 1) as0 = np.array(ps.scalarDataAt(a0)) for val in as0: self.assertEqual(val, 2.0) as1 = np.array(ps.scalarDataAt(a1)) for val in as1: self.assertEqual(val, 9.0) def testAddVectorData(self): ps = pyjet.ParticleSystemData2() ps.resize(12) a0 = ps.addVectorData((2.0, 4.0)) a1 = ps.addVectorData((9.0, -2.0)) self.assertEqual(ps.numberOfParticles, 12) self.assertEqual(a0, 3) self.assertEqual(a1, 4) as0 = np.array(ps.vectorDataAt(a0)) for val in as0: self.assertEqual(val.tolist(), [2.0, 4.0]) as1 = np.array(ps.vectorDataAt(a1)) for val in as1: self.assertEqual(val.tolist(), [9.0, -2.0]) def testAddParticles(self): ps = pyjet.ParticleSystemData2() ps.resize(12) ps.addParticles([(1.0, 2.0), (4.0, 5.0)], [(7.0, 8.0), (8.0, 7.0)], [(5.0, 4.0), (2.0, 1.0)]) self.assertEqual(ps.numberOfParticles, 14) p = np.array(ps.positions) v = np.array(ps.velocities) f = np.array(ps.forces) self.assertEqual([1.0, 2.0], p[12].tolist()) self.assertEqual([4.0, 5.0], p[13].tolist()) self.assertEqual([7.0, 8.0], v[12].tolist()) self.assertEqual([8.0, 7.0], v[13].tolist()) self.assertEqual([5.0, 4.0], f[12].tolist()) self.assertEqual([2.0, 1.0], f[13].tolist()) class ParticleSystemData3Tests(unittest.TestCase): def testInit(self): ps = pyjet.ParticleSystemData3() self.assertEqual(ps.numberOfParticles, 0) ps2 = pyjet.ParticleSystemData3(100) self.assertEqual(ps2.numberOfParticles, 100) def testResize(self): ps = pyjet.ParticleSystemData3() ps.resize(12) self.assertEqual(ps.numberOfParticles, 12) def testAddScalarData(self): ps = pyjet.ParticleSystemData3() ps.resize(12) a0 = ps.addScalarData(2.0) a1 = ps.addScalarData(9.0) self.assertEqual(ps.numberOfParticles, 12) self.assertEqual(a0, 0) self.assertEqual(a1, 1) as0 = np.array(ps.scalarDataAt(a0)) for val in as0: self.assertEqual(val, 2.0) as1 = np.array(ps.scalarDataAt(a1)) for val in as1: self.assertEqual(val, 9.0) def testAddVectorData(self): ps = pyjet.ParticleSystemData3() ps.resize(12) a0 = ps.addVectorData((2.0, 4.0, -1.0)) a1 = ps.addVectorData((9.0, -2.0, 5.0)) self.assertEqual(ps.numberOfParticles, 12) self.assertEqual(a0, 3) self.assertEqual(a1, 4) as0 = np.array(ps.vectorDataAt(a0)) for val in as0: self.assertEqual(val.tolist(), [2.0, 4.0, -1.0]) as1 = np.array(ps.vectorDataAt(a1)) for val in as1: self.assertEqual(val.tolist(), [9.0, -2.0, 5.0]) def testAddParticles(self): ps = pyjet.ParticleSystemData3() ps.resize(12) ps.addParticles([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)], [(7.0, 8.0, 9.0), (8.0, 7.0, 6.0)], [(5.0, 4.0, 3.0), (2.0, 1.0, 3.0)]) self.assertEqual(ps.numberOfParticles, 14) p = np.array(ps.positions) v = np.array(ps.velocities) f = np.array(ps.forces) self.assertEqual([1.0, 2.0, 3.0], p[12].tolist()) self.assertEqual([4.0, 5.0, 6.0], p[13].tolist()) 
self.assertEqual([7.0, 8.0, 9.0], v[12].tolist()) self.assertEqual([8.0, 7.0, 6.0], v[13].tolist()) self.assertEqual([5.0, 4.0, 3.0], f[12].tolist()) self.assertEqual([2.0, 1.0, 3.0], f[13].tolist()) def main(): pyjet.Logging.mute() unittest.main() if __name__ == '__main__': main()
29.567901
78
0.571399
[ "MIT" ]
Whitemane/fluid-engine-dev
src/tests/python_tests/particle_system_data_tests.py
4,790
Python
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ The RPM-related plugins need to be composed in a specific way with one another, and with the plugin that handles shadowing proxied binaries. This here is the easiest implementation, which is simple at the cost of tight coupling. TECH DEBT ALERT: As we add support for other plugins and package managers, this will no longer be adequate. Let's be careful not to make this a kitchen-sink method, and instead devise a more flexible means of composing plugins. Specifically: - The repo server and versionlock plugins can be tightly coupled with no harm to maintainability (i.e. the implementations may stay the same, but could be hidden behind a tiny common wrapper like this one) - To combine the "shadowed proxied binaries" plugin with the package manager plugin(s), one would need a declaration layer for plugins, explicit or implicit sequencing for who gets to declare first, and an evaluation layer that consumes the declarations and outputs an `Iterable[NspawnPlugins]`. To make this more specific, this would likely involve giving a true class interface to the plugins, and using that to encode the desired dataflow. """ from typing import Iterable from antlir.common import set_new_key from antlir.fs_utils import ANTLIR_DIR, RPM_DEFAULT_SNAPSHOT_FOR_INSTALLER_DIR from antlir.nspawn_in_subvol.args import ( AttachAntlirDirMode, NspawnPluginArgs, _NspawnOpts, ) from antlir.nspawn_in_subvol.common import AttachAntlirDirError from . import NspawnPlugin from .attach_antlir_dir import AttachAntlirDir from .repo_servers import RepoServers from .shadow_paths import ShadowPaths from .yum_dnf_versionlock import YumDnfVersionlock def _get_snapshot_dir(opts: _NspawnOpts, plugin_args: NspawnPluginArgs): # Shadow RPM installers by default, when running as "root". It is ugly # to condition this on "root", but in practice, it is a significant # costs savings for non-root runs -- no need to start repo servers and # shadow bind mounts that (`sudo` aside) would not get used. if ( plugin_args.attach_antlir_dir != AttachAntlirDirMode.OFF and not opts.layer.path(ANTLIR_DIR).exists() and opts.subvolume_on_disk # pyre-fixme[16]: `SubvolumeOnDisk` has no attribute # `build_appliance_path`. and opts.subvolume_on_disk.build_appliance_path ): return ( opts.subvolume_on_disk.build_appliance_path / RPM_DEFAULT_SNAPSHOT_FOR_INSTALLER_DIR.strip_leading_slashes() ) if plugin_args.attach_antlir_dir == AttachAntlirDirMode.EXPLICIT_ON: raise AttachAntlirDirError( "ERROR: Could not attach antlir dir. Please" "check to make sure that you do not have an existing antlir " "directory in your image and that the image has a " "discoverable build appliance (usually through its flavor)." ) return opts.layer.path(RPM_DEFAULT_SNAPSHOT_FOR_INSTALLER_DIR) def rpm_nspawn_plugins( *, opts: _NspawnOpts, plugin_args: NspawnPluginArgs ) -> Iterable[NspawnPlugin]: serve_rpm_snapshots = set(plugin_args.serve_rpm_snapshots) shadow_paths = [*plugin_args.shadow_paths] default_snapshot_dir = _get_snapshot_dir(opts, plugin_args) shadow_paths_allow_unmatched = [] if ( plugin_args.shadow_proxied_binaries and opts.user.pw_name == "root" and default_snapshot_dir.exists() ): # This can run as non-root since `_set_up_rpm_repo_snapshots` makes # this a world-readable directory. 
for prog_name in sorted(default_snapshot_dir.listdir()): # Here, we need container, not host paths snapshot_dir = RPM_DEFAULT_SNAPSHOT_FOR_INSTALLER_DIR / prog_name serve_rpm_snapshots.add(snapshot_dir) shadow_paths.append( (prog_name, snapshot_dir / prog_name / "bin" / prog_name) ) if plugin_args.attach_antlir_dir == AttachAntlirDirMode.DEFAULT_ON: shadow_paths_allow_unmatched.append(prog_name) return ( # pyre-fixme[60]: Concatenation not yet support for multiple variadic # tuples:... *( [AttachAntlirDir()] # In default-on mode, do NOT try to attach the BA's `ANTLIR_DIR` # when the layer itself also has a `ANTLIR_DIR` -- first, this # would fail an assert in `AttachAntlirDir`, and second the # user likely wants to use the layer's `/__antlir__` anyway. if ( plugin_args.attach_antlir_dir == AttachAntlirDirMode.DEFAULT_ON and not opts.layer.path(ANTLIR_DIR).exists() ) or plugin_args.attach_antlir_dir == AttachAntlirDirMode.EXPLICIT_ON else [] ), # This handles `ShadowPaths` even though it's not # RPM-specific because the two integrate -- a stacked diff # will add a default behavior to shadow the OS # `yum` / `dnf` binaries with wrappers that talk to our # repo servers in `nspawn_in_subvol` containers. *( [ ShadowPaths( shadow_paths, shadow_paths_allow_unmatched, ) ] if shadow_paths else [] ), *( [ *( [ YumDnfVersionlock( plugin_args.snapshots_and_versionlocks, serve_rpm_snapshots, ) ] if plugin_args.snapshots_and_versionlocks else [] ), RepoServers(serve_rpm_snapshots), ] if serve_rpm_snapshots else () ), )
39.441558
79
0.659532
[ "MIT" ]
baioc/antlir
antlir/nspawn_in_subvol/plugins/rpm.py
6,074
Python
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import os from pants.backend.python.subsystems.ipython import IPython from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest from pants.backend.python.util_rules.pex import Pex, PexRequest from pants.backend.python.util_rules.pex_environment import PexEnvironment from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest from pants.backend.python.util_rules.python_sources import ( PythonSourceFiles, PythonSourceFilesRequest, ) from pants.core.goals.repl import ReplImplementation, ReplRequest from pants.engine.addresses import Addresses from pants.engine.fs import Digest, MergeDigests from pants.engine.rules import Get, MultiGet, collect_rules, rule from pants.engine.unions import UnionRule from pants.util.logging import LogLevel class PythonRepl(ReplImplementation): name = "python" @rule(level=LogLevel.DEBUG) async def create_python_repl_request(repl: PythonRepl, pex_env: PexEnvironment) -> ReplRequest: # Note that we get an intermediate PexRequest here (instead of going straight to a Pex) so # that we can get the interpreter constraints for use in local_dists_request. requirements_pex_request = await Get( PexRequest, PexFromTargetsRequest, PexFromTargetsRequest.for_requirements( (tgt.address for tgt in repl.targets), internal_only=True ), ) requirements_request = Get(Pex, PexRequest, requirements_pex_request) local_dists_request = Get( LocalDistsPex, LocalDistsPexRequest( Addresses(tgt.address for tgt in repl.targets), interpreter_constraints=requirements_pex_request.interpreter_constraints, ), ) sources_request = Get( PythonSourceFiles, PythonSourceFilesRequest(repl.targets, include_files=True) ) requirements_pex, local_dists, sources = await MultiGet( requirements_request, local_dists_request, sources_request ) merged_digest = await Get( Digest, MergeDigests( (requirements_pex.digest, local_dists.pex.digest, sources.source_files.snapshot.digest) ), ) complete_pex_env = pex_env.in_workspace() args = complete_pex_env.create_argv( repl.in_chroot(requirements_pex.name), python=requirements_pex.python ) chrooted_source_roots = [repl.in_chroot(sr) for sr in sources.source_roots] extra_env = { **complete_pex_env.environment_dict(python_configured=requirements_pex.python is not None), "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots), "PEX_PATH": repl.in_chroot(local_dists.pex.name), } return ReplRequest(digest=merged_digest, args=args, extra_env=extra_env) class IPythonRepl(ReplImplementation): name = "ipython" @rule(level=LogLevel.DEBUG) async def create_ipython_repl_request( repl: IPythonRepl, ipython: IPython, pex_env: PexEnvironment ) -> ReplRequest: # Note that we get an intermediate PexRequest here (instead of going straight to a Pex) so # that we can get the interpreter constraints for use in ipython_request/local_dists_request. 
requirements_pex_request = await Get( PexRequest, PexFromTargetsRequest, PexFromTargetsRequest.for_requirements( (tgt.address for tgt in repl.targets), internal_only=True ), ) requirements_request = Get(Pex, PexRequest, requirements_pex_request) sources_request = Get( PythonSourceFiles, PythonSourceFilesRequest(repl.targets, include_files=True) ) ipython_request = Get( Pex, PexRequest( output_filename="ipython.pex", main=ipython.main, requirements=ipython.pex_requirements(), interpreter_constraints=requirements_pex_request.interpreter_constraints, internal_only=True, ), ) requirements_pex, sources, ipython_pex = await MultiGet( requirements_request, sources_request, ipython_request ) local_dists = await Get( LocalDistsPex, LocalDistsPexRequest( [tgt.address for tgt in repl.targets], interpreter_constraints=requirements_pex_request.interpreter_constraints, sources=sources, ), ) merged_digest = await Get( Digest, MergeDigests( ( requirements_pex.digest, local_dists.pex.digest, local_dists.remaining_sources.source_files.snapshot.digest, ipython_pex.digest, ) ), ) complete_pex_env = pex_env.in_workspace() args = list( complete_pex_env.create_argv(repl.in_chroot(ipython_pex.name), python=ipython_pex.python) ) if ipython.options.ignore_cwd: args.append("--ignore-cwd") chrooted_source_roots = [repl.in_chroot(sr) for sr in sources.source_roots] extra_env = { **complete_pex_env.environment_dict(python_configured=ipython_pex.python is not None), "PEX_PATH": os.pathsep.join( [ repl.in_chroot(requirements_pex_request.output_filename), repl.in_chroot(local_dists.pex.name), ] ), "PEX_EXTRA_SYS_PATH": os.pathsep.join(chrooted_source_roots), } return ReplRequest(digest=merged_digest, args=args, extra_env=extra_env) def rules(): return [ *collect_rules(), UnionRule(ReplImplementation, PythonRepl), UnionRule(ReplImplementation, IPythonRepl), ]
34.442424
99
0.705965
[ "Apache-2.0" ]
Eric-Arellano/pants
src/python/pants/backend/python/goals/repl.py
5,683
Python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Executes Keras benchmarks and accuracy tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl import flags from absl.testing import flagsaver import tensorflow as tf # pylint: disable=g-bad-import-order FLAGS = flags.FLAGS class KerasBenchmark(tf.test.Benchmark): """Base benchmark class with methods to simplify testing.""" local_flags = None def __init__(self, output_dir=None, default_flags=None, flag_methods=None): self.output_dir = output_dir self.default_flags = default_flags or {} self.flag_methods = flag_methods or {} if not output_dir: output_dir = '/tmp/' def _get_model_dir(self, folder_name): return os.path.join(self.output_dir, folder_name) def _setup(self): """Sets up and resets flags before each test.""" tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG) if KerasBenchmark.local_flags is None: for flag_method in self.flag_methods: flag_method() # Loads flags to get defaults to then override. List cannot be empty. flags.FLAGS(['foo']) # Overrides flag values with defaults for the class of tests. for k, v in self.default_flags.items(): setattr(FLAGS, k, v) saved_flag_values = flagsaver.save_flag_values() KerasBenchmark.local_flags = saved_flag_values else: flagsaver.restore_flag_values(KerasBenchmark.local_flags) def _report_benchmark(self, stats, wall_time_sec, top_1_max=None, top_1_min=None, log_steps=None, total_batch_size=None, warmup=1): """Report benchmark results by writing to local protobuf file. Args: stats: dict returned from keras models with known entries. wall_time_sec: the during of the benchmark execution in seconds top_1_max: highest passing level for top_1 accuracy. top_1_min: lowest passing level for top_1 accuracy. log_steps: How often the log was created for stats['step_timestamp_log']. total_batch_size: Global batch-size. warmup: number of entries in stats['step_timestamp_log'] to ignore. """ metrics = [] if 'accuracy_top_1' in stats: metrics.append({'name': 'accuracy_top_1', 'value': stats['accuracy_top_1'], 'min_value': top_1_min, 'max_value': top_1_max}) metrics.append({'name': 'top_1_train_accuracy', 'value': stats['training_accuracy_top_1']}) if (warmup and 'step_timestamp_log' in stats and len(stats['step_timestamp_log']) > warmup): # first entry in the time_log is start of step 1. 
The rest of the # entries are the end of each step recorded time_log = stats['step_timestamp_log'] elapsed = time_log[-1].timestamp - time_log[warmup].timestamp num_examples = ( total_batch_size * log_steps * (len(time_log) - warmup - 1)) examples_per_sec = num_examples / elapsed metrics.append({'name': 'exp_per_second', 'value': examples_per_sec}) if 'avg_exp_per_second' in stats: metrics.append({'name': 'avg_exp_per_second', 'value': stats['avg_exp_per_second']}) self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics)
39.018692
80
0.659641
[ "Apache-2.0" ]
LinMiaoShuSheng/models
official/resnet/keras/keras_benchmark.py
4,175
Python
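For reference, the examples-per-second figure that `_report_benchmark` derives from `stats['step_timestamp_log']` in the Keras benchmark file above can be reproduced in isolation. This is only an illustrative sketch: `BatchTimestamp`, the batch size, the logging interval, and the timestamps are made-up stand-ins for whatever the benchmark callbacks actually record.

from collections import namedtuple

# Hypothetical stand-in for the entries of stats['step_timestamp_log'];
# only the `timestamp` attribute is used by _report_benchmark.
BatchTimestamp = namedtuple('BatchTimestamp', ['batch_index', 'timestamp'])

total_batch_size = 128   # assumed global batch size
log_steps = 100          # assumed logging interval
warmup = 1               # first entry marks the start of step 1 and is skipped

# Five log entries, 10 seconds apart (toy values for illustration only).
time_log = [BatchTimestamp(i * log_steps, 10.0 * i) for i in range(5)]

elapsed = time_log[-1].timestamp - time_log[warmup].timestamp                # 30.0 s
num_examples = total_batch_size * log_steps * (len(time_log) - warmup - 1)   # 128 * 100 * 3
examples_per_sec = num_examples / elapsed
print(examples_per_sec)  # 1280.0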
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test Package set up."""

__author__ = '[email protected] (Ali Afshar)'

import oauth2client.util


def setup_package():
    """Run on testing package."""
    oauth2client.util.positional_parameters_enforcement = 'EXCEPTION'
34.909091
74
0.75651
[ "Apache-2.0" ]
1ap/google-api-python-client
tests/__init__.py
768
Python
# -*- coding: utf-8 -*- # # pytest-dasktest documentation build configuration file, created by # sphinx-quickstart on Thu Oct 1 00:43:18 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.ifconfig', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pytest-dasktest' copyright = u'2015, Marius van Niekerk' author = u'Marius van Niekerk' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1.0' # The full version, including alpha/beta/rc tags. release = '0.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. 
See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'pytest-cookiecutterplugin_namedoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
#'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'pytest-cookiecutterplugin_name.tex', u'pytest-\\{\\{cookiecutter.plugin\\_name\\}\\} Documentation', u'\\{\\{cookiecutter.full\\_name\\}\\}', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pytest-cookiecutterplugin_name', u'pytest-dasktest Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'pytest-cookiecutterplugin_name', u'pytest-dasktest Documentation', author, 'pytest-cookiecutterplugin_name', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
32.801394
116
0.719567
[ "MIT" ]
mariusvniekerk/pytest-dask
docs/conf.py
9,414
Python
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import sys sys.path.append('./') from update import BasicUpdateBlock, SmallUpdateBlock from extractor import BasicEncoder, SmallEncoder from corr import CorrBlock, AlternateCorrBlock from util import bilinear_sampler, coords_grid, upflow8 try: autocast = torch.cuda.amp.autocast except: # dummy autocast for PyTorch < 1.6 class autocast: def __init__(self, enabled): pass def __enter__(self): pass def __exit__(self, *args): pass class RAFT(nn.Module): def __init__(self, args): super(RAFT, self).__init__() self.args = args if args.small: self.hidden_dim = hdim = 96 self.context_dim = cdim = 64 args.corr_levels = 4 args.corr_radius = 3 else: self.hidden_dim = hdim = 128 self.context_dim = cdim = 128 args.corr_levels = 4 args.corr_radius = 4 if 'dropout' not in self.args: self.args.dropout = 0 if 'alternate_corr' not in self.args: self.args.alternate_corr = False # feature network, context network, and update block if args.small: self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout) self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout) self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim) else: self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout) self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout) self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim) def freeze_bn(self): for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() def initialize_flow(self, img): """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0""" N, C, H, W = img.shape coords0 = coords_grid(N, H//8, W//8).to(img.device) coords1 = coords_grid(N, H//8, W//8).to(img.device) # optical flow computed as difference: flow = coords1 - coords0 return coords0, coords1 def upsample_flow(self, flow, mask): """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """ N, _, H, W = flow.shape mask = mask.view(N, 1, 9, 8, 8, H, W) mask = torch.softmax(mask, dim=2) up_flow = F.unfold(8 * flow, [3,3], padding=1) up_flow = up_flow.view(N, 2, 9, 1, 1, H, W) up_flow = torch.sum(mask * up_flow, dim=2) up_flow = up_flow.permute(0, 1, 4, 2, 5, 3) return up_flow.reshape(N, 2, 8*H, 8*W) def forward(self, image1): """ get featmap for one frame """ image1 = 2 * (image1 / 255.0) - 1.0 image1 = image1.contiguous() hdim = self.hidden_dim cdim = self.context_dim # run the feature network with autocast(enabled=self.args.mixed_precision): fmap1 = self.fnet(image1) fmap1 = fmap1.float() return fmap1 def old_forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False): """ Estimate optical flow between pair of frames """ image1 = 2 * (image1 / 255.0) - 1.0 image2 = 2 * (image2 / 255.0) - 1.0 image1 = image1.contiguous() image2 = image2.contiguous() hdim = self.hidden_dim cdim = self.context_dim # run the feature network with autocast(enabled=self.args.mixed_precision): fmap1, fmap2 = self.fnet([image1, image2]) fmap1 = fmap1.float() fmap2 = fmap2.float() if self.args.alternate_corr: corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius) else: corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius) # run the context network with autocast(enabled=self.args.mixed_precision): cnet = self.cnet(image1) net, inp = torch.split(cnet, [hdim, cdim], dim=1) net = torch.tanh(net) inp = torch.relu(inp) coords0, coords1 = 
self.initialize_flow(image1) if flow_init is not None: coords1 = coords1 + flow_init flow_predictions = [] for itr in range(iters): coords1 = coords1.detach() corr = corr_fn(coords1) # index correlation volume flow = coords1 - coords0 with autocast(enabled=self.args.mixed_precision): net, up_mask, delta_flow = self.update_block(net, inp, corr, flow) # F(t+1) = F(t) + \Delta(t) coords1 = coords1 + delta_flow # upsample predictions if up_mask is None: flow_up = upflow8(coords1 - coords0) else: flow_up = self.upsample_flow(coords1 - coords0, up_mask) flow_predictions.append(flow_up) if test_mode: corr = corr_fn(coords1) # index correlation volume # feat = torch.cat([inp, corr], dim=1) feat = inp return coords1 - coords0, flow_up, (feat, fmap1, fmap2) return flow_predictions
34.024691
102
0.583817
[ "MIT" ]
aharley/track_check_repeat
nets/raft_core/backraft.py
5,512
Python
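The RAFT module above wraps `torch.cuda.amp.autocast` in a try/except so the file still imports on PyTorch builds older than 1.6, where mixed-precision autocasting is unavailable. The compatibility shim can be exercised on its own; this sketch shows only that pattern (using the narrower `AttributeError` instead of a bare except), assumes `torch` is installed, and runs on CPU because autocasting is disabled.

import torch

try:
    # Available from PyTorch 1.6 onwards.
    autocast = torch.cuda.amp.autocast
except AttributeError:
    # Dummy no-op context manager for older PyTorch versions.
    class autocast:
        def __init__(self, enabled):
            pass

        def __enter__(self):
            pass

        def __exit__(self, *args):
            pass

# Either way, call sites can use the same syntax:
with autocast(enabled=False):
    x = torch.ones(2, 2) * 3.0
print(x.sum())  # tensor(12.)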
#!/usr/bin/env python import subprocess from pathlib import Path from distutils.cmd import Command from setuptools import setup, find_packages # pylint: disable=unused-import import fastentrypoints # noqa: F401 # pylint: enable=unused-import import howdoi class Lint(Command): """A custom command to run Flake8 on all Python source files. """ description = 'run Flake8 on Python source files' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): commands = {'Flake8': 'flake8 --config=.flake8rc .'.split(), 'Pylint': 'pylint --rcfile=.pylintrc howdoi'.split()} for linter, command in commands.items(): try: print(f'\nRunning {linter}...') subprocess.check_call(command) print(f'No lint errors found by {linter}') except FileNotFoundError: print(f'{linter} not installed') except subprocess.CalledProcessError: pass def read(*names): values = {} for name in names: value = '' for extension in ('.txt', '.md'): filename = name + extension if Path(filename).is_file(): with open(filename) as in_file: # pylint: disable=unspecified-encoding value = in_file.read() break values[name] = value return values # pylint: disable=consider-using-f-string long_description = """ %(README)s # News %(CHANGES)s """ % read('README', 'CHANGES') # pylint: enable=consider-using-f-string setup( name='howdoi', version=howdoi.__version__, description='Instant coding answers via the command line', long_description=long_description, long_description_content_type='text/markdown', classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Documentation", ], keywords='howdoi help console command line answer', author='Benjamin Gleitzman', author_email='[email protected]', maintainer='Benjamin Gleitzman', maintainer_email='[email protected]', url='https://github.com/gleitz/howdoi', license='MIT', packages=find_packages(), entry_points={ 'console_scripts': [ 'howdoi = howdoi.howdoi:command_line_runner', ] }, install_requires=[ 'Pygments', 'cssselect', 'lxml', 'pyquery', 'requests', 'cachelib', 'appdirs', 'keep', ], cmdclass={ 'lint': Lint } )
26.627273
87
0.593377
[ "MIT" ]
AliRaza954/howdoi
setup.py
2,929
Python
from __future__ import print_function from __future__ import absolute_import # Copyright (c) 2003-2016 CORE Security Technologies # # This software is provided under under a slightly modified version # of the Apache Software License. See the accompanying LICENSE file # for more information. # # -*- mode: python; tab-width: 4 -*- # # Copyright (C) 2001 Michael Teo <[email protected]> # nmb.py - NetBIOS library # # This software is provided 'as-is', without any express or implied warranty. # In no event will the author be held liable for any damages arising from the # use of this software. # # Permission is granted to anyone to use this software for any purpose, # including commercial applications, and to alter it and redistribute it # freely, subject to the following restrictions: # # 1. The origin of this software must not be misrepresented; you must not # claim that you wrote the original software. If you use this software # in a product, an acknowledgment in the product documentation would be # appreciated but is not required. # # 2. Altered source versions must be plainly marked as such, and must not be # misrepresented as being the original software. # # 3. This notice cannot be removed or altered from any source distribution. # # Altered source done by Alberto Solino (@agsolino) import socket import string import re import select import errno from random import randint from struct import pack, unpack import time from .structure import Structure CVS_REVISION = '$Revision: 526 $' # Taken from socket module reference INADDR_ANY = '0.0.0.0' BROADCAST_ADDR = '<broadcast>' # Default port for NetBIOS name service NETBIOS_NS_PORT = 137 # Default port for NetBIOS session service NETBIOS_SESSION_PORT = 139 # Default port for SMB session service SMB_SESSION_PORT = 445 # Owner Node Type Constants NODE_B = 0x0000 NODE_P = 0x2000 NODE_M = 0x4000 NODE_RESERVED = 0x6000 NODE_GROUP = 0x8000 NODE_UNIQUE = 0x0 # Name Type Constants TYPE_UNKNOWN = 0x01 TYPE_WORKSTATION = 0x00 TYPE_CLIENT = 0x03 TYPE_SERVER = 0x20 TYPE_DOMAIN_MASTER = 0x1B TYPE_DOMAIN_CONTROLLER = 0x1C TYPE_MASTER_BROWSER = 0x1D TYPE_BROWSER = 0x1E TYPE_NETDDE = 0x1F TYPE_STATUS = 0x21 # Opcodes values OPCODE_QUERY = 0 OPCODE_REGISTRATION = 0x5 OPCODE_RELEASE = 0x6 OPCODE_WACK = 0x7 OPCODE_REFRESH = 0x8 OPCODE_REQUEST = 0 OPCODE_RESPONSE = 0x10 # NM_FLAGS NM_FLAGS_BROADCAST = 0x1 NM_FLAGS_UNICAST = 0 NM_FLAGS_RA = 0x8 NM_FLAGS_RD = 0x10 NM_FLAGS_TC = 0x20 NM_FLAGS_AA = 0x40 # QUESTION_TYPE QUESTION_TYPE_NB = 0x20 # NetBIOS general Name Service Resource Record QUESTION_TYPE_NBSTAT = 0x21 # NetBIOS NODE STATUS Resource Record # QUESTION_CLASS QUESTION_CLASS_IN = 0x1 # Internet class # RR_TYPE Resource Record Type code RR_TYPE_A = 0x1 # IP address Resource Record RR_TYPE_NS = 0x2 # Name Server Resource Record RR_TYPE_NULL = 0xA # NULL Resource Record RR_TYPE_NB = 0x20 # NetBIOS general Name Service Resource Record RR_TYPE_NBSTAT = 0x21 # NetBIOS NODE STATUS Resource Record # Resource Record Class RR_CLASS_IN = 1 # Internet class # RCODE values RCODE_FMT_ERR = 0x1 # Format Error. Request was invalidly formatted. RCODE_SRV_ERR = 0x2 # Server failure. Problem with NBNS, cannot process name. RCODE_IMP_ERR = 0x4 # Unsupported request error. Allowable only for challenging NBNS when gets an Update type # registration request. RCODE_RFS_ERR = 0x5 # Refused error. For policy reasons server will not register this name from this host. RCODE_ACT_ERR = 0x6 # Active error. Name is owned by another node. RCODE_CFT_ERR = 0x7 # Name in conflict error. 
A UNIQUE name is owned by more than one node. # NAME_FLAGS NAME_FLAGS_PRM = 0x0200 # Permanent Name Flag. If one (1) then entry is for the permanent node name. Flag is zero # (0) for all other names. NAME_FLAGS_ACT = 0x0400 # Active Name Flag. All entries have this flag set to one (1). NAME_FLAG_CNF = 0x0800 # Conflict Flag. If one (1) then name on this node is in conflict. NAME_FLAG_DRG = 0x1000 # Deregister Flag. If one (1) then this name is in the process of being deleted. NAME_TYPES = { TYPE_UNKNOWN: 'Unknown', TYPE_WORKSTATION: 'Workstation', TYPE_CLIENT: 'Client', TYPE_SERVER: 'Server', TYPE_MASTER_BROWSER: 'Master Browser', TYPE_BROWSER: 'Browser Server', TYPE_DOMAIN_MASTER: 'Domain Master' , TYPE_NETDDE: 'NetDDE Server'} # NetBIOS Session Types NETBIOS_SESSION_MESSAGE = 0x0 NETBIOS_SESSION_REQUEST = 0x81 NETBIOS_SESSION_POSITIVE_RESPONSE = 0x82 NETBIOS_SESSION_NEGATIVE_RESPONSE = 0x83 NETBIOS_SESSION_RETARGET_RESPONSE = 0x84 NETBIOS_SESSION_KEEP_ALIVE = 0x85 def strerror(errclass, errcode): if errclass == ERRCLASS_OS: return 'OS Error', str(errcode) elif errclass == ERRCLASS_QUERY: return 'Query Error', QUERY_ERRORS.get(errcode, 'Unknown error') elif errclass == ERRCLASS_SESSION: return 'Session Error', SESSION_ERRORS.get(errcode, 'Unknown error') else: return 'Unknown Error Class', 'Unknown Error' class NetBIOSError(Exception): pass class NetBIOSTimeout(Exception): def __init__(self, message = 'The NETBIOS connection with the remote host timed out.'): Exception.__init__(self, message) class NBResourceRecord: def __init__(self, data = 0): self._data = data try: if self._data: self.rr_name = (re.split('\x00',data))[0] offset = len(self.rr_name)+1 self.rr_type = unpack('>H', self._data[offset:offset+2])[0] self.rr_class = unpack('>H', self._data[offset+2: offset+4])[0] self.ttl = unpack('>L',self._data[offset+4:offset+8])[0] self.rdlength = unpack('>H', self._data[offset+8:offset+10])[0] self.rdata = self._data[offset+10:offset+10+self.rdlength] offset = self.rdlength - 2 self.unit_id = data[offset:offset+6] else: self.rr_name = '' self.rr_type = 0 self.rr_class = 0 self.ttl = 0 self.rdlength = 0 self.rdata = '' self.unit_id = '' except Exception: raise NetBIOSError( 'Wrong packet format ' ) def set_rr_name(self, name): self.rr_name = name def set_rr_type(self, name): self.rr_type = name def set_rr_class(self,cl): self.rr_class = cl def set_ttl(self,ttl): self.ttl = ttl def set_rdata(self,rdata): self.rdata = rdata self.rdlength = len(rdata) def get_unit_id(self): return self.unit_id def get_rr_name(self): return self.rr_name def get_rr_class(self): return self.rr_class def get_ttl(self): return self.ttl def get_rdlength(self): return self.rdlength def get_rdata(self): return self.rdata def rawData(self): return self.rr_name + pack('!HHLH',self.rr_type, self.rr_class, self.ttl, self.rdlength) + self.rdata class NBNodeStatusResponse(NBResourceRecord): def __init__(self, data = 0): NBResourceRecord.__init__(self,data) self.num_names = 0 self.node_names = [ ] self.statstics = '' self.mac = '00-00-00-00-00-00' try: if data: self._data = self.get_rdata() self.num_names = unpack('>B',self._data[:1])[0] offset = 1 for i in range(0, self.num_names): name = self._data[offset:offset + 15] type,flags = unpack('>BH', self._data[offset + 15: offset + 18]) offset += 18 self.node_names.append(NBNodeEntry(name, type ,flags)) self.set_mac_in_hexa(self.get_unit_id()) except Exception: raise NetBIOSError( 'Wrong packet format ' ) def set_mac_in_hexa(self, data): data_aux = '' for d in data: if data_aux 
== '': data_aux = '%02x' % ord(d) else: data_aux += '-%02x' % ord(d) self.mac = string.upper(data_aux) def get_num_names(self): return self.num_names def get_mac(self): return self.mac def set_num_names(self, num): self.num_names = num def get_node_names(self): return self.node_names def add_node_name(self,node_names): self.node_names.append(node_names) self.num_names += 1 def rawData(self): res = pack('!B', self.num_names ) for i in range(0, self.num_names): res += self.node_names[i].rawData() class NBPositiveNameQueryResponse(NBResourceRecord): def __init__(self, data = 0): NBResourceRecord.__init__(self, data) self.addr_entries = [ ] if data: self._data = self.get_rdata() _qn_length, qn_name, qn_scope = decode_name(data) self._netbios_name = string.rstrip(qn_name[:-1]) + qn_scope self._name_type = ord(qn_name[-1]) self._nb_flags = unpack('!H', self._data[:2]) offset = 2 while offset<len(self._data): self.addr_entries.append('%d.%d.%d.%d' % unpack('4B', (self._data[offset:offset+4]))) offset += 4 def get_netbios_name(self): return self._netbios_name def get_name_type(self): return self._name_type def get_addr_entries(self): return self.addr_entries class NetBIOSPacket: """ This is a packet as defined in RFC 1002 """ def __init__(self, data = 0): self.name_trn_id = 0x0 # Transaction ID for Name Service Transaction. # Requestor places a unique value for each active # transaction. Responder puts NAME_TRN_ID value # from request packet in response packet. self.opcode = 0 # Packet type code self.nm_flags = 0 # Flags for operation self.rcode = 0 # Result codes of request. self.qdcount = 0 # Unsigned 16 bit integer specifying the number of entries in the question section of a Name self.ancount = 0 # Unsigned 16 bit integer specifying the number of # resource records in the answer section of a Name # Service packet. self.nscount = 0 # Unsigned 16 bit integer specifying the number of # resource records in the authority section of a # Name Service packet. self.arcount = 0 # Unsigned 16 bit integer specifying the number of # resource records in the additional records # section of a Name Service packeT. 
self.questions = '' self.answers = '' if data == 0: self._data = '' else: try: self._data = data self.opcode = ord(data[2]) >> 3 self.nm_flags = ((ord(data[2]) & 0x3) << 4) | ((ord(data[3]) & 0xf0) >> 4) self.name_trn_id = unpack('>H', self._data[:2])[0] self.rcode = ord(data[3]) & 0x0f self.qdcount = unpack('>H', self._data[4:6])[0] self.ancount = unpack('>H', self._data[6:8])[0] self.nscount = unpack('>H', self._data[8:10])[0] self.arcount = unpack('>H', self._data[10:12])[0] self.answers = self._data[12:] except Exception: raise NetBIOSError( 'Wrong packet format ' ) def set_opcode(self, opcode): self.opcode = opcode def set_trn_id(self, trn): self.name_trn_id = trn def set_nm_flags(self, nm_flags): self.nm_flags = nm_flags def set_rcode(self, rcode): self.rcode = rcode def addQuestion(self, question, qtype, qclass): self.qdcount += 1 self.questions += question + pack('!HH',qtype,qclass) def get_trn_id(self): return self.name_trn_id def get_rcode(self): return self.rcode def get_nm_flags(self): return self.nm_flags def get_opcode(self): return self.opcode def get_qdcount(self): return self.qdcount def get_ancount(self): return self.ancount def get_nscount(self): return self.nscount def get_arcount(self): return self.arcount def rawData(self): secondWord = self.opcode << 11 secondWord |= self.nm_flags << 4 secondWord |= self.rcode data = pack('!HHHHHH', self.name_trn_id, secondWord , self.qdcount, self.ancount, self.nscount, self.arcount) + self.questions + self.answers return data def get_answers(self): return self.answers class NBHostEntry: def __init__(self, nbname, nametype, ip): self.__nbname = nbname self.__nametype = nametype self.__ip = ip def get_nbname(self): return self.__nbname def get_nametype(self): return self.__nametype def get_ip(self): return self.__ip def __repr__(self): return '<NBHostEntry instance: NBname="' + self.__nbname + '", IP="' + self.__ip + '">' class NBNodeEntry: def __init__(self, nbname, nametype, flags): self.__nbname = string.ljust(nbname,17) self.__nametype = nametype self.__flags = flags self.__isgroup = flags & 0x8000 self.__nodetype = flags & 0x6000 self.__deleting = flags & 0x1000 self.__isconflict = flags & 0x0800 self.__isactive = flags & 0x0400 self.__ispermanent = flags & 0x0200 def get_nbname(self): return self.__nbname def get_nametype(self): return self.__nametype def is_group(self): return self.__isgroup def get_nodetype(self): return self.__nodetype def is_deleting(self): return self.__deleting def is_conflict(self): return self.__isconflict def is_active(self): return self.__isactive def is_permanent(self): return self.__ispermanent def set_nbname(self, name): self.__nbname = string.ljust(name,17) def set_nametype(self, type): self.__nametype = type def set_flags(self,flags): self.__flags = flags def __repr__(self): s = '<NBNodeEntry instance: NBname="' + self.__nbname + '" NameType="' + NAME_TYPES[self.__nametype] + '"' if self.__isactive: s += ' ACTIVE' if self.__isgroup: s += ' GROUP' if self.__isconflict: s += ' CONFLICT' if self.__deleting: s += ' DELETING' return s def rawData(self): return self.__nbname + pack('!BH',self.__nametype, self.__flags) class NetBIOS: # Creates a NetBIOS instance without specifying any default NetBIOS domain nameserver. # All queries will be sent through the servport. 
def __init__(self, servport = NETBIOS_NS_PORT): self.__servport = NETBIOS_NS_PORT self.__nameserver = None self.__broadcastaddr = BROADCAST_ADDR self.mac = '00-00-00-00-00-00' def _setup_connection(self, dstaddr): port = randint(10000, 60000) af, socktype, proto, _canonname, _sa = socket.getaddrinfo(dstaddr, port, socket.AF_INET, socket.SOCK_DGRAM)[0] s = socket.socket(af, socktype, proto) has_bind = 1 for _i in range(0, 10): # We try to bind to a port for 10 tries try: s.bind(( INADDR_ANY, randint(10000, 60000) )) s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) has_bind = 1 except socket.error: pass if not has_bind: raise NetBIOSError( 'Cannot bind to a good UDP port', ERRCLASS_OS, errno.EAGAIN) self.__sock = s # Set the default NetBIOS domain nameserver. def set_nameserver(self, nameserver): self.__nameserver = nameserver # Return the default NetBIOS domain nameserver, or None if none is specified. def get_nameserver(self): return self.__nameserver # Set the broadcast address to be used for query. def set_broadcastaddr(self, broadcastaddr): self.__broadcastaddr = broadcastaddr # Return the broadcast address to be used, or BROADCAST_ADDR if default broadcast address is used. def get_broadcastaddr(self): return self.__broadcastaddr # Returns a NBPositiveNameQueryResponse instance containing the host information for nbname. # If a NetBIOS domain nameserver has been specified, it will be used for the query. # Otherwise, the query is broadcasted on the broadcast address. def gethostbyname(self, nbname, qtype = TYPE_WORKSTATION, scope = None, timeout = 1): return self.__queryname(nbname, self.__nameserver, qtype, scope, timeout) # Returns a list of NBNodeEntry instances containing node status information for nbname. # If destaddr contains an IP address, then this will become an unicast query on the destaddr. # Raises NetBIOSTimeout if timeout (in secs) is reached. 
# Raises NetBIOSError for other errors def getnodestatus(self, nbname, destaddr = None, type = TYPE_WORKSTATION, scope = None, timeout = 1): if destaddr: return self.__querynodestatus(nbname, destaddr, type, scope, timeout) else: return self.__querynodestatus(nbname, self.__nameserver, type, scope, timeout) def getnetbiosname(self, ip): entries = self.getnodestatus('*',ip) entries = filter(lambda x:x.get_nametype() == TYPE_SERVER, entries) return entries[0].get_nbname().strip() def getmacaddress(self): return self.mac def __queryname(self, nbname, destaddr, qtype, scope, timeout, retries = 0): self._setup_connection(destaddr) trn_id = randint(1, 32000) p = NetBIOSPacket() p.set_trn_id(trn_id) netbios_name = nbname.upper() qn_label = encode_name(netbios_name, qtype, scope) p.addQuestion(qn_label, QUESTION_TYPE_NB, QUESTION_CLASS_IN) p.set_nm_flags(NM_FLAGS_RD) if not destaddr: p.set_nm_flags(p.get_nm_flags() | NM_FLAGS_BROADCAST) destaddr = self.__broadcastaddr req = p.rawData() tries = retries while 1: self.__sock.sendto(req, ( destaddr, self.__servport )) try: ready, _, _ = select.select([ self.__sock.fileno() ], [ ] , [ ], timeout) if not ready: if tries: # Retry again until tries == 0 tries -= 1 else: raise NetBIOSTimeout else: data, _ = self.__sock.recvfrom(65536, 0) res = NetBIOSPacket(data) if res.get_trn_id() == p.get_trn_id(): if res.get_rcode(): if res.get_rcode() == 0x03: return None else: raise NetBIOSError( 'Negative name query response', ERRCLASS_QUERY, res.get_rcode()) if res.get_ancount() != 1: raise NetBIOSError( 'Malformed response') return NBPositiveNameQueryResponse(res.get_answers()) except select.error as ex: if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: raise NetBIOSError( 'Error occurs while waiting for response', ERRCLASS_OS, ex[0]) raise def __querynodestatus(self, nbname, destaddr, type, scope, timeout): self._setup_connection(destaddr) trn_id = randint(1, 32000) p = NetBIOSPacket() p.set_trn_id(trn_id) netbios_name = string.upper(nbname) qn_label = encode_name(netbios_name, type, scope) p.addQuestion(qn_label, QUESTION_TYPE_NBSTAT, QUESTION_CLASS_IN) if not destaddr: p.set_nm_flags(NM_FLAGS_BROADCAST) destaddr = self.__broadcastaddr req = p.rawData() tries = 3 while 1: try: self.__sock.sendto(req, 0, ( destaddr, self.__servport )) ready, _, _ = select.select([ self.__sock.fileno() ], [ ] , [ ], timeout) if not ready: if tries: # Retry again until tries == 0 tries -= 1 else: raise NetBIOSTimeout else: try: data, _ = self.__sock.recvfrom(65536, 0) except Exception as e: raise NetBIOSError("recvfrom error: %s" % str(e)) self.__sock.close() res = NetBIOSPacket(data) if res.get_trn_id() == p.get_trn_id(): if res.get_rcode(): if res.get_rcode() == 0x03: # I'm just guessing here raise NetBIOSError("Cannot get data from server") else: raise NetBIOSError( 'Negative name query response', ERRCLASS_QUERY, res.get_rcode()) answ = NBNodeStatusResponse(res.get_answers()) self.mac = answ.get_mac() return answ.get_node_names() except select.error as ex: if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: raise NetBIOSError( 'Error occurs while waiting for response', ERRCLASS_OS, ex[0]) except socket.error as ex: raise NetBIOSError('Connection error: %s' % str(ex)) # Perform first and second level encoding of name as specified in RFC 1001 (Section 4) def encode_name(name, type, scope): if name == '*': name += '\0' * 15 elif len(name) > 15: name = name[:15] + chr(type) else: name = string.ljust(name, 15) + chr(type) encoded_name = chr(len(name) * 2) + re.sub('.', 
_do_first_level_encoding, name) if scope: encoded_scope = '' for s in string.split(scope, '.'): encoded_scope = encoded_scope + chr(len(s)) + s return encoded_name + encoded_scope + '\0' else: return encoded_name + '\0' # Internal method for use in encode_name() def _do_first_level_encoding(m): s = ord(m.group(0)) return string.uppercase[s >> 4] + string.uppercase[s & 0x0f] def decode_name(name): name_length = ord(name[0]) assert name_length == 32 decoded_name = re.sub('..', _do_first_level_decoding, name[1:33]) if name[33] == '\0': return 34, decoded_name, '' else: decoded_domain = '' offset = 34 while 1: domain_length = ord(name[offset]) if domain_length == 0: break decoded_domain = '.' + name[offset:offset + domain_length] offset += domain_length return offset + 1, decoded_name, decoded_domain def _do_first_level_decoding(m): s = m.group(0) return chr(((ord(s[0]) - ord('A')) << 4) | (ord(s[1]) - ord('A'))) class NetBIOSSessionPacket: def __init__(self, data = 0): self.type = 0x0 self.flags = 0x0 self.length = 0x0 if data == 0: self._trailer = '' else: try: self.type = ord(data[0]) if self.type == NETBIOS_SESSION_MESSAGE: self.length = ord(data[1]) << 16 | (unpack('!H', data[2:4])[0]) else: self.flags = ord(data[1]) self.length = unpack('!H', data[2:4])[0] self._trailer = data[4:] except: raise NetBIOSError( 'Wrong packet format ' ) def set_type(self, type): self.type = type def get_type(self): return self.type def rawData(self): if self.type == NETBIOS_SESSION_MESSAGE: data = pack('!BBH',self.type,self.length >> 16,self.length & 0xFFFF) + self._trailer else: data = pack('!BBH',self.type,self.flags,self.length) + self._trailer return data def set_trailer(self,data): self._trailer = data self.length = len(data) def get_length(self): return self.length def get_trailer(self): return self._trailer class NetBIOSSession: def __init__(self, myname, remote_name, remote_host, remote_type = TYPE_SERVER, sess_port = NETBIOS_SESSION_PORT, timeout = None, local_type = TYPE_WORKSTATION, sock = None): if len(myname) > 15: self.__myname = string.upper(myname[:15]) else: self.__myname = string.upper(myname) self.__local_type = local_type assert remote_name # if destination port SMB_SESSION_PORT and remote name *SMBSERVER, we're changing it to its IP address # helping solving the client mistake ;) if remote_name == '*SMBSERVER' and sess_port == SMB_SESSION_PORT: remote_name = remote_host # If remote name is *SMBSERVER let's try to query its name.. 
if can't be guessed, continue and hope for the best if remote_name == '*SMBSERVER': nb = NetBIOS() try: res = nb.getnetbiosname(remote_host) except: res = None pass if res is not None: remote_name = res if len(remote_name) > 15: self.__remote_name = string.upper(remote_name[:15]) else: self.__remote_name = string.upper(remote_name) self.__remote_type = remote_type self.__remote_host = remote_host if sock is not None: # We are acting as a server self._sock = sock else: self._sock = self._setup_connection((remote_host, sess_port)) if sess_port == NETBIOS_SESSION_PORT: self._request_session(remote_type, local_type, timeout) def get_myname(self): return self.__myname def get_mytype(self): return self.__local_type def get_remote_host(self): return self.__remote_host def get_remote_name(self): return self.__remote_name def get_remote_type(self): return self.__remote_type def close(self): self._sock.close() def get_socket(self): return self._sock class NetBIOSUDPSessionPacket(Structure): TYPE_DIRECT_UNIQUE = 16 TYPE_DIRECT_GROUP = 17 FLAGS_MORE_FRAGMENTS = 1 FLAGS_FIRST_FRAGMENT = 2 FLAGS_B_NODE = 0 structure = ( ('Type','B=16'), # Direct Unique Datagram ('Flags','B=2'), # FLAGS_FIRST_FRAGMENT ('ID','<H'), ('_SourceIP','>L'), ('SourceIP','"'), ('SourcePort','>H=138'), ('DataLegth','>H-Data'), ('Offset','>H=0'), ('SourceName','z'), ('DestinationName','z'), ('Data',':'), ) def getData(self): addr = self['SourceIP'].split('.') addr = [int(x) for x in addr] addr = (((addr[0] << 8) + addr[1] << 8) + addr[2] << 8) + addr[3] self['_SourceIP'] = addr return Structure.getData(self) def get_trailer(self): return self['Data'] class NetBIOSUDPSession(NetBIOSSession): def _setup_connection(self, peer): af, socktype, proto, canonname, sa = socket.getaddrinfo(peer[0], peer[1], 0, socket.SOCK_DGRAM)[0] sock = socket.socket(af, socktype, proto) sock.connect(sa) sock = socket.socket(af, socktype, proto) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind((INADDR_ANY, 138)) self.peer = peer return sock def _request_session(self, remote_type, local_type, timeout = None): pass def next_id(self): if hasattr(self, '__dgram_id'): answer = self.__dgram_id else: self.__dgram_id = randint(1,65535) answer = self.__dgram_id self.__dgram_id += 1 return answer def send_packet(self, data): # Yes... I know... self._sock.connect(self.peer) p = NetBIOSUDPSessionPacket() p['ID'] = self.next_id() p['SourceIP'] = self._sock.getsockname()[0] p['SourceName'] = encode_name(self.get_myname(), self.get_mytype(), '')[:-1] p['DestinationName'] = encode_name(self.get_remote_name(), self.get_remote_type(), '')[:-1] p['Data'] = data self._sock.sendto(str(p), self.peer) self._sock.close() self._sock = self._setup_connection(self.peer) def recv_packet(self, timeout = None): # The next loop is a workaround for a bigger problem: # When data reaches higher layers, the lower headers are lost, # and with them, for example, the source IP. Hence, SMB users # can't know where packets are comming from... 
we need a better # solution, right now, we will filter everything except packets # coming from the remote_host specified in __init__() while 1: data, peer = self._sock.recvfrom(8192) # print "peer: %r self.peer: %r" % (peer, self.peer) if peer == self.peer: break return NetBIOSUDPSessionPacket(data) class NetBIOSTCPSession(NetBIOSSession): def __init__(self, myname, remote_name, remote_host, remote_type = TYPE_SERVER, sess_port = NETBIOS_SESSION_PORT, timeout = None, local_type = TYPE_WORKSTATION, sock = None, select_poll = False): self.__select_poll = select_poll if self.__select_poll: self.read_function = self.polling_read else: self.read_function = self.non_polling_read NetBIOSSession.__init__(self, myname, remote_name, remote_host, remote_type = remote_type, sess_port = sess_port, timeout = timeout, local_type = local_type, sock=sock) def _setup_connection(self, peer): try: af, socktype, proto, canonname, sa = socket.getaddrinfo(peer[0], peer[1], 0, socket.SOCK_STREAM)[0] sock = socket.socket(af, socktype, proto) sock.connect(sa) except socket.error as e: raise socket.error("Connection error (%s:%s)" % (peer[0], peer[1]), e) return sock def send_packet(self, data): p = NetBIOSSessionPacket() p.set_type(NETBIOS_SESSION_MESSAGE) p.set_trailer(data) self._sock.send(p.rawData()) def recv_packet(self, timeout = None): data = self.__read(timeout) return NetBIOSSessionPacket(data) def _request_session(self, remote_type, local_type, timeout = None): p = NetBIOSSessionPacket() remote_name = encode_name(self.get_remote_name(), remote_type, '') myname = encode_name(self.get_myname(), local_type, '') p.set_type(NETBIOS_SESSION_REQUEST) p.set_trailer(remote_name + myname) self._sock.send(p.rawData()) while 1: p = self.recv_packet(timeout) if p.get_type() == NETBIOS_SESSION_NEGATIVE_RESPONSE: raise NetBIOSError( 'Cannot request session', ERRCLASS_SESSION, ord(p.get_trailer()[0])) elif p.get_type() == NETBIOS_SESSION_POSITIVE_RESPONSE: break else: # Ignore all other messages, most probably keepalive messages pass def polling_read(self, read_length, timeout): data = '' if timeout is None: timeout = 3600 time_left = timeout CHUNK_TIME = 0.025 bytes_left = read_length while bytes_left > 0: try: ready, _, _ = select.select([self._sock.fileno() ], [ ], [ ], 0) if not ready: if time_left <= 0: raise NetBIOSTimeout else: time.sleep(CHUNK_TIME) time_left -= CHUNK_TIME continue received = self._sock.recv(bytes_left) if len(received) == 0: raise NetBIOSError( 'Error while reading from remote', ERRCLASS_OS, None) data = data + received bytes_left = read_length - len(data) except select.error as ex: if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: raise NetBIOSError( 'Error occurs while reading from remote', ERRCLASS_OS, ex[0]) return data def non_polling_read(self, read_length, timeout): data = '' bytes_left = read_length while bytes_left > 0: try: ready, _, _ = select.select([self._sock.fileno() ], [ ], [ ], timeout) if not ready: raise NetBIOSTimeout received = self._sock.recv(bytes_left) if len(received) == 0: raise NetBIOSError( 'Error while reading from remote', ERRCLASS_OS, None) data = data + received bytes_left = read_length - len(data) except select.error as ex: if ex[0] != errno.EINTR and ex[0] != errno.EAGAIN: raise NetBIOSError( 'Error occurs while reading from remote', ERRCLASS_OS, ex[0]) return data def __read(self, timeout = None): data = self.read_function(4, timeout) type, flags, length = unpack('>ccH', data) if ord(type) == NETBIOS_SESSION_MESSAGE: length |= ord(flags) << 16 else: if 
ord(flags) & 0x01: length |= 0x10000 data2 = self.read_function(length, timeout) return data + data2 ERRCLASS_QUERY = 0x00 ERRCLASS_SESSION = 0xf0 ERRCLASS_OS = 0xff QUERY_ERRORS = { 0x01: 'Request format error. Please file a bug report.', 0x02: 'Internal server error', 0x03: 'Name does not exist', 0x04: 'Unsupported request', 0x05: 'Request refused' } SESSION_ERRORS = { 0x80: 'Not listening on called name', 0x81: 'Not listening for calling name', 0x82: 'Called name not present', 0x83: 'Sufficient resources', 0x8f: 'Unspecified error' } def main(): def get_netbios_host_by_name(name): n = NetBIOS() n.set_broadcastaddr('255.255.255.255') # To avoid use "<broadcast>" in socket for qtype in (TYPE_WORKSTATION, TYPE_CLIENT, TYPE_SERVER, TYPE_DOMAIN_MASTER, TYPE_DOMAIN_CONTROLLER): try: addrs = n.gethostbyname(name, qtype = qtype).get_addr_entries() except NetBIOSTimeout: continue else: return addrs raise Exception("Host not found") n = get_netbios_host_by_name("some-host") print(n) if __name__ == '__main__': main()
36.561546
199
0.589649
[ "BSD-3-Clause" ]
AymenSe/turicreate
deps/src/curl-7.65.1/tests/python_dependencies/impacket/nmb.py
35,940
Python
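The `main()` function at the end of the NetBIOS module above already demonstrates the intended entry point; the sketch below is that usage pattern pulled out on its own. It assumes the file is importable as `impacket.nmb`, that a host named `some-host` (a placeholder) answers on the local broadcast domain, and, because the module still leans on Python 2 helpers such as `string.upper` and `string.ljust`, that it runs under Python 2 or a patched copy.

from impacket.nmb import NetBIOS, NetBIOSTimeout, TYPE_WORKSTATION

n = NetBIOS()
n.set_broadcastaddr('255.255.255.255')  # avoid the '<broadcast>' pseudo-address

try:
    # Broadcast name query for a workstation record; returns a
    # NBPositiveNameQueryResponse, or None on a negative response.
    response = n.gethostbyname('some-host', qtype=TYPE_WORKSTATION, timeout=2)
    if response is not None:
        print(response.get_addr_entries())
except NetBIOSTimeout:
    print('No NetBIOS answer within the timeout')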
#! /usr/bin/env python
# coding:utf-8
import unittest

from kovot.response import Response
from kovot.response import ResponseTransformer
from kovot.response import ResponseSelector


class ResponseTest(unittest.TestCase):
    def test_response(self):
        text = "京都にいます"  # "I am in Kyoto"
        score = 1.2
        res = Response(text=text, score=score)
        self.assertEqual(res.text, text)
        self.assertEqual(res.score, score)


class TransformerTest(unittest.TestCase):
    def test_transformer(self):
        text = "京都にいます"  # "I am in Kyoto"
        score = 1.2
        res = Response(text=text, score=score)

        transformer = ResponseTransformer()
        self.assertEqual(transformer.transform(res), res)


class SelectorTest(unittest.TestCase):
    def test_select(self):
        x = Response(text="ひとつめ", score=1.2)  # "the first one"
        y = Response(text="ふたつめ", score=3.2)  # "the second one"
        z = Response(text="みっつめ", score=0.8)  # "the third one"

        selector = ResponseSelector()
        self.assertEqual(selector.select([x, y, z]), [y, x, z])

    def test_select_with_num(self):
        x = Response(text="ひとつめ", score=1.2)
        y = Response(text="ふたつめ", score=3.2)
        z = Response(text="みっつめ", score=0.8)

        selector = ResponseSelector()
        self.assertEqual(selector.select([x, y, z], num=2), [y, x])
29.840909
63
0.626809
[ "MIT" ]
kazh98/kovot
test/test_response.py
1,385
Python
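The tests above pin down the selector contract: `ResponseSelector.select` returns candidates ordered by descending score, and `num` truncates the result. A small usage sketch, assuming the `kovot` package is installed (the response texts here are placeholders):

from kovot.response import Response, ResponseSelector

candidates = [
    Response(text="first", score=1.2),
    Response(text="second", score=3.2),
    Response(text="third", score=0.8),
]

selector = ResponseSelector()
best = selector.select(candidates, num=1)[0]
print(best.text, best.score)  # expected: "second" 3.2 (highest score wins)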
class RGB:
    def __init__(self, red=0, green=0, blue=0):
        self.r = 0
        self.g = 0
        self.b = 0
        self.red = red
        self.green = green
        self.blue = blue

    @property
    def red(self):
        return self.r

    @red.setter
    def red(self, value):
        if isinstance(value, int):
            if 0 <= value <= 255:
                self.r = value
            else:
                raise ValueError("Int value of R is out of range 0-255: {0}".format(value))
        elif isinstance(value, float):
            if 0.0 <= value <= 1.0:
                self.r = int(value*255)
            else:
                raise ValueError("Float value of R is out of range 0.0-1.0: {0}".format(value))
        else:
            raise TypeError("Color must be int or float")

    @property
    def red_float(self):
        return self.r/255.0

    @property
    def green(self):
        return self.g

    @green.setter
    def green(self, value):
        if isinstance(value, int):
            if 0 <= value <= 255:
                self.g = value
            else:
                raise ValueError("Int value of G is out of range 0-255: {0}".format(value))
        elif isinstance(value, float):
            if 0.0 <= value <= 1.0:
                self.g = int(value*255)
            else:
                raise ValueError("Float value of G is out of range 0.0-1.0: {0}".format(value))
        else:
            raise TypeError("Color must be int or float")

    @property
    def green_float(self):
        return self.g/255.0

    @property
    def blue(self):
        return self.b

    @blue.setter
    def blue(self, value):
        if isinstance(value, int):
            if 0 <= value <= 255:
                self.b = value
            else:
                raise ValueError("Int value of B is out of range 0-255: {0}".format(value))
        elif isinstance(value, float):
            if 0.0 <= value <= 1.0:
                self.b = int(value*255)
            else:
                raise ValueError("Float value of B is out of range 0.0-1.0: {0}".format(value))
        else:
            raise TypeError("Color must be int or float")

    @property
    def blue_float(self):
        return self.b/255.0
28.329114
95
0.503575
[ "MIT" ]
reggyred/siiembeddedsign
src/embled/colors.py
2,238
Python
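A short usage sketch for the RGB class in the record above (assumes that class definition is in scope); it exercises the int 0-255 and float 0.0-1.0 paths of the channel setters and the ValueError raised for out-of-range values:

c = RGB(red=200, green=0.5, blue=10)   # ints are treated as 0-255, floats as 0.0-1.0
print(c.red, c.green, c.blue)          # 200 127 10
print(round(c.green_float, 3))         # 0.498

try:
    c.blue = 300                       # out of range -> ValueError
except ValueError as err:
    print(err)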
import random # create the initial array regionsEMEA = ["Central Eastern Europe", "France", "Germany", "Middle East / Africa", "United Kingdom", "Western Europe"] # randomly pick region after region num = len(regionsEMEA) for x in range(num): numRegions = len(regionsEMEA) pos = random.randint(0,numRegions-1) selected = regionsEMEA[pos] print(selected) regionsEMEA.pop(pos)
28.357143
121
0.712846
[ "MIT" ]
jansche/EMEARegionsRandomizer
randEMEA.py
397
Python
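The randomizer above draws and removes one region at a time; random.sample gives the same uniformly random ordering without mutating the source list. A small stdlib-only alternative sketch:

import random

regions = ["Central Eastern Europe", "France", "Germany",
           "Middle East / Africa", "United Kingdom", "Western Europe"]

# Print every region exactly once, in random order, leaving the list intact.
for region in random.sample(regions, k=len(regions)):
    print(region)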
import collections.abc
from typing import Iterator

import itertools

from stream_lib.stream_api import Stream, T


class ItertoolsStream(Stream[T]):
    @staticmethod
    def stream(*iterables: Iterator[T]):
        if len(iterables) == 1:
            return ItertoolsStream(*iterables)
        else:
            return ItertoolsStream(itertools.zip_longest(*iterables))

    def __init__(self, delegate: Iterator[T]):
        # collections.abc is the supported home of Iterable/Iterator
        # (the bare collections.* aliases were removed in Python 3.10).
        assert isinstance(delegate, collections.abc.Iterable)
        if not isinstance(delegate, collections.abc.Iterator):
            delegate = iter(delegate)
        self._delegate = delegate

    def __iter__(self):
        self._delegate = iter(self._delegate)
        return self._delegate

    def __next__(self):
        return next(self._delegate)

    def map(self, func):
        return self._stream(map(func, self))

    def flatmap(self, func):
        return self.map(func).flatten()

    def flatten(self):
        return self._stream(itertools.chain.from_iterable(self))

    def filter(self, predicate):
        return self._stream(filter(predicate, self))

    def slice(self, start, stop, step=1):
        return self._stream(itertools.islice(self, start, stop, step))

    def limit(self, size):
        return self.slice(0, size)
26.829787
70
0.660587
[ "MIT" ]
flegac/deep-experiments
stream-lib/stream_lib/itertools_stream.py
1,261
Python
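The stream class above is a thin lazy wrapper over itertools; the same map/filter/limit chaining can be written directly with the standard library. An illustrative, self-contained analogue (not the stream_lib API):

import itertools

numbers = range(100)
pipeline = itertools.islice(                  # limit(5)
    filter(lambda x: x % 3 == 0,              # filter(predicate)
           map(lambda x: x * x, numbers)),    # map(func)
    5)
print(list(pipeline))  # [0, 9, 36, 81, 144]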
from .history.pyplot_history import pyplot_history from .history.plotly_history import plotly_history def plot_history(history, engine="pyplot", **kwargs): if engine == "pyplot": return pyplot_history(history, **kwargs) elif engine == "plotly": return plotly_history(history, **kwargs) else: raise Exception("Unknown plotting engine")
31.083333
53
0.710456
[ "MIT" ]
krzpiesiewicz/pytorch-fit
pytorch_fit/visuals/plot.py
373
Python
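The dispatcher above branches on an engine string; keeping the mapping in a dict makes adding an engine a one-line change. A sketch of that table-driven variant with placeholder backends (the real pyplot_history/plotly_history functions are not reproduced here):

def _pyplot_backend(history, **kwargs):
    return ("pyplot", history, kwargs)


def _plotly_backend(history, **kwargs):
    return ("plotly", history, kwargs)


_ENGINES = {"pyplot": _pyplot_backend, "plotly": _plotly_backend}


def plot_history(history, engine="pyplot", **kwargs):
    try:
        return _ENGINES[engine](history, **kwargs)
    except KeyError:
        raise ValueError("Unknown plotting engine: {!r}".format(engine)) from None


print(plot_history([0.9, 0.7, 0.5], engine="plotly", title="loss"))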
import logging from typing import Callable, TypeVar, List, Optional, Dict import ray from ray.exceptions import RayActorError from ray.util.sgd.v2.worker_group import WorkerGroup from ray.util.sgd.v2.session import init_session, get_session, shutdown_session T = TypeVar("T") logger = logging.getLogger(__name__) class BackendConfig: """Parent class for configurations of training backend.""" @property def backend_cls(self): raise NotImplementedError class SGDBackendError(Exception): """Errors with BackendExecutor that should not be exposed to user.""" class BackendExecutor: """Main execution class for training backends. This class holds a worker group and is responsible for executing the training function on the workers, and collecting intermediate results from ``sgd.report()``. Args: backend_config (BackendConfig): The configurations for this specific backend. num_workers (int): Number of workers to use for training. num_cpus_per_worker (float): Number of CPUs to use per worker. num_gpus_per_worker (float): Number of GPUs to use per worker. """ def __init__(self, backend_config: BackendConfig, num_workers: int = 1, num_cpus_per_worker: float = 1, num_gpus_per_worker: float = 0): self._backend_config = backend_config self._backend = self._backend_config.backend_cls() self._num_workers = num_workers self._num_cpus_per_worker = num_cpus_per_worker self._num_gpus_per_worker = num_gpus_per_worker self.worker_group = InactiveWorkerGroup() def start(self, initialization_hook: Optional[Callable[[], None]] = None): """Starts the worker group.""" self.worker_group = WorkerGroup(self._num_workers, self._num_cpus_per_worker, self._num_gpus_per_worker) if initialization_hook: self.worker_group.execute(initialization_hook) self._backend.on_start(self.worker_group, self._backend_config) def start_training(self, train_func: Callable[[], T]) -> None: """Executes a training function on all workers in a separate thread. ``finish_training`` should be called after this. Args: train_func (Callable): The training function to run on each worker. """ # First initialize the session. def initialize_session(world_rank, train_func): try: init_session(training_func=train_func, world_rank=world_rank) except ValueError: raise SGDBackendError( "Attempting to start training but a " "previous training run is still ongoing. " "You must call `finish_training` before " "calling `start_training` again.") futures = [] for world_rank in range(len(self.worker_group)): futures.append( self.worker_group.execute_single_async( world_rank, initialize_session, world_rank=world_rank, train_func=train_func)) ray.get(futures) # Run the training function asynchronously in its own thread. def train_async(): session = get_session() session.start() self.worker_group.execute_async(train_async) def fetch_next_result(self) -> Optional[List[Dict]]: """Fetch next results produced by ``sgd.report()`` from each worker. Assumes ``start_training`` has already been called. Returns: A list of dictionaries of values passed to ``sgd.report()`` from each worker. Each item corresponds to an intermediate result a single worker. If there are no more items to fetch, returns None. """ def get_next(): # Get the session for this worker. try: session = get_session() except ValueError: # Session is not initialized yet. raise SGDBackendError("`fetch_next_result` has been called " "before `start_training`. Please call " "`start_training` before " "`fetch_next_result`.") try: result = session.get_next() except RuntimeError: # Training thread has not been started yet. 
raise SGDBackendError("`fetch_next_result` has been called " "before `start_training`. Please call " "`start_training` before " "`fetch_next_result`.") return result futures = self.worker_group.execute_async(get_next) results = self.get_with_failure_handling(futures) # Check if any worker returned None. if any(r is None for r in results): # Either all workers have results or none of them do. if not all(r is None for r in results): raise RuntimeError("Some workers returned results while " "others didn't. Make sure that " "`sgd.report()` is called the same number " "of times on all workers.") else: results = None return results def finish_training(self) -> List[T]: """Finish training and return final results. Propagate any exceptions. Blocks until training is finished on all workers. Assumes `start_training` has already been called. Returns: A list of return values from calling ``train_func`` on each worker. Each item corresponds to the return value from a single worker. """ def end_training(): # Get the session for this worker. try: session = get_session() except ValueError: # Session is not initialized yet. raise SGDBackendError("`finish_training` has been called " "before `start_training`. Please call " "`start_training` before " "`finish_training`.") try: # session.finish raises any Exceptions from training. output = session.finish() finally: # Shutdown session even if session.finish() raises an # Exception. shutdown_session() return output futures = self.worker_group.execute_async(end_training) return self.get_with_failure_handling(futures) def get_with_failure_handling(self, remote_values): """Gets the remote values while handling for worker failures. Args: remote_values (list): List of object refs representing functions that may fail in the middle of execution. For example, running a SGD training loop in multiple parallel actor calls. Returns: The resolved objects represented by the passed in ObjectRefs. """ unfinished = remote_values try: while len(unfinished) > 0: finished, unfinished = ray.wait(unfinished) # If a failure occurs the ObjectRef will be marked as finished. # Calling ray.get will expose the failure as a RayActorError. ray.get(finished) except RayActorError as exc: logger.exception(str(exc)) self.handle_failure() return return ray.get(remote_values) def handle_failure(self): # TODO: Fault-tolerance/elastic training here. self.shutdown() raise RuntimeError("Worker crashed during training. " "Training unsuccessful.") def shutdown(self): """Shuts down the workers in the worker group.""" try: self._backend.on_shutdown(self.worker_group, self._backend_config) except RayActorError: logger.warning("Graceful shutdown of backend failed. This is " "expected if one of the workers has crashed.") self.worker_group.shutdown() self.worker_group = InactiveWorkerGroup() class BackendInterface: def on_start(self, worker_group: WorkerGroup, backend_config: BackendConfig): raise NotImplementedError def on_shutdown(self, worker_group: WorkerGroup, backend_config: BackendConfig): raise NotImplementedError class InactiveWorkerGroupError(Exception): """Raised when underlying worker group is inactive.""" class InactiveWorkerGroup(): # TODO: fix inheritence. perhaps create WorkerGroupInterface. def __getattribute__(self, *args, **kwargs): raise InactiveWorkerGroupError() def __len__(self): raise InactiveWorkerGroupError()
37.365462
79
0.592863
[ "Apache-2.0" ]
cuongnvan/ray
python/ray/util/sgd/v2/backends/backend.py
9,304
Python
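The BackendExecutor above fans a training function out to worker actors, gathers the per-worker return values, and converts any worker failure into a single error. A rough stdlib analogue of that fan-out/gather pattern, using threads instead of Ray actors (illustrative only, not the Ray SGD API):

from concurrent.futures import ThreadPoolExecutor


def run_on_workers(fn, num_workers):
    """Run fn(rank) on num_workers workers and collect results in rank order."""
    with ThreadPoolExecutor(max_workers=num_workers) as pool:
        futures = [pool.submit(fn, rank) for rank in range(num_workers)]
        try:
            return [f.result() for f in futures]
        except Exception as exc:
            raise RuntimeError("Worker crashed during training. "
                               "Training unsuccessful.") from exc


print(run_on_workers(lambda rank: rank * 2, 4))  # [0, 2, 4, 6]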
from django.test import TestCase from authors.apps.authentication.models import User class UserModelTest(TestCase): """ Test Suite for the User model class, User authentication. """ def test_create_user(self): """ Test User model can create a user successfully """ self.assertIsInstance( User.objects.create_user(username="username", email="[email protected]", password="password"), User)
27.736842
64
0.580645
[ "BSD-3-Clause" ]
andela/ah-bird-box
authors/apps/authentication/tests/test_create_user.py
527
Python
#!/usr/bin/env python # -*- coding: utf-8 -*- # MIT License # # Copyright 2018-2020 New York University Abu Dhabi # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """The CAMeL Tools transliteration utility. Usage: camel_transliterate (-s SCHEME | --scheme=SCHEME) [-m MARKER | --marker=MARKER] [-I | --ignore-markers] [-S | --strip-markers] [-o OUTPUT | --output=OUTPUT] [FILE] camel_transliterate (-l | --list) camel_transliterate (-v | --version) camel_transliterate (-h | --help) Options: -s SCHEME --scheme Scheme used for transliteration. -o OUTPUT --output=OUTPUT Output file. If not specified, output will be printed to stdout. -m MARKER --marker=MARKER Marker used to prefix tokens not to be transliterated. [default: @@IGNORE@@] -I --ignore-markers Transliterate marked words as well. -S --strip-markers Remove markers in output. -l --list Show a list of available transliteration schemes. -h --help Show this screen. -v --version Show version. """ from __future__ import print_function, absolute_import import sys from docopt import docopt import six import camel_tools as camelt from camel_tools.utils.stringutils import force_encoding, force_unicode from camel_tools.utils.charmap import CharMapper from camel_tools.utils.transliterate import Transliterator __version__ = camelt.__version__ _BUILTIN_SCHEMES = [ ('ar2bw', 'Arabic to Buckwalter'), ('ar2safebw', 'Arabic to Safe Buckwalter'), ('ar2xmlbw', 'Arabic to XML Buckwalter'), ('ar2hsb', 'Arabic to Habash-Soudi-Buckwalter'), ('bw2ar', 'Buckwalter to Arabic'), ('bw2safebw', 'Buckwalter to Safe Buckwalter'), ('bw2xmlbw', 'Buckwalter to XML Buckwalter'), ('bw2hsb', 'Buckwalter to Habash-Soudi-Buckwalter'), ('safebw2ar', 'Safe Buckwalter to Arabic'), ('safebw2bw', 'Safe Buckwalter to Buckwalter'), ('safebw2xmlbw', 'Safe Buckwalter to XML Buckwalter'), ('safebw2hsb', 'Safe Buckwalter to Habash-Soudi-Buckwalter'), ('xmlbw2ar', 'XML Buckwalter to Arabic'), ('xmlbw2bw', 'XML Buckwalter to Buckwalter'), ('xmlbw2safebw', 'XML Buckwalter to Safe Buckwalter'), ('xmlbw2hsb', 'XML Buckwalter to Habash-Soudi-Buckwalter'), ('hsb2ar', 'Habash-Soudi-Buckwalter to Arabic'), ('hsb2bw', 'Habash-Soudi-Buckwalter to Buckwalter'), ('hsb2safebw', 'Habash-Soudi-Buckwalter to Safe Buckwalter'), ('hsb2xmlbw', 'Habash-Soudi-Buckwalter to Habash-Soudi-Buckwalter'), ] def _open_files(finpath, foutpath): if finpath is None: fin = sys.stdin else: try: fin = open(finpath, 'r', encoding='utf-8') except OSError: sys.stderr.write('Error: Couldn\'t open input file {}.' 
'\n'.format(repr(finpath))) sys.exit(1) if foutpath is None: fout = sys.stdout else: try: fout = open(foutpath, 'w', encoding='utf-8') except OSError: sys.stderr.write('Error: Couldn\'t open output file {}.' '\n'.format(repr(foutpath))) if finpath is not None: fin.close() sys.exit(1) return fin, fout def main(): # pragma: no cover try: version = ('CAMeL Tools v{}'.format(__version__)) arguments = docopt(__doc__, version=version) if arguments['--list']: for scheme in _BUILTIN_SCHEMES: print("{} {}".format(scheme[0].ljust(20), scheme[1])) sys.exit(0) if arguments['--scheme'] is not None: if arguments['--scheme'] not in [s[0] for s in _BUILTIN_SCHEMES]: sys.stderr.write('Error: {} is not a valid scheme.\n' 'Run `camel_transliterate -l` to see the list' ' of available schemes.' '\n'.format(repr(arguments['--scheme']))) sys.exit(1) if arguments['--marker'] is None: marker = '@@IGNORE@@' else: marker = arguments['--marker'] ignore_markers = arguments['--ignore-markers'] strip_markers = arguments['--strip-markers'] # Open files (or just use stdin and stdout) fin, fout = _open_files(arguments['FILE'], arguments['--output']) # Load the CharMapper and initialize a Transliterator with it try: mapper = CharMapper.builtin_mapper(arguments['--scheme']) trans = Transliterator(mapper, marker) except Exception: # pylint: disable=W0703 sys.stderr.write('Error: Could not load builtin scheme' ' {}.\n'.format(repr(arguments['--scheme']))) sys.exit(1) # Transliterate lines try: for line in fin: line = force_unicode(line) if six.PY3: fout.write( trans.transliterate(line, strip_markers, ignore_markers)) else: fout.write( force_encoding( trans.transliterate(line, strip_markers, ignore_markers))) fout.flush() # If everything worked so far, this shouldn't happen except Exception: # pylint: disable=W0703 sys.stderr.write('Error: An unkown error occured during ' 'transliteration.\n') sys.exit(1) # Cleanup if arguments['FILE'] is not None: fin.close() if arguments['--output'] is not None: fout.close() sys.exit(0) except KeyboardInterrupt: sys.stderr.write('Exiting...\n') sys.exit(1) except Exception: sys.stderr.write('Error: An unknown error occurred.\n') sys.exit(1) if __name__ == '__main__': # pragma: no cover main()
36.40796
79
0.584859
[ "MIT" ]
AhmedYounes94/Camel_tools
camel_tools/cli/camel_transliterate.py
7,318
Python
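The CLI above falls back to stdin/stdout when no input or output path is given and is careful to close only what it opened. A compact stdlib-only sketch of that file-or-stdio pattern using a context manager (illustrative, not the CAMeL Tools implementation):

import contextlib
import sys


@contextlib.contextmanager
def open_or_std(path, mode="r"):
    # Fall back to stdin/stdout when no path is given; only close real files.
    if path is None:
        yield sys.stdin if "r" in mode else sys.stdout
    else:
        with open(path, mode, encoding="utf-8") as handle:
            yield handle


def copy_lines(in_path=None, out_path=None):
    with open_or_std(in_path) as fin, open_or_std(out_path, "w") as fout:
        for line in fin:
            fout.write(line)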
# -*- coding: utf-8 -*- """ Created on Mon Mar 7 16:41:25 2011 @author: - """ import os; import time; import sys; import plot_pca_functions; import numpy as np import matplotlib.pyplot as plt import math taylor_error_capitol= 0.608546356589; pca_error_9_capitol = 0.614236131016; #at 10% sample-training taylor_error_downtown= 0.248427497809; #this is for downtown12_12_4! pca_error_9_downtown = 0.193806624247; #this is for downtown3_3_1! fig = plt.figure();
20.304348
68
0.755889
[ "BSD-2-Clause" ]
mirestrepo/voxels-at-lems
bvpl/bvpl_octree/taylor_vs_pca.py
467
Python
# encoding: UTF-8

import os
print u'load {0}/*'.format(os.path.dirname(__file__))

# Default settings
from chinese import text

# Whether to use English instead
from vnpy.trader.vtGlobal import globalSetting
if globalSetting['language'] == 'english':
    from english import text
20.416667
53
0.742857
[ "MIT" ]
CjoneL/vnpy
vnpy/trader/gateway/ctpGateway/language/__init__.py
267
Python
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ Testing RandomColor op in DE """ import numpy as np import pytest import mindspore.dataset as ds import mindspore.dataset.transforms.py_transforms import mindspore.dataset.vision.c_transforms as vision import mindspore.dataset.vision.py_transforms as F from mindspore import log as logger from util import visualize_list, diff_mse, save_and_check_md5, \ config_get_set_seed, config_get_set_num_parallel_workers DATA_DIR = "../data/dataset/testImageNetData/train/" C_DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] C_SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" MNIST_DATA_DIR = "../data/dataset/testMnistData" GENERATE_GOLDEN = False def test_random_color_py(degrees=(0.1, 1.9), plot=False): """ Test Python RandomColor """ logger.info("Test RandomColor") # Original Images data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.Resize((224, 224)), F.ToTensor()]) ds_original = data.map(operations=transforms_original, input_columns="image") ds_original = ds_original.batch(512) for idx, (image, _) in enumerate(ds_original): if idx == 0: images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1)) else: images_original = np.append(images_original, np.transpose(image.asnumpy(), (0, 2, 3, 1)), axis=0) # Random Color Adjusted Images data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_random_color = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.Resize((224, 224)), F.RandomColor(degrees=degrees), F.ToTensor()]) ds_random_color = data.map(operations=transforms_random_color, input_columns="image") ds_random_color = ds_random_color.batch(512) for idx, (image, _) in enumerate(ds_random_color): if idx == 0: images_random_color = np.transpose(image.asnumpy(), (0, 2, 3, 1)) else: images_random_color = np.append(images_random_color, np.transpose(image.asnumpy(), (0, 2, 3, 1)), axis=0) num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): mse[i] = diff_mse(images_random_color[i], images_original[i]) logger.info("MSE= {}".format(str(np.mean(mse)))) if plot: visualize_list(images_original, images_random_color) def test_random_color_c(degrees=(0.1, 1.9), plot=False, run_golden=True): """ Test Cpp RandomColor """ logger.info("test_random_color_op") original_seed = config_get_set_seed(10) original_num_parallel_workers = config_get_set_num_parallel_workers(1) # Decode with rgb format set to True data1 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=["image"], shuffle=False) # Serialize and Load dataset requires using vision.Decode instead of vision.Decode(). 
if degrees is None: c_op = vision.RandomColor() else: c_op = vision.RandomColor(degrees) data1 = data1.map(operations=[vision.Decode()], input_columns=["image"]) data2 = data2.map(operations=[vision.Decode(), c_op], input_columns=["image"]) image_random_color_op = [] image = [] for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True), data2.create_dict_iterator(num_epochs=1, output_numpy=True)): actual = item1["image"] expected = item2["image"] image.append(actual) image_random_color_op.append(expected) if run_golden: # Compare with expected md5 from images filename = "random_color_op_02_result.npz" save_and_check_md5(data2, filename, generate_golden=GENERATE_GOLDEN) if plot: visualize_list(image, image_random_color_op) # Restore configuration ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers((original_num_parallel_workers)) def test_random_color_py_md5(): """ Test Python RandomColor with md5 check """ logger.info("Test RandomColor with md5 check") original_seed = config_get_set_seed(10) original_num_parallel_workers = config_get_set_num_parallel_workers(1) # Generate dataset data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.RandomColor((2.0, 2.5)), F.ToTensor()]) data = data.map(operations=transforms, input_columns="image") # Compare with expected md5 from images filename = "random_color_01_result.npz" save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) # Restore configuration ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers((original_num_parallel_workers)) def test_compare_random_color_op(degrees=None, plot=False): """ Compare Random Color op in Python and Cpp """ logger.info("test_random_color_op") original_seed = config_get_set_seed(5) original_num_parallel_workers = config_get_set_num_parallel_workers(1) # Decode with rgb format set to True data1 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(C_DATA_DIR, C_SCHEMA_DIR, columns_list=["image"], shuffle=False) if degrees is None: c_op = vision.RandomColor() p_op = F.RandomColor() else: c_op = vision.RandomColor(degrees) p_op = F.RandomColor(degrees) transforms_random_color_py = mindspore.dataset.transforms.py_transforms.Compose( [lambda img: img.astype(np.uint8), F.ToPIL(), p_op, np.array]) data1 = data1.map(operations=[vision.Decode(), c_op], input_columns=["image"]) data2 = data2.map(operations=[vision.Decode()], input_columns=["image"]) data2 = data2.map(operations=transforms_random_color_py, input_columns=["image"]) image_random_color_op = [] image = [] for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True), data2.create_dict_iterator(num_epochs=1, output_numpy=True)): actual = item1["image"] expected = item2["image"] image_random_color_op.append(actual) image.append(expected) assert actual.shape == expected.shape mse = diff_mse(actual, expected) logger.info("MSE= {}".format(str(np.mean(mse)))) # Restore configuration ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers(original_num_parallel_workers) if plot: visualize_list(image, image_random_color_op) def test_random_color_c_errors(): """ Test that Cpp RandomColor errors with bad input """ with pytest.raises(TypeError) as error_info: vision.RandomColor((12)) assert "degrees must be either a tuple or a list." 
in str(error_info.value) with pytest.raises(TypeError) as error_info: vision.RandomColor(("col", 3)) assert "Argument degrees[0] with value col is not of type (<class 'int'>, <class 'float'>)." in str( error_info.value) with pytest.raises(ValueError) as error_info: vision.RandomColor((0.9, 0.1)) assert "degrees should be in (min,max) format. Got (max,min)." in str(error_info.value) with pytest.raises(ValueError) as error_info: vision.RandomColor((0.9,)) assert "degrees must be a sequence with length 2." in str(error_info.value) # RandomColor Cpp Op will fail with one channel input mnist_ds = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False) mnist_ds = mnist_ds.map(operations=vision.RandomColor(), input_columns="image") with pytest.raises(RuntimeError) as error_info: for _ in enumerate(mnist_ds): pass assert "image shape is not <H,W,C> or channel is not 3" in str(error_info.value) if __name__ == "__main__": test_random_color_py() test_random_color_py(plot=True) test_random_color_py(degrees=(2.0, 2.5), plot=True) # Test with degree values that show more obvious transformation test_random_color_py_md5() test_random_color_c() test_random_color_c(plot=True) test_random_color_c(degrees=(2.0, 2.5), plot=True, run_golden=False) # Test with degree values that show more obvious transformation test_random_color_c(degrees=(0.1, 0.1), plot=True, run_golden=False) test_compare_random_color_op(plot=True) test_random_color_c_errors()
38.96124
120
0.65579
[ "Apache-2.0" ]
king4arabs/mindspore
tests/ut/python/dataset/test_random_color.py
10,052
Python
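The tests above quantify how much RandomColor changed an image by taking the mean squared error against the original. A self-contained numpy sketch of that comparison (the shapes and dtype are arbitrary; this is not the repo's diff_mse helper):

import numpy as np


def mse(a, b):
    a = a.astype(np.float64)
    b = b.astype(np.float64)
    return float(np.mean((a - b) ** 2))


original = np.zeros((4, 4, 3), dtype=np.uint8)
adjusted = original.copy()
adjusted[0, 0, 0] = 12          # perturb a single channel value
print(mse(original, adjusted))  # 144 / 48 = 3.0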
# -*- coding: utf-8 -*- check_state = 0 d = {} p = [] e = [] m = [] n = int(input()) for _ in range(n): ln = input().split() d[ln[0]] = (int(ln[1]), int(ln[2]), int(ln[3])) p.append(int(ln[1])) e.append(int(ln[2])) m.append(int(ln[3])) while True: if check_state == 0: if p.count(max(p)) == 1: for k in d: if d[k][0] == max(p): print(k) break break else: del_list = [] for k in d: if d[k][0] != max(p): p.remove(d[k][0]) e.remove(d[k][1]) m.remove(d[k][2]) del_list.append(k) for k in del_list: del d[k] if check_state == 1: if e.count(max(e)) == 1: for k in d: if d[k][1] == max(e): print(k) break break else: del_list = [] for k in d: if d[k][1] != max(e): p.remove(d[k][0]) e.remove(d[k][1]) m.remove(d[k][2]) del_list.append(k) for k in del_list: del d[k] if check_state == 2: if m.count(min(m)) == 1: for k in d: if d[k][2] == min(m): print(k) break break else: del_list = [] for k in d: if d[k][2] != min(m): p.remove(d[k][0]) e.remove(d[k][1]) m.remove(d[k][2]) del_list.append(k) for k in del_list: del d[k] # Ordem lexicográfica é a mesma coisa que afabética nesse caso keys = sorted(d.keys()) print(keys[0]) break check_state += 1
17.413223
74
0.320361
[ "MIT" ]
ErFer7/URI-Python
2654.py
2,110
Python
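The elimination loop above keeps the contestant with the highest first value, breaking ties by the highest second value, then the lowest third value, then the lexicographically smallest name. The same tie-breaking can be expressed as a single sort key; a small sketch with toy data:

d = {"ana": (10, 5, 2), "bob": (10, 5, 2), "carl": (10, 4, 1)}

# Highest first value, then highest second, then lowest third, then smallest name.
winner = min(d, key=lambda k: (-d[k][0], -d[k][1], d[k][2], k))
print(winner)  # ana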
import numpy as np from math import * import pymultinest import sys sys.path.insert(0, '/home/kochenma/pysb') from pysb.integrate import Solver import csv import datetime import time as tm from model_550 import model from pysb.pathfinder import set_path set_path('bng', '/home/kochenma/BioNetGen') data_object = [] with open('earm_data.csv') as data_file: reader = csv.reader(data_file) line = list(reader) for each in line: data_object.append(each) for i, each in enumerate(data_object): if i > 0: for j, item in enumerate(each): data_object[i][j] = float(data_object[i][j]) data_object = data_object[1:] time = [] for each in data_object: time.append(float(each[0])) model_solver = Solver(model, time, integrator='vode', integrator_options={'atol': 1e-12, 'rtol': 1e-12}) def prior(cube, ndim, nparams): for k, every in enumerate(model.parameters): if every.name[-3:] == '1kf': cube[k] = cube[k]*4 - 4 if every.name[-3:] == '2kf': cube[k] = cube[k]*4 - 8 if every.name[-3:] == '1kr': cube[k] = cube[k]*4 - 4 if every.name[-3:] == '1kc': cube[k] = cube[k]*4 - 1 postfixes = ['1kf', '2kf', '1kr', '1kc'] def loglike(cube, ndim, nparams): point = [] cube_index = 0 for k, every in enumerate(model.parameters): if every.name[-3:] in postfixes: point.append(10**cube[cube_index]) cube_index += 1 else: point.append(model.parameters[k].value) model_solver.run(point) failed = False for every in model_solver.yobs: for thing in every: if thing <= -0.00000001 or np.isnan(thing): failed = True if failed: return ['fail', -10000.0] else: parpc = model_solver.yobs[-1][6]/(model_solver.yobs[-1][1] + model_solver.yobs[-1][6]) if (parpc > 0.0) and (parpc < 1.00000001): print log(parpc), point return ['sim', log(parpc)] else: return ['fail', -10000.0] n_params = 0 for m, lotsa in enumerate(model.parameters): if lotsa.name[-3:] == '1kf': n_params += 1 if lotsa.name[-3:] == '2kf': n_params += 1 if lotsa.name[-3:] == '1kr': n_params += 1 if lotsa.name[-3:] == '1kc': n_params += 1 start_time = tm.clock() counts = [0, 0] pymultinest.run(loglike, prior, n_params, evidence_tolerance=0.0001, n_live_points=16000, log_zero=-1e3, sampling_efficiency=0.3, outputfiles_basename='/scratch/kochenma/log_casp_act/550/', resume = False, verbose = False, counts=counts) print counts print 'start time', start_time print 'end time', tm.clock()
25.610526
237
0.671599
[ "MIT" ]
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
log_casp_act/run_model_550.py
2,433
Python
# encoding: utf-8 """ parse_process.py Created by Thomas Mangin on 2015-06-05. Copyright (c) 2009-2017 Exa Networks. All rights reserved. License: 3-clause BSD. (See the COPYRIGHT file) """ import time import copy from collections import defaultdict from exabgp.configuration.core import Section from exabgp.configuration.parser import boolean from exabgp.configuration.neighbor.parser import processes class _ParseDirection (Section): action = { 'parsed': 'set-command', 'packets': 'set-command', 'consolidate': 'set-command', 'open': 'set-command', 'update': 'set-command', 'notification': 'set-command', 'keepalive': 'set-command', 'refresh': 'set-command', 'operational': 'set-command', } known = { 'parsed': boolean, 'packets': boolean, 'consolidate': boolean, 'open': boolean, 'update': boolean, 'notification': boolean, 'keepalive': boolean, 'refresh': boolean, 'operational': boolean, } default = { 'parsed': True, 'packets': True, 'consolidate': True, 'open': True, 'update': True, 'notification': True, 'keepalive': True, 'refresh': True, 'operational': True, } syntax = '{\n %s;\n}' % ';\n '.join(default.keys()) def __init__ (self, tokeniser, scope, error, logger): Section.__init__(self,tokeniser,scope,error,logger) def clear (self): pass def pre (self): return True def post (self): return True class ParseSend (_ParseDirection): syntax = \ 'send %s' % _ParseDirection.syntax name = 'api/send' class ParseReceive (_ParseDirection): syntax = \ 'receive %s' % _ParseDirection.syntax name = 'api/receive' class ParseAPI (Section): syntax = \ 'process {\n' \ ' processes [ name-of-processes ];\n' \ ' neighbor-changes;\n' \ ' %s\n' \ ' %s\n' \ '}' % ( '\n '.join(ParseSend.syntax.split('\n')), '\n '.join(ParseReceive.syntax.split('\n')) ) known = { 'processes': processes, 'neighbor-changes': boolean, 'negotiated': boolean, 'fsm': boolean, 'signal': boolean, } action = { 'processes': 'set-command', 'neighbor-changes': 'set-command', 'negotiated': 'set-command', 'fsm': 'set-command', 'signal': 'set-command', } default = { 'neighbor-changes': True, 'negotiated': True, 'fsm': True, 'signal': True, } DEFAULT_API = { 'neighbor-changes': [], 'negotiated': [], 'fsm': [], 'signal': [], 'processes': [], } name = 'api' def __init__ (self, tokeniser, scope, error, logger): Section.__init__(self,tokeniser,scope,error,logger) self.api = {} self.named = '' @classmethod def _empty (cls): return copy.deepcopy(cls.DEFAULT_API) def clear (self): self.api = {} self.named = '' pass def pre (self): named = self.tokeniser.iterate() self.named = named if named else 'auto-named-%d' % int(time.time()*1000000) self.check_name(self.named) self.scope.enter(self.named) self.scope.to_context() return True def post (self): self.scope.leave() self.scope.to_context() return True @classmethod def flatten (cls,apis): built = cls._empty() for api in apis.values(): procs = api.get('processes',[]) built.setdefault('processes',[]).extend(procs) for command in ('neighbor-changes','negotiated','fsm','signal'): built.setdefault(command,[]).extend(procs if api.get(command,False) else []) for direction in ('send','receive'): data = api.get(direction,{}) for action in ('parsed','packets','consolidate','open', 'update', 'notification', 'keepalive', 'refresh', 'operational'): built.setdefault("%s-%s" % (direction,action),[]).extend(procs if data.get(action,False) else []) return built for way in ('send','receive'): for name in ('parsed','packets','consolidate','open', 'update', 'notification', 'keepalive', 'refresh', 'operational'): 
ParseAPI.DEFAULT_API["%s-%s" % (way,name)] = []
22.755556
125
0.608154
[ "BSD-3-Clause" ]
RIPE-NCC/exabgp
lib/exabgp/configuration/neighbor/api.py
4,096
Python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for boosted_trees.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_boosted_trees_ops from tensorflow.python.ops import resources # Re-exporting ops used by other modules. # pylint: disable=unused-import from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_aggregate_stats from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_bucketize from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_feature_split as calculate_best_feature_split from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_feature_split_v2 as calculate_best_feature_split_v2 from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_gains_per_feature as calculate_best_gains_per_feature from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_center_bias as center_bias from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_create_quantile_stream_resource as create_quantile_stream_resource from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_example_debug_outputs as example_debug_outputs from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_quantile_summaries as make_quantile_summaries from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_stats_summary as make_stats_summary from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_predict as predict from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_add_summaries as quantile_add_summaries from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_deserialize as quantile_resource_deserialize from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_flush as quantile_flush from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_get_bucket_boundaries as get_bucket_boundaries from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_handle_op as quantile_resource_handle_op from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_sparse_aggregate_stats from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_sparse_calculate_best_feature_split as sparse_calculate_best_feature_split from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_training_predict as training_predict from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble as update_ensemble from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble_v2 as 
update_ensemble_v2 from tensorflow.python.ops.gen_boosted_trees_ops import is_boosted_trees_quantile_stream_resource_initialized as is_quantile_resource_initialized # pylint: enable=unused-import from tensorflow.python.training import saver from tensorflow.python.training.tracking import tracking class PruningMode(object): """Class for working with Pruning modes.""" NO_PRUNING, PRE_PRUNING, POST_PRUNING = range(0, 3) _map = {'none': NO_PRUNING, 'pre': PRE_PRUNING, 'post': POST_PRUNING} @classmethod def from_str(cls, mode): if mode in cls._map: return cls._map[mode] else: raise ValueError( 'pruning_mode mode must be one of: {}. Found: {}'.format(', '.join( sorted(cls._map)), mode)) class QuantileAccumulatorSaveable(saver.BaseSaverBuilder.SaveableObject): """SaveableObject implementation for QuantileAccumulator.""" def __init__(self, resource_handle, create_op, num_streams, name): self._resource_handle = resource_handle self._num_streams = num_streams self._create_op = create_op bucket_boundaries = get_bucket_boundaries(self._resource_handle, self._num_streams) slice_spec = '' specs = [] def make_save_spec(tensor, suffix): return saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name + suffix) for i in range(self._num_streams): specs += [ make_save_spec(bucket_boundaries[i], '_bucket_boundaries_' + str(i)) ] super(QuantileAccumulatorSaveable, self).__init__(self._resource_handle, specs, name) def restore(self, restored_tensors, unused_tensor_shapes): bucket_boundaries = restored_tensors with ops.control_dependencies([self._create_op]): return quantile_resource_deserialize( self._resource_handle, bucket_boundaries=bucket_boundaries) class QuantileAccumulator(tracking.TrackableResource): """SaveableObject implementation for QuantileAccumulator. The bucket boundaries are serialized and deserialized from checkpointing. 
""" def __init__(self, epsilon, num_streams, num_quantiles, name=None, max_elements=None): self._eps = epsilon self._num_streams = num_streams self._num_quantiles = num_quantiles super(QuantileAccumulator, self).__init__() with ops.name_scope(name, 'QuantileAccumulator') as name: self._name = name self._resource_handle = self._create_resource() self._init_op = self._initialize() is_initialized_op = self.is_initialized() resources.register_resource(self.resource_handle, self._init_op, is_initialized_op) self._saveable = QuantileAccumulatorSaveable( self.resource_handle, self._init_op, self._num_streams, self.resource_handle.name) ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable) def _create_resource(self): return quantile_resource_handle_op( container='', shared_name=self._name, name=self._name) def _initialize(self): return create_quantile_stream_resource(self.resource_handle, self._eps, self._num_streams) @property def initializer(self): if self._init_op is None: self._init_op = self._initialize() return self._init_op def is_initialized(self): return is_quantile_resource_initialized(self.resource_handle) @property def saveable(self): return self._saveable def _gather_saveables_for_checkpoint(self): return {'quantile_accumulator', self._saveable} def add_summaries(self, float_columns, example_weights): summaries = make_quantile_summaries(float_columns, example_weights, self._eps) summary_op = quantile_add_summaries(self.resource_handle, summaries) return summary_op def flush(self): return quantile_flush(self.resource_handle, self._num_quantiles) def get_bucket_boundaries(self): return get_bucket_boundaries(self.resource_handle, self._num_streams) class _TreeEnsembleSavable(saver.BaseSaverBuilder.SaveableObject): """SaveableObject implementation for TreeEnsemble.""" def __init__(self, resource_handle, create_op, name): """Creates a _TreeEnsembleSavable object. Args: resource_handle: handle to the decision tree ensemble variable. create_op: the op to initialize the variable. name: the name to save the tree ensemble variable under. """ stamp_token, serialized = ( gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle)) # slice_spec is useful for saving a slice from a variable. # It's not meaningful the tree ensemble variable. So we just pass an empty # value. slice_spec = '' specs = [ saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec, name + '_stamp'), saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec, name + '_serialized'), ] super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name) self._resource_handle = resource_handle self._create_op = create_op def restore(self, restored_tensors, unused_restored_shapes): """Restores the associated tree ensemble from 'restored_tensors'. Args: restored_tensors: the tensors that were loaded from a checkpoint. unused_restored_shapes: the shapes this object should conform to after restore. Not meaningful for trees. Returns: The operation that restores the state of the tree ensemble variable. 
""" with ops.control_dependencies([self._create_op]): return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble( self._resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1]) class TreeEnsemble(tracking.TrackableResource): """Creates TreeEnsemble resource.""" def __init__(self, name, stamp_token=0, is_local=False, serialized_proto=''): self._stamp_token = stamp_token self._serialized_proto = serialized_proto self._is_local = is_local with ops.name_scope(name, 'TreeEnsemble') as name: self._name = name self._resource_handle = self._create_resource() self._init_op = self._initialize() is_initialized_op = self.is_initialized() # Adds the variable to the savable list. if not is_local: self._saveable = _TreeEnsembleSavable( self.resource_handle, self.initializer, self.resource_handle.name) ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable) resources.register_resource( self.resource_handle, self.initializer, is_initialized_op, is_shared=not is_local) def _create_resource(self): return gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op( container='', shared_name=self._name, name=self._name) def _initialize(self): return gen_boosted_trees_ops.boosted_trees_create_ensemble( self.resource_handle, self._stamp_token, tree_ensemble_serialized=self._serialized_proto) @property def initializer(self): if self._init_op is None: self._init_op = self._initialize() return self._init_op def is_initialized(self): return gen_boosted_trees_ops.is_boosted_trees_ensemble_initialized( self.resource_handle) def _gather_saveables_for_checkpoint(self): if not self._is_local: return {'tree_ensemble': self._saveable} def get_stamp_token(self): """Returns the current stamp token of the resource.""" stamp_token, _, _, _, _ = ( gen_boosted_trees_ops.boosted_trees_get_ensemble_states( self.resource_handle)) return stamp_token def get_states(self): """Returns states of the tree ensemble. Returns: stamp_token, num_trees, num_finalized_trees, num_attempted_layers and range of the nodes in the latest layer. """ (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, nodes_range) = ( gen_boosted_trees_ops.boosted_trees_get_ensemble_states( self.resource_handle)) # Use identity to give names. return (array_ops.identity(stamp_token, name='stamp_token'), array_ops.identity(num_trees, name='num_trees'), array_ops.identity(num_finalized_trees, name='num_finalized_trees'), array_ops.identity( num_attempted_layers, name='num_attempted_layers'), array_ops.identity(nodes_range, name='last_layer_nodes_range')) def serialize(self): """Serializes the ensemble into proto and returns the serialized proto. Returns: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto. """ return gen_boosted_trees_ops.boosted_trees_serialize_ensemble( self.resource_handle) def deserialize(self, stamp_token, serialized_proto): """Deserialize the input proto and resets the ensemble from it. Args: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto. Returns: Operation (for dependencies). """ return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble( self.resource_handle, stamp_token, serialized_proto)
42.996732
145
0.751995
[ "Apache-2.0" ]
AnyaTracy/tensorflow
tensorflow/python/ops/boosted_trees_ops.py
13,157
Python
# -------------- # Code starts here import pandas as pd import matplotlib.pyplot as plt from sklearn.preprocessing import Imputer from sklearn.preprocessing import LabelEncoder import numpy as np from scipy.stats import skew #### Data 1 # Load the data df = pd.read_csv(path) # Overview of the data df.info() df.describe() # Histogram showing distribution of car prices df['price'].plot.hist(bins=12,alpha =0.5) # Countplot of the make column df['make'].value_counts().plot(kind='bar') # Jointplot showing relationship between 'horsepower' and 'price' of the car df.plot.scatter(x='horsepower',y='price',c='blue') # Correlation heat map f = plt.figure(figsize=(19, 15)) plt.matshow(df.corr(), fignum=f.number) plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=45) plt.yticks(range(df.shape[1]), df.columns, fontsize=14) cb = plt.colorbar() cb.ax.tick_params(labelsize=14) plt.title('Correlation Matrix', fontsize=16); # boxplot that shows the variability of each 'body-style' with respect to the 'price' df.boxplot(column=['price'],by=['body-style']) #### Data 2 # Load the data df2 = pd.read_csv(path2) # Impute missing values with mean df2 = df2.replace("?","NaN") mean_imputer = Imputer(missing_values='NaN',strategy='mean',axis=0) df2['normalized-losses'] = mean_imputer.fit_transform(df2[['normalized-losses']]) df2['horsepower'] = mean_imputer.fit_transform(df2[['horsepower']]) # Skewness of numeric features num_cols = df2._get_numeric_data().columns for num_col in num_cols: if skew(df2[num_col].values)>1: print(num_col) df2[num_col]= np.sqrt(df2[num_col]) print(df2.head()) cat_cols = list(set(df2.columns)- set(num_cols)) # Label encode label_encoder = LabelEncoder() for cat_col in cat_cols: df2[cat_col]= label_encoder.fit_transform(df2[cat_col]) df2['area']=df2['height']*df2['width'] print(df2.head()) # Code ends here
21.988506
85
0.716153
[ "MIT" ]
NishthaShukla/eda
code.py
1,913
Python
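The EDA script above square-root-transforms every numeric column whose skewness exceeds 1 before label-encoding the rest. A self-contained sketch of that skew-correction step on a toy frame (pandas, numpy and scipy are assumed to be installed):

import numpy as np
import pandas as pd
from scipy.stats import skew

toy = pd.DataFrame({"price": [1, 2, 3, 4, 100],   # heavily right-skewed
                    "doors": [2, 4, 2, 4, 4]})    # roughly symmetric

for col in toy.select_dtypes(include=[np.number]).columns:
    if skew(toy[col].values) > 1:
        toy[col] = np.sqrt(toy[col])

print(toy)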
class CookieContainer(object): """ Provides a container for a collection of System.Net.CookieCollection objects. CookieContainer() CookieContainer(capacity: int) CookieContainer(capacity: int,perDomainCapacity: int,maxCookieSize: int) """ def ZZZ(self): """hardcoded/mock instance of the class""" return CookieContainer() instance=ZZZ() """hardcoded/returns an instance of the class""" def Add(self,*__args): """ Add(self: CookieContainer,cookie: Cookie) Adds a System.Net.Cookie to a System.Net.CookieContainer. This method uses the domain from the System.Net.Cookie to determine which domain collection to associate the System.Net.Cookie with. cookie: The System.Net.Cookie to be added to the System.Net.CookieContainer. Add(self: CookieContainer,cookies: CookieCollection) Adds the contents of a System.Net.CookieCollection to the System.Net.CookieContainer. cookies: The System.Net.CookieCollection to be added to the System.Net.CookieContainer. Add(self: CookieContainer,uri: Uri,cookie: Cookie) Adds a System.Net.Cookie to the System.Net.CookieContainer for a particular URI. uri: The URI of the System.Net.Cookie to be added to the System.Net.CookieContainer. cookie: The System.Net.Cookie to be added to the System.Net.CookieContainer. Add(self: CookieContainer,uri: Uri,cookies: CookieCollection) Adds the contents of a System.Net.CookieCollection to the System.Net.CookieContainer for a particular URI. uri: The URI of the System.Net.CookieCollection to be added to the System.Net.CookieContainer. cookies: The System.Net.CookieCollection to be added to the System.Net.CookieContainer. """ pass def GetCookieHeader(self,uri): """ GetCookieHeader(self: CookieContainer,uri: Uri) -> str Gets the HTTP cookie header that contains the HTTP cookies that represent the System.Net.Cookie instances that are associated with a specific URI. uri: The URI of the System.Net.Cookie instances desired. Returns: An HTTP cookie header,with strings representing System.Net.Cookie instances delimited by semicolons. """ pass def GetCookies(self,uri): """ GetCookies(self: CookieContainer,uri: Uri) -> CookieCollection Gets a System.Net.CookieCollection that contains the System.Net.Cookie instances that are associated with a specific URI. uri: The URI of the System.Net.Cookie instances desired. Returns: A System.Net.CookieCollection that contains the System.Net.Cookie instances that are associated with a specific URI. """ pass def SetCookies(self,uri,cookieHeader): """ SetCookies(self: CookieContainer,uri: Uri,cookieHeader: str) Adds System.Net.Cookie instances for one or more cookies from an HTTP cookie header to the System.Net.CookieContainer for a specific URI. uri: The URI of the System.Net.CookieCollection. cookieHeader: The contents of an HTTP set-cookie header as returned by a HTTP server,with System.Net.Cookie instances delimited by commas. """ pass def __add__(self,*args): """ x.__add__(y) <==> x+yx.__add__(y) <==> x+yx.__add__(y) <==> x+yx.__add__(y) <==> x+y """ pass @staticmethod def __new__(self,capacity=None,perDomainCapacity=None,maxCookieSize=None): """ __new__(cls: type) __new__(cls: type,capacity: int) __new__(cls: type,capacity: int,perDomainCapacity: int,maxCookieSize: int) """ pass Capacity=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets and sets the number of System.Net.Cookie instances that a System.Net.CookieContainer can hold. 
Get: Capacity(self: CookieContainer) -> int Set: Capacity(self: CookieContainer)=value """ Count=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the number of System.Net.Cookie instances that a System.Net.CookieContainer currently holds. Get: Count(self: CookieContainer) -> int """ MaxCookieSize=property(lambda self: object(),lambda self,v: None,lambda self: None) """Represents the maximum allowed length of a System.Net.Cookie. Get: MaxCookieSize(self: CookieContainer) -> int Set: MaxCookieSize(self: CookieContainer)=value """ PerDomainCapacity=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets and sets the number of System.Net.Cookie instances that a System.Net.CookieContainer can hold per domain. Get: PerDomainCapacity(self: CookieContainer) -> int Set: PerDomainCapacity(self: CookieContainer)=value """ DefaultCookieLengthLimit=4096 DefaultCookieLimit=300 DefaultPerDomainCookieLimit=20
41.403509
172
0.730932
[ "MIT" ]
tranconbv/ironpython-stubs
release/stubs.min/System/Net/__init___parts/CookieContainer.py
4,720
Python
class Solution(object): def changebase(self, n, base): digits = "0123456789ABCDEF" remstack = [] while n > 0: rem = n % base remstack.append(rem) n = n / base newString = "" while not len(remstack) == 0: newString += digits[remstack.pop()] return newString def countNum(self, n, base): res = self.changebase(n, base)[:-1][::-1] i = 0 count = 0 while i < len(res): print int(res[i]) count += base**i * int(res[i]) i += 1 print count a = Solution() print a.changebase(44, 4) print a.countNum(44, 4) print a.changebase(23, 4)
21.3
46
0.544601
[ "MIT" ]
quake0day/oj
changebase.py
639
Python
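The Solution class above builds the base-b representation with a remainder stack and is written for Python 2 (where / is integer division). The same conversion in Python 3, sketched with divmod:

def change_base(n, base):
    digits = "0123456789ABCDEF"
    if n == 0:
        return "0"
    out = []
    while n > 0:
        n, rem = divmod(n, base)   # floor-divide and take the remainder in one step
        out.append(digits[rem])
    return "".join(reversed(out))


print(change_base(44, 4))   # 230
print(change_base(23, 4))   # 113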
"""Retrieve county->CBSA crosswalk file from the NBER""" from collections import defaultdict import unicodecsv as csv import logging import requests from utils.fs import cache_json URL = 'http://www.nber.org/cbsa-msa-fips-ssa-county-crosswalk/2016/cbsatocountycrosswalk2016.csv' @cache_json('cbsa_lookup.json') def cbsa_lookup(): """ Construct a County->CBSA Lookup table from NBER data Returns: dict each key is a (State Code, County FIPS code) tuple each value is a (CBSA FIPS code, CBSA Name) tuple """ logging.info("Beginning CBSA lookup") cbsa_lookup = defaultdict(dict) download = requests.get(URL) decoded_content = download.content.decode('latin-1').encode('utf-8') reader = csv.reader(decoded_content.splitlines(), delimiter=',') # skip header line next(reader) for row in reader: state_code = row[1] fipscounty = row[3][-3:] cbsa = row[4] cbsaname = row[5] cbsa_lookup[state_code][fipscounty] = (cbsa, cbsaname) return cbsa_lookup
30.970588
97
0.68471
[ "MIT" ]
squatter1/skills-ml
datasets/nber_county_cbsa.py
1,053
Python
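The loader above streams a remote CSV and builds a nested {state: {county_fips: (cbsa, cbsa_name)}} lookup. A self-contained sketch of just the parsing step against an in-memory CSV; the column positions follow the record above, and the sample row is made up for illustration:

import csv
import io
from collections import defaultdict

SAMPLE_CSV = (
    "col0,ssastate,col2,fipscounty,cbsa,cbsaname\n"
    "x,ZZ,x,99001,12345,Example Metro Area\n"
)

lookup = defaultdict(dict)
reader = csv.reader(io.StringIO(SAMPLE_CSV), delimiter=",")
next(reader)  # skip the header line
for row in reader:
    state_code = row[1]
    fipscounty = row[3][-3:]   # keep the 3-digit county part of the FIPS code
    cbsa, cbsaname = row[4], row[5]
    lookup[state_code][fipscounty] = (cbsa, cbsaname)

print(dict(lookup))  # {'ZZ': {'001': ('12345', 'Example Metro Area')}}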
#!/usr/bin/env python # -*- coding: utf-8 -*- class Config(object): DEBUG = True RELOADER = True PORT = 8080 class DevelopmentConfig(Config): pass class ProductionConfig(Config): DEBUG = False RELOADER = False
15.8
32
0.64557
[ "BSD-3-Clause" ]
bsmithgall/cookiecutter-kindergarten
{{ cookiecutter.app_name }}/{{ cookiecutter.app_name }}_backend/{{ cookiecutter.app_name }}/config.py
237
Python
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.utils.spectral_norm as spectral_norm import math import numpy as np import torchvision.models as models from modules.networks import get_pad from torch.distributions.multivariate_normal import MultivariateNormal from util.utils import length_to_mask def get_conv_layer(in_channel, out_channel, gan_type='sn_gan', **kwargs): if gan_type == 'sn_gan': return spectral_norm(nn.Conv2d(in_channel, out_channel, **kwargs)) else: return nn.Conv2d(in_channel, out_channel, **kwargs) def get_conv_block(in_channel, out_channel, gan_type='sn_gan', normalization='instance', activation='leakyrelu', **kwargs): block = [] block.append(get_conv_layer(in_channel, out_channel, gan_type=gan_type, **kwargs)) if normalization == 'instance': block.append(nn.InstanceNorm2d(out_channel)) if activation == 'leakyrelu': block.append(nn.LeakyReLU()) return nn.Sequential(*block) def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) # try: # from apex.normalization.fused_layer_norm import FusedLayerNorm as SketchLayerNorm # except ImportError: # logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .") class SketchLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """ Construct a layernorm module in the TF style (epsilon inside the square root). """ super(SketchLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu}#, "swish": swish NORM2FN = {'BN1d':nn.BatchNorm1d, 'BN2d':nn.BatchNorm2d, 'LN':nn.LayerNorm} class SketchSelfAttention(nn.Module): ''' Implementation for self attention in Sketch. The input will be a K-Dim feature. Input Parameters: config[dict]: hidden_dim[int]: The dimension of input hidden embeddings in the self attention, hidden diension is equal to the output dimension num_heads[int]: The number of heads attention_probs[float]: probability parameter for dropout ''' def __init__(self, num_heads, hidden_dim, attention_dropout_prob): super(SketchSelfAttention, self).__init__() if hidden_dim % num_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_dim, num_heads)) self.hidden_dim = hidden_dim self.num_heads = num_heads #self.attention_dropout_prob = config.attention_dropout_prob # Calculation for intermeidate parameters self.head_dim = int(self.hidden_dim / self.num_heads) self.all_head_dim = self.head_dim * self.num_heads self.scale_factor = math.sqrt(self.head_dim) self.query = nn.Linear(self.hidden_dim, self.all_head_dim) self.key = nn.Linear(self.hidden_dim, self.all_head_dim) self.value = nn.Linear(self.hidden_dim, self.all_head_dim) self.dropout = nn.Dropout(attention_dropout_prob) self.multihead_output = None def transpose_(self, x): ''' Transpose Function for simplicity. 
''' new_x_shape = x.size()[:-1] + (self.num_heads , self.head_dim) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask, head_mask=None, output_attentions=False, keep_multihead_output=False): ''' Input: hidden_states[batch, seq_len, hidden_dim] attention_mask[batch, 1, 1, seq_len] Output: context_states[batch, seq_len, hidden_dim] attention_probs[seq_len, hidden_dim] ''' # Get query, key, value together query = self.query(hidden_states) # [batch, seq_len, all_head_dim] key = self.key(hidden_states) # [batch, seq_len, all_head_dim] value = self.value(hidden_states) # [batch, seq_len, all_head_dim] # tranpose the query, key, value into multi heads[batch, seq_len, ] multi_query = self.transpose_(query) # [batch, num_heads, seq_len, head_dim] multi_key = self.transpose_(key) # [batch, num_heads, seq_len, head_dim] multi_value = self.transpose_(value) # [batch, num_heads, seq_len, head_dim] # Calculate Attention maps attention_scores = torch.matmul(multi_query, multi_key.transpose(-1, -2)) attention_scores = attention_scores / self.scale_factor #print(attention_scores.size(), attention_mask.size()) attention_scores = attention_scores + attention_mask attention_probs = F.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask # Compute states values context_states = torch.matmul(attention_probs, multi_value) if keep_multihead_output: self.multihead_output = context_states self.multihead_output.retain_grad() context_states = context_states.permute(0,2,1,3) context_states = context_states.contiguous().view(context_states.size()[:-2]+(-1,)) #view(context_states.size()[:-2]+ (self.all_head_dim,)) if output_attentions: return context_states, attention_probs return context_states class SketchOutput(nn.Module): def __init__(self, input_dim, output_dim, attention_norm_type, output_dropout_prob): super(SketchOutput, self).__init__() self.fc = nn.Linear(input_dim, output_dim) if attention_norm_type not in NORM2FN: raise ValueError( "The attention normalization is not in standard normalization types.") self.norm = NORM2FN[attention_norm_type](output_dim) self.dropout = nn.Dropout(output_dropout_prob) ''' Input: hidden_states[]: Output: hidden_states[]: ''' def forward(self, hidden_states, input_states): hidden_states = self.fc(hidden_states) hidden_states = self.dropout(hidden_states) #print(hidden_states.size()) hidden_states = self.norm(hidden_states+input_states) return hidden_states class SketchMultiHeadAttention(nn.Module): def __init__(self, num_heads, hidden_dim, attention_norm_type, attention_dropout_prob, hidden_dropout_prob,): super(SketchMultiHeadAttention, self).__init__() self.attention = SketchSelfAttention(num_heads, hidden_dim, attention_dropout_prob) self.output = SketchOutput(hidden_dim, hidden_dim, attention_norm_type, hidden_dropout_prob) def forward(self, hidden_states, attention_mask, head_mask=None, output_attentions=False): input_states = hidden_states #print(hidden_states) hidden_states = self.attention(hidden_states, attention_mask, head_mask=head_mask) #print(hidden_states) if output_attentions: hidden_states, attention_probs = hidden_states output_states = self.output(hidden_states, input_states) if output_attentions: return output_states, attention_probs return output_states class SketchIntermediate(nn.Module): def __init__(self, hidden_dim, inter_dim, inter_activation): super(SketchIntermediate, self).__init__() self.fc = 
nn.Linear(hidden_dim, inter_dim) self.activation = ACT2FN[inter_activation] def forward(self, hidden_states): hidden_states = hidden_states.to(next(self.fc.parameters()).device) inter_states = self.fc(hidden_states.contiguous()) inter_states = self.activation(inter_states) return inter_states class SketchLayer(nn.Module): ''' A transformer layer for sketch bert ''' def __init__(self, num_heads, hidden_dim, inter_dim, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob,): super(SketchLayer, self).__init__() self.attention = SketchMultiHeadAttention(num_heads, hidden_dim, attention_norm_type, attention_dropout_prob, hidden_dropout_prob,) self.inter_layer = SketchIntermediate(hidden_dim, inter_dim, inter_activation) self.output = SketchOutput(inter_dim, hidden_dim, attention_norm_type, output_dropout_prob) ''' Input: hidden_states[batch, seq_len, hidden_dim]: attention_mask[batch, seq_len] ''' def forward(self, hidden_states, attention_mask, head_mask=None, output_attentions=False): hidden_states = self.attention(hidden_states, attention_mask, head_mask) if output_attentions: hidden_states, attention_probs = hidden_states inter_states = self.inter_layer(hidden_states) output_states = self.output(inter_states, hidden_states) if output_attentions: return output_states, attention_probs return output_states class SketchSegmentLayer(nn.Module): ''' A transformer layer for sketch bert ''' def __init__(self, num_heads, hidden_dim, inter_dim, max_segment, segment_atten_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob,): super(SketchSegmentLayer, self).__init__() self.max_segment = max_segment self.inter_dim = inter_dim self.segment_atten_type = segment_atten_type self.local_attention = SketchMultiHeadAttention(num_heads, hidden_dim, attention_norm_type, attention_dropout_prob, hidden_dropout_prob,) self.segment_attention = SketchMultiHeadAttention(num_heads, hidden_dim, attention_norm_type, attention_dropout_prob, hidden_dropout_prob,) self.local_inter_layer = SketchIntermediate(hidden_dim, inter_dim//2, inter_activation) self.seg_inter_layer = SketchIntermediate(hidden_dim, inter_dim//2, inter_activation) self.output = SketchOutput(inter_dim, hidden_dim, attention_norm_type, output_dropout_prob) def get_seg_states(self, hidden_states, segment_index): ''' Input: hidden_states[batch, seq_len, hidden_dim] segment_index[batch, seq_len] ''' seg_states = torch.zeros(hidden_states.size(0), self.max_segment, hidden_states.size(2)).to(hidden_states.device) length = (segment_index==0).sum(dim=1) length_mask = length_to_mask(length, max_len=self.max_segment, dtype=torch.float) seg_states[length_mask==1,:] = hidden_states[segment_index==0,:] return seg_states, length_mask def forward(self, hidden_states, attention_mask, segments, segment_index, head_mask=None, output_attentions=False): ''' Input: hidden_states[batch, seg_len, hidden_dim]: attention_mask[batch, seg_len](segment-based) segments[batch, seg_len]: segment_index[batch, seq_len] ''' # Local Attention local_states = self.local_attention(hidden_states, attention_mask, head_mask) if output_attentions: local_states, attention_probs = local_states #[batch, seq_len, hidden_dim] input_prefix = hidden_states.size(1) - segment_index.size(1) # Segment Level Attention seg_states, seg_atten_mask = self.get_seg_states(local_states[:,input_prefix:,:], segment_index) if self.segment_atten_type == 'multi': seg_states = self.segment_attention(seg_states, 
seg_atten_mask.unsqueeze(1).unsqueeze(2), head_mask) if output_attentions: seg_states, attention_probs = seg_states #[batch, seq_len, hidden_dim] # Concatenate local_inter_states = self.local_inter_layer(local_states) seg_inter_states = self.seg_inter_layer(seg_states) aug_seg_inter_states = torch.gather(seg_inter_states, 1, (segments[:,input_prefix:]-2).view(segments.size(0), -1, 1).repeat(1,1, seg_inter_states.size(2))) inter_states = torch.zeros(local_inter_states.size(0), local_inter_states.size(1), self.inter_dim).to(local_inter_states.device) #print(hidden_states.size(), local_states.size(), local_inter_states.size()) inter_states[:,:,:self.inter_dim//2] = local_inter_states inter_states[:,input_prefix:, self.inter_dim//2:] = aug_seg_inter_states inter_states[:,:input_prefix, self.inter_dim//2:] = seg_inter_states.sum(dim=1, keepdim=True) output_states = self.output(inter_states, hidden_states) if output_attentions: return output_states, attention_probs return output_states def setting2dict(paras, setting): paras['num_heads'] = setting[0] paras['hidden_dim'] = setting[1] paras['inter_dim'] = setting[2] class SketchEncoder(nn.Module): ''' layers_setting[list]: [[12, ], []] ''' def __init__(self, layers_setting, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob,): super(SketchEncoder, self).__init__() layer_paras = { 'attention_norm_type':attention_norm_type, 'inter_activation':inter_activation, 'attention_dropout_prob':attention_dropout_prob, 'hidden_dropout_prob':hidden_dropout_prob, 'output_dropout_prob':output_dropout_prob} self.layers = [] for layer_setting in layers_setting: setting2dict(layer_paras, layer_setting) self.layers.append(SketchLayer(**layer_paras)) self.layers = nn.ModuleList(self.layers) def forward(self, input_states, attention_mask, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): all_states = [] all_attention_probs = [] hidden_states = input_states for layer in self.layers: hidden_states = layer(hidden_states, attention_mask, head_mask=head_mask, output_attentions=output_attentions) if output_attentions: hidden_states, attention_probs = hidden_states all_attention_probs.append(attention_probs) if output_all_states: all_states.append(hidden_states) if not output_all_states: all_states.append(hidden_states) if output_attentions: return all_states, all_attention_probs return all_states class SketchALEncoder(nn.Module): ''' A Lite BERT: Parameter Sharing layers_setting[list]: [[12, ], []] ''' def __init__(self, layers_setting, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob,): super(SketchALEncoder, self).__init__() layer_paras = { 'attention_norm_type':attention_norm_type, 'inter_activation':inter_activation, 'attention_dropout_prob':attention_dropout_prob, 'hidden_dropout_prob':hidden_dropout_prob, 'output_dropout_prob':output_dropout_prob} setting2dict(layer_paras, layers_setting[0]) self.sketch_layer = SketchLayer(**layer_paras) self.layers = [] for layer_setting in layers_setting: self.layers.append(self.sketch_layer) #self.layers = nn.ModuleList(self.layers) def forward(self, input_states, attention_mask, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): all_states = [] all_attention_probs = [] hidden_states = input_states for layer in self.layers: hidden_states = layer(hidden_states, attention_mask, head_mask=head_mask, output_attentions=output_attentions) if 
output_attentions: hidden_states, attention_probs = hidden_states all_attention_probs.append(attention_probs) if output_all_states: all_states.append(hidden_states) if not output_all_states: all_states.append(hidden_states) if output_attentions: return all_states, all_attention_probs return all_states class SketchSegmentEncoder(nn.Module): ''' layers_setting[list]: [[12, ], []] ''' def __init__(self, layers_setting, max_segment, segment_atten_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob,): super(SketchSegmentEncoder, self).__init__() layer_paras = { 'max_segment':max_segment, 'segment_atten_type':segment_atten_type, 'attention_norm_type':attention_norm_type, 'inter_activation':inter_activation, 'attention_dropout_prob':attention_dropout_prob, 'hidden_dropout_prob':hidden_dropout_prob, 'output_dropout_prob':output_dropout_prob} self.layers = [] self.max_segment = max_segment for layer_setting in layers_setting: setting2dict(layer_paras, layer_setting) self.layers.append(SketchSegmentLayer(**layer_paras)) self.layers = nn.ModuleList(self.layers) def forward(self, input_states, attention_mask, segments, segment_index, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): all_states = [] all_attention_probs = [] hidden_states = input_states for layer in self.layers: hidden_states = layer(hidden_states, attention_mask, segments, segment_index, head_mask=head_mask, output_attentions=output_attentions) if output_attentions: hidden_states, attention_probs = hidden_states all_attention_probs.append(attention_probs) if output_all_states: all_states.append(hidden_states) if not output_all_states: all_states.append(hidden_states) if output_attentions: return all_states, all_attention_probs return all_states class SketchEmbedding(nn.Module): def __init__(self, input_dim, hidden_dim): super(SketchEmbedding, self).__init__() self.embedding = nn.Linear(input_dim, hidden_dim) def forward(self, input_states): return self.embedding(input_states) class SketchDiscreteEmbedding(nn.Module): ''' max_size[tuple](x_length, y_length) ''' def __init__(self, max_size, type_size, hidden_dim, pool_type): super(SketchDiscreteEmbedding, self).__init__() self.x_embedding = nn.Embedding(2*max_size[0]+2, hidden_dim//2) self.y_embedding = nn.Embedding(2*max_size[1]+2, hidden_dim//2) self.type_embedding = nn.Embedding(type_size+1, hidden_dim) assert pool_type in ['sum', 'con'] self.pool_type = pool_type ''' input_states[batch, seq_len, 3(input_dim)](Inputs are encoded as discrete type) ''' def forward(self, input_states): input_states = input_states.to(dtype=torch.long) input_states = input_states + 1 #print(input_states[0,0,:], torch.min(input_states), torch.max(input_states)) x_hidden = self.x_embedding(input_states[:,:,0]) y_hidden = self.y_embedding(input_states[:,:,1]) #print(x_hidden.size(), y_hidden.size()) axis_hidden = torch.cat([x_hidden, y_hidden], dim=2) type_hidden = self.type_embedding(input_states[:,:,2]) if self.pool_type == 'sum': return axis_hidden + type_hidden elif self.pool_type == 'con': return torch.cat([axis_hidden, type_hidden], dim=2) class SketchSinPositionEmbedding(nn.Module): def __init__(self, max_length, pos_hidden_dim): super(SketchSinPositionEmbedding, self).__init__() self.pos_embedding_matrix = torch.zeros(max_length, pos_hidden_dim) pos_vector = torch.arange(max_length).view(max_length, 1).type(torch.float) dim_vector = torch.arange(pos_hidden_dim).type(torch.float) + 1.0 
#print((pos_vector / (dim_vector[::2] / 2).view(1, -1)).size(), self.pos_embedding_matrix[:,::2].size()) self.pos_embedding_matrix[:,::2] = torch.sin(pos_vector / (dim_vector[::2] / 2).view(1, -1)) self.pos_embedding_matrix[:,1::2] = torch.cos(pos_vector / ((dim_vector[1::2] - 1) / 2).view(1, -1)) #print(self.pos_embedding_matrix) ''' Input: position_labels[batch, seq_len] Output: position_states[batch, seq_len, pos_hidden_dim] ''' def forward(self, position_labels): return self.pos_embedding_matrix[position_labels.view(-1),:].view(position_labels.size(0), position_labels.size(1), -1) class SketchLearnPositionEmbedding(nn.Module): def __init__(self, max_length, pos_hidden_dim): super(SketchLearnPositionEmbedding, self).__init__() print(max_length, pos_hidden_dim) self.pos_embedding = nn.Embedding(max_length, pos_hidden_dim) ''' Input: position_labels[batch, seq_len] Output: position_states[batch, seq_len, pos_hidden_dim] ''' def forward(self, position_labels): return self.pos_embedding(position_labels) class SketchEmbeddingRefineNetwork(nn.Module): ''' The module to upsample the embedding feature, idea from the ALBERT: Factorized Embedding ''' def __init__(self, out_dim, layers_dim): super(SketchEmbeddingRefineNetwork, self).__init__() self.layers = [] layers_dim = layers_dim.copy() layers_dim.append(out_dim) for i in range(len(layers_dim)-1): self.layers.append(nn.Linear(layers_dim[i], layers_dim[i+1])) self.layers = nn.ModuleList(self.layers) def forward(self, input_state): x = input_state for layer in self.layers: x = layer(x) return x class SketchTransformerModel(nn.Module): ''' Input: layers_setting[list] input_dim[int] max_length[int] position_type[str] attention_norm_type[str] inter_activation[str] attention_dropout_prob[float] hidden_dropout_prob[float] output_dropout_prob[float] ''' def __init__(self, model_type, layers_setting, embed_layers_setting, input_dim, max_length, max_size, type_size, position_type, segment_type, sketch_embed_type, embed_pool_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob): super(SketchTransformerModel, self).__init__() self.layers_setting = layers_setting self.num_hidden_layers = len(layers_setting) self.embed_pool_type = embed_pool_type assert sketch_embed_type in ['linear', 'discrete'] if sketch_embed_type == 'linear': self.embedding = SketchEmbedding(input_dim, embed_layers_setting[0]) elif sketch_embed_type == 'discrete': self.embedding = SketchDiscreteEmbedding(max_size, type_size, embed_layers_setting[0], embed_pool_type) assert position_type in ['sin', 'learn', 'none'] if position_type == 'sin': self.pos_embedding = SketchSinPositionEmbedding(max_length, embed_layers_setting[0]) elif position_type == 'learn': self.pos_embedding = SketchLearnPositionEmbedding(max_length, embed_layers_setting[0]) else: self.pos_embedding = None if segment_type == 'learn': self.segment_embedding = SketchLearnPositionEmbedding(max_length, embed_layers_setting[0]) else: self.segment_embedding = None self.embed_refine_net = SketchEmbeddingRefineNetwork(layers_setting[0][1], embed_layers_setting) assert model_type in ['albert', 'bert'] if model_type == 'albert': self.encoder = SketchALEncoder(layers_setting, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob) elif model_type == 'bert': self.encoder = SketchEncoder(layers_setting, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob) def load_model(self, 
state_dict, own_rel_in_input, own_cls_in_input, pre_rel_in_input, pre_cls_in_input): own_state = self.state_dict() for k, v in own_state.items(): if k == 'pos_embedding.pos_embedding.weight': own_pos_size = v.size(0) seq_len = own_pos_size - own_rel_in_input - own_cls_in_input pretrained_pos_size = state_dict[k].size(0) own_start_ind = int(own_rel_in_input+own_cls_in_input) pre_start_ind = int(pre_rel_in_input+pre_cls_in_input) seq_len = min(seq_len, state_dict[k].size(0)-pre_start_ind) own_state[k][own_start_ind:own_start_ind+seq_len] = state_dict[k][pre_start_ind:pre_start_ind+seq_len] if own_rel_in_input and own_cls_in_input: if pre_cls_in_input and pre_cls_in_input: own_state[k][:2] = state_dict[k][:2] elif pre_cls_in_input: own_state[k][1] = state_dict[k][0] elif pre_rel_in_input: own_state[k][0] = state_dict[k][0] elif own_rel_in_input: if pre_rel_in_input: own_state[k][0] = state_dict[k][0] elif own_cls_in_input: if pre_cls_in_input: own_state[k][0] = state_dict[k][int(pre_rel_in_input)] else: own_state[k] = state_dict[k] self.load_state_dict(own_state) def get_pos_states(self, input_states): return torch.arange(input_states.size(1)).view(1,-1).repeat(input_states.size(0),1).to(device=input_states.device) ''' Input: input_states[batch, seq_len, 5], attention_mask[batch, seq_len]/[batch, seq_len, ],(length mask) Output: output_states[batch, seq_len, hidden_dim], ''' def forward(self, input_states, attention_mask, segments=None, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): if attention_mask is None: attention_mask = torch.ones(input_states.size(0), input_states.size(1)) # Extending attention mask if len(attention_mask.size()) == 3: extended_attention_mask = attention_mask.unsqueeze(1) elif len(attention_mask.size()) == 2: extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 attention_mask = extended_attention_mask # process head mask if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand_as(self.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) # switch to fload if need + fp16 compatibility else: head_mask = None input_states = self.embedding(input_states) if self.pos_embedding is not None: pos_states = self.pos_embedding(self.get_pos_states(input_states)) input_states = input_states + pos_states.to(device=input_states.device) if self.segment_embedding is not None and segments is not None: segment_states = self.segment_embedding(segments) input_states = input_states + segment_states input_states = self.embed_refine_net(input_states) output_states = self.encoder(input_states, attention_mask, head_mask, output_all_states, output_attentions, keep_multihead_output) if output_attentions: output_states, attention_probs = output_states return output_states[-1], attention_probs return output_states[-1] class SketchCNN(nn.Module): ''' Truely a CNN model ''' def __init__(self, hidden_dim, net_type, pretrained): super(SketchCNN, self).__init__() if net_type == 'resnet18': self.encoder = models.resnet18(pretrained=pretrained) 
self.encoder.fc = nn.Linear(self.encoder.fc.in_features, hidden_dim) elif net_type == 'resnet50': self.encoder = models.resnet50(pretrained=pretrained) self.encoder.fc = nn.Linear(self.encoder.fc.in_features, hidden_dim) elif net_type == 'tcnet': pass elif net_type =='sketchanet': pass def forward(self, input): return self.encoder(input) ''' Sketch Transformer based GAN ''' class SketchGANGenerator(nn.Module): ''' Assume Label in the Input ''' def __init__(self, layers_setting, input_dim, cls_dim, max_length, position_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob): super(SketchGANGenerator, self).__init__() self.encoder = SketchTransformerModel(layers_setting, input_dim, cls_dim, max_length, position_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob) self.output = nn.Linear(layers_setting[0][1], 5) ''' The same as Transformer Model ''' def forward(self, input_states, attention_mask, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): hidden_states = self.encoder(input_states, attention_mask, head_mask=head_mask, output_all_states=output_all_states, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output) fake_states = self.output(hidden_states) return fake_states class SketchGANDiscriminator(nn.Module): ''' Assume Label in the Input ''' def __init__(self, layers_setting, input_dim, cls_dim, max_length, position_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob): super(SketchGANDiscriminator, self).__init__() self.encoder = SketchTransformerModel(layers_setting, input_dim, cls_dim, max_length, position_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob) self.output = nn.Linear(layers_setting[0][1], 2) ''' The same as Transformer Model ''' def forward(self, input_states, attention_mask, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): hidden_states = self.encoder(input_states, attention_mask, head_mask=head_mask, output_all_states=output_all_states, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output) label = self.output(hidden_states[:,0,:]) return label ''' Sketch Transformer based VAE ''' class SketchVAEEncoder(SketchTransformerModel): def __init__(self, model_type, layers_setting, embed_layers_setting, input_dim, cls_dim, max_length, max_size, type_size, conditional, position_type, segment_type, sketch_embed_type, embed_pool_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob): super(SketchVAEEncoder, self).__init__(model_type, layers_setting, embed_layers_setting, input_dim, max_length, max_size, type_size, position_type, segment_type, sketch_embed_type, embed_pool_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob) # self.rec_fc = nn.Linear(layers_setting[0][1], output_dim) self.conditional = conditional if self.conditional: self.cls_embedding = nn.Embedding(cls_dim, embed_layers_setting[0]) else: self.cls_embedding = None def load_model(self, state_dict, only_encoder): own_state = self.state_dict() for k, v in own_state.items(): if only_encoder and ('encoder' in k or 'embed_refine_net' in k): own_state[k] = state_dict[k] else: if k in state_dict and k in own_state: own_state[k] = 
state_dict[k] self.load_state_dict(own_state) def forward(self, input_states, attention_mask, targets=None, segments=None, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): ''' Input: input_states[batch, seq_len, 5], zs[batch, latent_dim] ''' if attention_mask is None: attention_mask = torch.ones(input_states.size(0), input_states.size(1)) # Extending attention mask if len(attention_mask.size()) == 3: extended_attention_mask = attention_mask.unsqueeze(1) elif len(attention_mask.size()) == 2: extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 attention_mask = extended_attention_mask # process head mask if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand_as(self.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) # switch to fload if need + fp16 compatibility else: head_mask = None input_states = self.embedding(input_states) if self.pos_embedding is not None: pos_states = self.pos_embedding(self.get_pos_states(input_states)) input_states = input_states + pos_states if self.segment_embedding is not None and segments is not None: segment_states = self.segment_embedding(segments) input_states = input_states + segment_states if self.cls_embedding is not None and targets is not None: cls_states = self.cls_embedding(targets) cls_states = cls_states.unsqueeze(1).repeat(1,input_states.size(1),1) input_states = input_states + cls_states input_states = self.embed_refine_net(input_states) # Append the latent_states output_states = self.encoder(input_states, attention_mask, head_mask, output_all_states, output_attentions, keep_multihead_output) if output_attentions: output_states, attention_probs = output_states #return self.rec_fc(output_states[-1]), attention_probs return output_states[-1], attention_probs return output_states[-1] ''' Sketch Transformer based VAE ''' class SketchVAEDecoder(SketchTransformerModel): def __init__(self, model_type, layers_setting, embed_layers_setting, rec_layers_setting, input_dim, output_dim, latent_dim, cls_dim, max_length, max_size, type_size, conditional, position_type, segment_type, sketch_embed_type, embed_pool_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob): print(embed_layers_setting) super(SketchVAEDecoder, self).__init__(model_type, layers_setting, embed_layers_setting, input_dim, max_length, max_size, type_size, position_type, segment_type, sketch_embed_type, embed_pool_type, attention_norm_type, inter_activation, attention_dropout_prob, hidden_dropout_prob, output_dropout_prob) self.conditional = conditional if self.conditional: self.cls_embedding = nn.Embedding(cls_dim, embed_layers_setting[0]) else: self.cls_embedding = None self.re_fcs = [] rec_layers_setting = rec_layers_setting.copy() rec_layers_setting.append(output_dim), rec_layers_setting.insert(0, layers_setting[0][1]) for i in range(len(rec_layers_setting)-1): self.re_fcs.append(nn.Linear(rec_layers_setting[i], rec_layers_setting[i+1])) self.re_fcs = 
nn.ModuleList(self.re_fcs) self.latent_fusion = nn.Linear(layers_setting[0][1]+latent_dim, layers_setting[0][1]) def load_model(self, state_dict, only_encoder): own_state = self.state_dict() for k, v in own_state.items(): if only_encoder and ('encoder' in k or 'embed_refine_net' in k): #print(k in own_state, k in state_dict) own_state[k] = state_dict[k] else: if k in state_dict and k in own_state: own_state[k] = state_dict[k] self.load_state_dict(own_state) def forward(self, input_states, zs, attention_mask, targets=None, segments=None, head_mask=None, output_all_states=False, output_attentions=False, keep_multihead_output=False): ''' Input: input_states[batch, seq_len, 5], zs[batch, latent_dim] ''' if attention_mask is None: attention_mask = torch.ones(input_states.size(0), input_states.size(1)) # Extending attention mask if len(attention_mask.size()) == 3: extended_attention_mask = attention_mask.unsqueeze(1) elif len(attention_mask.size()) == 2: extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 attention_mask = extended_attention_mask # process head mask if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand_as(self.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype, device=input_states.device) # switch to fload if need + fp16 compatibility else: head_mask = None input_states = self.embedding(input_states) if self.pos_embedding is not None: pos_states = self.pos_embedding(self.get_pos_states(input_states)) input_states = input_states + pos_states if self.segment_embedding is not None and segments is not None: segment_states = self.segment_embedding(segments) input_states = input_states + segment_states if self.cls_embedding is not None and targets is not None: cls_states = self.cls_embedding(targets) cls_states = cls_states.unsqueeze(1).repeat(1,input_states.size(1),1) input_states = input_states + cls_states input_states = self.embed_refine_net(input_states) # Append the latent_states input_states = torch.cat([input_states, zs.unsqueeze(1).repeat(1,input_states.size(1),1)],dim=2) input_states = self.latent_fusion(input_states) output_states = self.encoder(input_states, attention_mask, head_mask, output_all_states, output_attentions, keep_multihead_output) if output_attentions: output_states, attention_probs = output_states output_states = output_states[-1] for re_fc in self.re_fcs: output_states = re_fc(output_states) return output_states, attention_probs output_states = output_states[-1] for re_fc in self.re_fcs: output_states = re_fc(output_states) return output_states class SketchVAELatentEmbedding(nn.Module): def __init__(self, hidden_dim, latent_dim, max_length): super(SketchVAELatentEmbedding, self).__init__() self.mu_embedding = nn.Linear(hidden_dim, latent_dim) self.sigma_embedding = nn.Linear(hidden_dim, latent_dim) self.gaussian_generator = MultivariateNormal(torch.zeros(latent_dim), torch.eye(latent_dim)) ''' Input: hidden_states[batch, seq_len, hidden_dim] Output: mus[batch, latent_dim] sigmas[batch, latent_dim] z[batch, latent_dim] ''' def forward(self, hidden_states, 
attention_mask): # Mask the lengths beyond latent_states = hidden_states[:,0,:] mus = self.mu_embedding(latent_states) sigmas = self.sigma_embedding(latent_states) sigmas = torch.exp(sigmas/2) random_normal = self.gaussian_generator.sample([sigmas.size(0)]).to(sigmas.device) zs = mus + sigmas * random_normal return mus, sigmas , zs ''' Different Pooling Layers ''' class SketchPooling(nn.Module): def __init__(self, hidden_dim, input_dim, cls_dim, max_length=250): super(SketchPooling, self).__init__() self.fc1 = nn.Linear(hidden_dim, 4) self.fc2 = nn.Linear(max_length*4, cls_dim) self.re_fc = nn.Linear(hidden_dim, input_dim) def forward(self, hidden_states): re_sketch = self.re_fc(hidden_states) pooled = self.fc1(hidden_states) pooled = self.fc2(pooled.view(pooled.size(0), -1)) return re_sketch, pooled class SketchGMMPooling(nn.Module): def __init__(self, hidden_dim, M, cls_dim, max_length=250): super(SketchPooling, self).__init__() self.fc1 = nn.Linear(hidden_dim, 4) self.fc2 = nn.Linear(max_length*4, cls_dim) self.re_fc = nn.Linear(hidden_dim, 6*M + 3) ''' Input: hidden_states[batch, seq_len, hidden_dim] Output: re_sketch[batch, seq_len, 6M+3] pooled[batch, cls_dim] ''' def forward(self, hidden_states): re_sketch = self.re_fc(hidden_states) pooled = self.fc1(hidden_states) pooled = self.fc2(pooled.view(pooled.size(0), -1)) return re_sketch, pooled class SketchHiddenPooling(nn.Module): def __init__(self, hidden_dim): super(SketchPooler, self).__init__() self.fc = nn.Linear(hidden_dim, hidden_dim) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.fc(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output ''' Multi models for transformer backbone ''' #Mask Sketch Model class MaskSketchRecModel(nn.Module): def __init__(self, rec_layers_setting, hidden_dim, input_dim, cls_in_input, rel_in_input): super(MaskSketchRecModel, self).__init__() self.re_fcs = [] rec_layers_setting.append(input_dim), rec_layers_setting.insert(0, hidden_dim) for i in range(len(rec_layers_setting)-1): self.re_fcs.append(nn.Linear(rec_layers_setting[i], rec_layers_setting[i+1])) self.re_fcs = nn.ModuleList(self.re_fcs) self.cls_in_input = cls_in_input self.rel_in_input = rel_in_input ''' Input: hidden_states[batch, seq_len+cls_input, hidden_dim] ''' def forward(self, hidden_states): hidden_states = hidden_states[:, self.cls_in_input+self.rel_in_input:, :] for re_fc in self.re_fcs: hidden_states = re_fc(hidden_states) return hidden_states class MaskSketchGMMModel(nn.Module): def __init__(self, hidden_dim, M, cls_in_input, rel_in_input): super(MaskSketchGMMModel, self).__init__() self.re_fc = nn.Linear(hidden_dim, 6*M + 3) self.cls_in_input = cls_in_input self.rel_in_input = rel_in_input ''' Input: hidden_states[batch, seq_len+cls_input, hidden_dim] attention_mask[batch, seq_len+cls_input] ''' def forward(self, hidden_states): hidden_states = hidden_states[:, self.cls_in_input+self.rel_in_input:, :] return self.re_fc(hidden_states) # Sketch classification model class SketchClassificationModel(nn.Module): def __init__(self, hidden_dim, cls_dim, max_length): super(SketchClassificationModel, self).__init__() self.fc1 = nn.Linear(hidden_dim, 4) self.fc2 = nn.Linear(max_length*4, cls_dim) ''' Input: hidden_states[batch, seq_len, hidden_dim] attention_mask[batch, seq_len] Output: cls_states[batch, cls_dim] ''' def forward(self, 
hidden_states): pooled = self.fc1(hidden_states) pooled = self.fc2(pooled.view(pooled.size(0), -1)) return pooled class SketchClsPoolingModel(nn.Module): def __init__(self, cls_layers_setting, hidden_dim, cls_dim, pool_dim): super(SketchClsPoolingModel, self).__init__() self.pool_dim = int(pool_dim) cls_layers_setting = cls_layers_setting.copy() cls_layers_setting.insert(0, hidden_dim), cls_layers_setting.append(cls_dim) self.cls_fcs = [] for i in range(len(cls_layers_setting)-1): self.cls_fcs.append(nn.Linear(cls_layers_setting[i], cls_layers_setting[i+1])) self.cls_fcs = nn.ModuleList(self.cls_fcs) ''' Input: hidden_states[batch, seq_len+cls_dim, hidden_dim](0 dim is cls) Output: cls_states[batch, cls_dim] ''' def forward(self, hidden_states): pooled = hidden_states[:,self.pool_dim,:] for cls_fc in self.cls_fcs: pooled = cls_fc(pooled) return pooled class SketchRetrievalPoolingModel(nn.Module): def __init__(self, rel_layers_setting, hidden_dim, feat_dim, pool_dim): super(SketchRetrievalPoolingModel, self).__init__() self.pool_dim = int(pool_dim) rel_layers_setting = rel_layers_setting.copy() rel_layers_setting.insert(0, hidden_dim), rel_layers_setting.append(feat_dim) self.rel_fcs = [] for i in range(len(rel_layers_setting)-1): self.rel_fcs.append(nn.Linear(rel_layers_setting[i], rel_layers_setting[i+1])) self.rel_fcs = nn.ModuleList(self.rel_fcs) ''' Input: hidden_states[batch, seq_len+cls_dim, hidden_dim](0 dim is cls) Output: cls_states[batch, cls_dim] ''' def forward(self, hidden_states): pooled = hidden_states[:,self.pool_dim,:] for rel_fc in self.rel_fcs: pooled = rel_fc(pooled) return pooled class SketchDiscretePoolingModel(nn.Module): def __init__(self, hidden_dim, max_size, type_size, cls_in_input, rel_in_input): super(SketchDiscretePoolingModel, self).__init__() self.cls_in_input = cls_in_input self.rel_in_input = rel_in_input self.x_pooling = nn.Linear(hidden_dim, 2*max_size[0]+1) self.y_pooling = nn.Linear(hidden_dim, 2*max_size[1]+1) self.type_pooling = nn.Linear(hidden_dim, type_size) def forward(self, hidden_states): ''' Input: hidden_states[batch, seq_len+cls_dim, hidden_dim](0 dim is cls) Output: x_pred[batch, seq_len+cls_dim, 2*max_size[0]+1] y_pred[batch, seq_len+cls_dim, 2*max_size[1]+1] type_pred[batch, seq_len+cls_dim, type_size] ''' hidden_states = (hidden_states)[:,self.cls_in_input+self.rel_in_input:,:] x_pred = self.x_pooling(hidden_states) y_pred = self.y_pooling(hidden_states) type_pred = self.type_pooling(hidden_states) return x_pred, y_pred, type_pred class SketchSegmentOrderPoolingModel(nn.Module): def __init__(self, hidden_dim, max_segment, cls_in_input, rel_in_input): super(SketchSegmentOrderPoolingModel, self).__init__() self.sg_fc = nn.Linear(hidden_dim, max_segment) self.cls_in_input = cls_in_input self.rel_in_input = rel_in_input def forward(self, hidden_states, segment_index): ''' Input: hidden_states[batch, seg_len, hidden_dim] segment_index[batch, seq_len] ''' seg_states = hidden_states[:,self.cls_in_input+self.rel_in_input:,:][segment_index==0,:] return self.sg_fc(seg_states) class GMMLoss(nn.Module): def __init__(self, reduction='mean'): super(GMMLoss, self).__init__() # self.logsoftmax = nn.LogSoftmax(dim=-1) self.reduction = reduction ''' x[seq_len, batch, 2] lengths[batch, seq_len] pis[seq_len, batch, M]: no softmax The {pis} in the paper SketchRNN, https://arxiv.org/abs/1704.03477 mus[seq_len, batch, M, 2]: The {mus} in the paper SketchRNN, https://arxiv.org/abs/1704.03477 sigmas[seq_len, batch, M, 2]: exp The {sigmas} in the paper SketchRNN, 
            https://arxiv.org/abs/1704.03477
    rhos[seq_len, batch, M]: tanh
            The {rho} in the paper SketchRNN, https://arxiv.org/abs/1704.03477
    masks[]
    '''
    def forward(self, x, lengths, pis, mus, sigmas, rhos, epsilon=1e-8):
        batch_size, seq_len, M = pis.size()
        #print(batch_size, seq_len)
        #print(x.size(), pis.size())
        x = x.view(batch_size, seq_len, 1, 2).repeat(1, 1, M, 1)
        sigma_prods = torch.prod(sigmas, dim=3)  # [seq_len, batch, M]
        sigma_sq = torch.pow(sigmas, 2)  # [seq_len, batch, M, 2]
        #print(x.size(), mus.size(), sigmas.size())
        x_center = (x - mus) / (sigmas)
        Z = torch.sum(x_center*x_center, dim=3) - 2 * rhos * torch.prod(x_center, dim=3)
        rho_sq = 1 - rhos*rhos  # [seq_len, batch, M]
        denom = 2 * np.pi * sigma_prods * torch.sqrt(rho_sq)
        probs = torch.exp(-Z / (2*rho_sq)) / denom
        # pis = F.softmax(pis, dim=-1)
        probs = torch.sum(F.softmax(pis, dim=-1) * probs, dim=-1)
        log_probs = torch.log(probs+epsilon) * lengths  # [len]
        loss = - torch.mean(log_probs)
        return loss


class KLLoss(nn.Module):
    def __init__(self, kl_tolerance):
        super(KLLoss, self).__init__()
        self.kl_tolerance = torch.tensor(kl_tolerance)

    '''
    Input:
        mus[batch, latent_size]:
        sigmas[batch, latent_size]:
    '''
    def forward(self, mus, sigmas):
        loss = - (0.5) * torch.mean(1 + torch.log(sigmas)*2.0 - mus*mus - sigmas*sigmas)
        return torch.max(loss, self.kl_tolerance.to(loss.device))
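# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original SketchBERT file).
# A minimal, hypothetical example of running the SketchSelfAttention module
# defined above on random data; the batch size, sequence length, hidden size
# and head count are arbitrary assumptions chosen only so the shapes line up.
# ---------------------------------------------------------------------------
def _demo_self_attention():
    batch, seq_len, hidden_dim, num_heads = 2, 16, 64, 4
    atten = SketchSelfAttention(num_heads=num_heads, hidden_dim=hidden_dim,
                                attention_dropout_prob=0.1)
    hidden_states = torch.randn(batch, seq_len, hidden_dim)
    # Additive mask in the layout the encoder builds internally:
    # 0.0 for visible positions, -10000.0 for padded ones.
    attention_mask = torch.zeros(batch, 1, 1, seq_len)
    context = atten(hidden_states, attention_mask)  # [batch, seq_len, hidden_dim]
    return context.shape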
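# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original SketchBERT file).
# GMMLoss consumes the bivariate mixture parameters of SketchRNN
# (https://arxiv.org/abs/1704.03477); the random tensors below are assumptions
# that only demonstrate the shapes and value ranges the loss code expects.
# ---------------------------------------------------------------------------
def _demo_losses():
    batch, seq_len, M, latent_dim = 2, 16, 5, 32
    x = torch.randn(batch, seq_len, 2)                 # target (dx, dy) offsets
    lengths = torch.ones(batch, seq_len)               # 1.0 where the step is valid
    pis = torch.randn(batch, seq_len, M)               # mixture logits (softmaxed inside)
    mus = torch.randn(batch, seq_len, M, 2)
    sigmas = torch.rand(batch, seq_len, M, 2) + 0.5    # strictly positive
    rhos = torch.tanh(torch.randn(batch, seq_len, M))  # correlation in (-1, 1)
    rec_loss = GMMLoss()(x, lengths, pis, mus, sigmas, rhos)
    mus_z = torch.randn(batch, latent_dim)
    sigmas_z = torch.rand(batch, latent_dim) + 0.5     # KLLoss takes sigma, not log-sigma
    kl_loss = KLLoss(kl_tolerance=0.2)(mus_z, sigmas_z)
    return rec_loss, kl_loss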
44.399156
217
0.667744
[ "Apache-2.0" ]
avalonstrel/SketchBERT
models/SketchTransformer/models/networks.py
52,613
Python
import binascii
import re

try:
    import json
except:
    import simplejson as json

# Inserting certain referenced dicts in here means they can be declared in the same order as in the spec.
maps = {}

# SMPP PDU Definition - SMPP v3.4, section 4, page 45
mandatory_parameter_lists = {
    'bind_transmitter': [  # SMPP v3.4, section 4.1.1, table 4-1, page 46
        {'name': 'system_id', 'min': 1, 'max': 16, 'var': True, 'type': 'string', 'map': None},
        {'name': 'password', 'min': 1, 'max': 9, 'var': True, 'type': 'string', 'map': None},
        {'name': 'system_type', 'min': 1, 'max': 13, 'var': True, 'type': 'string', 'map': None},
        {'name': 'interface_version', 'min': 1, 'max': 1, 'var': False, 'type': 'hex', 'map': None},
        {'name': 'addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'},
        {'name': 'addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'},
        {'name': 'address_range', 'min': 1, 'max': 41, 'var': True, 'type': 'string', 'map': None}
    ],
    'bind_transmitter_resp': [  # SMPP v3.4, section 4.1.2, table 4-2, page 47
        {'name': 'system_id', 'min': 1, 'max': 16, 'var': True, 'type': 'string', 'map': None}
    ],
    'bind_receiver': [  # SMPP v3.4, section 4.1.3, table 4-3, page 48
        {'name': 'system_id', 'min': 1, 'max': 16, 'var': True, 'type': 'string', 'map': None},
        {'name': 'password', 'min': 1, 'max': 9, 'var': True, 'type': 'string', 'map': None},
        {'name': 'system_type', 'min': 1, 'max': 13, 'var': True, 'type': 'string', 'map': None},
        {'name': 'interface_version', 'min': 1, 'max': 1, 'var': False, 'type': 'hex', 'map': None},
        {'name': 'addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'},
        {'name': 'addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'},
        {'name': 'address_range', 'min': 1, 'max': 41, 'var': True, 'type': 'string', 'map': None}
    ],
    'bind_receiver_resp': [  # SMPP v3.4, section 4.1.4, table 4-4, page 50
        {'name': 'system_id', 'min': 1, 'max': 16, 'var': True, 'type': 'string', 'map': None}
    ],
    'bind_transceiver': [  # SMPP v3.4, section 4.1.5, table 4-5, page 51
        {'name': 'system_id', 'min': 1, 'max': 16, 'var': True, 'type': 'string', 'map': None},
        {'name': 'password', 'min': 1, 'max': 9, 'var': True, 'type': 'string', 'map': None},
        {'name': 'system_type', 'min': 1, 'max': 13, 'var': True, 'type': 'string', 'map': None},
        {'name': 'interface_version', 'min': 1, 'max': 1, 'var': False, 'type': 'hex', 'map': None},
        {'name': 'addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'},
        {'name': 'addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'},
        {'name': 'address_range', 'min': 1, 'max': 41, 'var': True, 'type': 'string', 'map': None}
    ],
    'bind_transceiver_resp': [  # SMPP v3.4, section 4.1.6, table 4-6, page 52
        {'name': 'system_id', 'min': 1, 'max': 16, 'var': True, 'type': 'string', 'map': None}
    ],
    'outbind': [  # SMPP v3.4, section 4.1.7.1, page 54
        {'name': 'system_id', 'min': 1, 'max': 16, 'var': True, 'type': 'string', 'map': None},
        {'name': 'password', 'min': 1, 'max': 9, 'var': True, 'type': 'string', 'map': None}
    ],
    'unbind': [  # SMPP v3.4, section 4.2.1, table 4-7, page 56
    ],
    'unbind_resp': [  # SMPP v3.4, section 4.2.2, table 4-8, page 56
    ],
    'generic_nack': [  # SMPP v3.4, section 4.3.1, table 4-9, page 57
    ],
    'submit_sm': [  # SMPP v3.4, section 4.4.1, table 4-10, page 59-61
        {'name': 'service_type', 'min': 1, 'max': 6, 'var': True, 'type': 'string', 'map': None},
        {'name': 'source_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map':
'addr_ton'}, {'name': 'source_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'source_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None}, {'name': 'dest_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'dest_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'destination_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None}, {'name': 'esm_class', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'protocol_id', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'priority_flag', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'schedule_delivery_time', 'min': 1, 'max': 17, 'var': False, 'type': 'string', 'map': None}, {'name': 'validity_period', 'min': 1, 'max': 17, 'var': True, 'type': 'string', 'map': None}, {'name': 'registered_delivery', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'replace_if_present_flag', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'data_coding', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'sm_default_msg_id', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'sm_length', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'short_message', 'min': 0, 'max': 254, 'var': 'sm_length', 'type': 'xstring', 'map': None} ], 'submit_sm_resp': [ # SMPP v3.4, section 4.4.2, table 4-11, page 67 {'name': 'message_id', 'min': 0, 'max': 65, 'var': True, 'type': 'string', 'map': None} ], 'submit_multi': [ # SMPP v3.4, section 4.5.1, table 4-12, page 69-71 {'name': 'service_type', 'min': 1, 'max': 6, 'var': True, 'type': 'string', 'map': None}, {'name': 'source_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'source_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'source_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None}, {'name': 'number_of_dests', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'dest_address', 'min': 0, 'max': 0, 'var': 'number_of_dests', 'type': 'dest_address', 'map': None}, {'name': 'esm_class', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'protocol_id', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'priority_flag', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'schedule_delivery_time', 'min': 1, 'max': 17, 'var': False, 'type': 'string', 'map': None}, {'name': 'validity_period', 'min': 1, 'max': 17, 'var': False, 'type': 'string', 'map': None}, {'name': 'registered_delivery', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'replace_if_present_flag', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'data_coding', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'sm_default_msg_id', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'sm_length', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'short_message', 'min': 0, 'max': 254, 'var': 'sm_length', 'type': 'xstring', 'map': None} ], 'dest_address': [ # SMPP v3.4, section 4.5.1.1, table 4-13, page 75 {'name': 'dest_flag', 'min': 1, 'max': 1, 'var': False, 'type': 
'integer', 'map': None} # 'sme_dest_address' or 'distribution_list' goes here ], 'sme_dest_address': [ # SMPP v3.4, section 4.5.1.1, table 4-14, page 75 {'name': 'dest_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'dest_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'destination_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None} ], 'distribution_list': [ # SMPP v3.4, section 4.5.1.2, table 4-15, page 75 {'name': 'dl_name', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None} ], 'submit_multi_resp': [ # SMPP v3.4, section 4.5.2, table 4-16, page 76 {'name': 'message_id', 'min': 1, 'max': 65, 'var': True, 'type': 'string', 'map': None}, {'name': 'no_unsuccess', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'unsuccess_sme', 'min': 0, 'max': 0, 'var': 'no_unsuccess', 'type': 'unsuccess_sme', 'map': None} ], 'unsuccess_sme': [ # SMPP v3.4, section 4.5.2.1, table 4-17, page 77 {'name': 'dest_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'dest_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'destination_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None}, {'name': 'error_status_code', 'min': 4, 'max': 4, 'var': False, 'type': 'integer', 'map': None} ], 'deliver_sm': [ # SMPP v3.4, section 4.6.1, table 4-18, page 79-81 {'name': 'service_type', 'min': 1, 'max': 6, 'var': True, 'type': 'string', 'map': None}, {'name': 'source_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'source_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'source_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None}, {'name': 'dest_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'dest_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'destination_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None}, {'name': 'esm_class', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'protocol_id', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'priority_flag', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'schedule_delivery_time', 'min': 1, 'max': 1, 'var': False, 'type': 'string', 'map': None}, {'name': 'validity_period', 'min': 1, 'max': 1, 'var': False, 'type': 'string', 'map': None}, {'name': 'registered_delivery', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'replace_if_present_flag', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'data_coding', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'sm_default_msg_id', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'sm_length', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'short_message', 'min': 0, 'max': 254, 'var': 'sm_length', 'type': 'xstring', 'map': None} ], 'deliver_sm_resp': [ # SMPP v3.4, section 4.6.2, table 4-19, page 85 {'name': 'message_id', 'min': 1, 'max': 1, 'var': False, 'type': 'string', 'map': None} ], 'data_sm': [ # SMPP v3.4, section 4.7.1, table 4-20, page 87-88 {'name': 'service_type', 'min': 1, 'max': 6, 'var': True, 'type': 'string', 'map': None}, {'name': 
'source_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'source_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'source_addr', 'min': 1, 'max': 65, 'var': True, 'type': 'string', 'map': None}, {'name': 'dest_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'dest_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'destination_addr', 'min': 1, 'max': 65, 'var': True, 'type': 'string', 'map': None}, {'name': 'esm_class', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'registered_delivery', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'data_coding', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None} ], 'data_sm_resp': [ # SMPP v3.4, section 4.7.2, table 4-21, page 93 {'name': 'message_id', 'min': 1, 'max': 65, 'var': True, 'type': 'string', 'map': None} ], 'query_sm': [ # SMPP v3.4, section 4.8.1, table 4-22, page 95 {'name': 'message_id', 'min': 1, 'max': 65, 'var': True, 'type': 'string', 'map': None}, {'name': 'source_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'source_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'source_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None} ], 'query_sm_resp': [ # SMPP v3.4, section 4.7.2, table 4-21, page 93 {'name': 'message_id', 'min': 1, 'max': 65, 'var': True, 'type': 'string', 'map': None}, {'name': 'final_date', 'min': 1, 'max': 17, 'var': False, 'type': 'string', 'map': None}, {'name': 'message_state', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'error_code', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None} ], 'cancel_sm': [ # SMPP v3.4, section 4.9.1, table 4-24, page 98-99 {'name': 'service_type', 'min': 1, 'max': 6, 'var': True, 'type': 'string', 'map': None}, {'name': 'message_id', 'min': 1, 'max': 65, 'var': True, 'type': 'string', 'map': None}, {'name': 'source_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'source_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'source_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None}, {'name': 'dest_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'dest_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'destination_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None} ], 'cancel_sm_resp': [ # SMPP v3.4, section 4.9.2, table 4-25, page 100 ], 'replace_sm': [ # SMPP v3.4, section 4.10.1, table 4-26, page 102-103 {'name': 'message_id', 'min': 1, 'max': 65, 'var': True, 'type': 'string', 'map': None}, {'name': 'source_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'source_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'source_addr', 'min': 1, 'max': 21, 'var': True, 'type': 'string', 'map': None}, {'name': 'schedule_delivery_time', 'min': 1, 'max': 17, 'var': False, 'type': 'string', 'map': None}, {'name': 'validity_period', 'min': 1, 'max': 17, 'var': False, 'type': 'string', 'map': None}, {'name': 'registered_delivery', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 
'replace_if_present_flag', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'data_coding', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'sm_default_msg_id', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'sm_length', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': None}, {'name': 'short_message', 'min': 0, 'max': 254, 'var': 'sm_length', 'type': 'xstring', 'map': None} ], 'replace_sm_resp': [ # SMPP v3.4, section 4.10.2, table 4-27, page 104 ], 'enquire_link': [ # SMPP v3.4, section 4.11.1, table 4-28, page 106 ], 'enquire_link_resp': [ # SMPP v3.4, section 4.11.2, table 4-29, page 106 ], 'alert_notification': [ # SMPP v3.4, section 4.12.1, table 4-30, page 108 {'name': 'source_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'source_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'source_addr', 'min': 1, 'max': 65, 'var': True, 'type': 'string', 'map': None}, {'name': 'esme_addr_ton', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_ton'}, {'name': 'esme_addr_npi', 'min': 1, 'max': 1, 'var': False, 'type': 'integer', 'map': 'addr_npi'}, {'name': 'esme_addr', 'min': 1, 'max': 65, 'var': True, 'type': 'string', 'map': None}, ] } def mandatory_parameter_list_by_command_name(command_name): return mandatory_parameter_lists.get(command_name, []) # Command IDs - SMPP v3.4, section 5.1.2.1, table 5-1, page 110-111 command_id_by_hex = { '80000000': {'hex': '80000000', 'name': 'generic_nack'}, '00000001': {'hex': '00000001', 'name': 'bind_receiver'}, '80000001': {'hex': '80000001', 'name': 'bind_receiver_resp'}, '00000002': {'hex': '00000002', 'name': 'bind_transmitter'}, '80000002': {'hex': '80000002', 'name': 'bind_transmitter_resp'}, '00000003': {'hex': '00000003', 'name': 'query_sm'}, '80000003': {'hex': '80000003', 'name': 'query_sm_resp'}, '00000004': {'hex': '00000004', 'name': 'submit_sm'}, '80000004': {'hex': '80000004', 'name': 'submit_sm_resp'}, '00000005': {'hex': '00000005', 'name': 'deliver_sm'}, '80000005': {'hex': '80000005', 'name': 'deliver_sm_resp'}, '00000006': {'hex': '00000006', 'name': 'unbind'}, '80000006': {'hex': '80000006', 'name': 'unbind_resp'}, '00000007': {'hex': '00000007', 'name': 'replace_sm'}, '80000007': {'hex': '80000007', 'name': 'replace_sm_resp'}, '00000008': {'hex': '00000008', 'name': 'cancel_sm'}, '80000008': {'hex': '80000008', 'name': 'cancel_sm_resp'}, '00000009': {'hex': '00000009', 'name': 'bind_transceiver'}, '80000009': {'hex': '80000009', 'name': 'bind_transceiver_resp'}, '0000000b': {'hex': '0000000b', 'name': 'outbind'}, '00000015': {'hex': '00000015', 'name': 'enquire_link'}, '80000015': {'hex': '80000015', 'name': 'enquire_link_resp'}, '00000021': {'hex': '00000021', 'name': 'submit_multi'}, '80000021': {'hex': '80000021', 'name': 'submit_multi_resp'}, '00000102': {'hex': '00000102', 'name': 'alert_notification'}, '00000103': {'hex': '00000103', 'name': 'data_sm'}, '80000103': {'hex': '80000103', 'name': 'data_sm_resp'}, # v4 codes '80010000': {'hex': '80010000', 'name': 'generic_nack_v4'}, '00010001': {'hex': '00010001', 'name': 'bind_receiver_v4'}, '80010001': {'hex': '80010001', 'name': 'bind_receiver_resp_v4'}, '00010002': {'hex': '00010002', 'name': 'bind_transmitter_v4'}, '80010002': {'hex': '80010002', 'name': 'bind_transmitter_resp_v4'}, '00010003': {'hex': '00010003', 'name': 'query_sm_v4'}, '80010003': {'hex': '80010003', 'name': 
'query_sm_resp_v4'}, '00010004': {'hex': '00010004', 'name': 'submit_sm_v4'}, '80010004': {'hex': '80010004', 'name': 'submit_sm_resp_v4'}, '00010005': {'hex': '00010005', 'name': 'deliver_sm_v4'}, '80010005': {'hex': '80010005', 'name': 'deliver_sm_resp_v4'}, '00010006': {'hex': '00010006', 'name': 'unbind_v4'}, '80010006': {'hex': '80010006', 'name': 'unbind_resp_v4'}, '00010007': {'hex': '00010007', 'name': 'replace_sm_v4'}, '80010007': {'hex': '80010007', 'name': 'replace_sm_resp_v4'}, '00010008': {'hex': '00010008', 'name': 'cancel_sm_v4'}, '80010008': {'hex': '80010008', 'name': 'cancel_sm_resp_v4'}, '00010009': {'hex': '00010009', 'name': 'delivery_receipt_v4'}, '80010009': {'hex': '80010009', 'name': 'delivery_receipt_resp_v4'}, '0001000a': {'hex': '0001000a', 'name': 'enquire_link_v4'}, '8001000a': {'hex': '8001000a', 'name': 'enquire_link_resp_v4'}, '0001000b': {'hex': '0001000b', 'name': 'outbind_v4'}, } def command_id_name_by_hex(x): return command_id_by_hex.get(x, {}).get('name') command_id_by_name = { 'generic_nack' :{ 'hex': '80000000', 'name': 'generic_nack'}, 'bind_receiver' :{ 'hex': '00000001', 'name': 'bind_receiver'}, 'bind_receiver_resp' :{ 'hex': '80000001', 'name': 'bind_receiver_resp'}, 'bind_transmitter' :{ 'hex': '00000002', 'name': 'bind_transmitter'}, 'bind_transmitter_resp' :{ 'hex': '80000002', 'name': 'bind_transmitter_resp'}, 'query_sm' :{ 'hex': '00000003', 'name': 'query_sm'}, 'query_sm_resp' :{ 'hex': '80000003', 'name': 'query_sm_resp'}, 'submit_sm' :{ 'hex': '00000004', 'name': 'submit_sm'}, 'submit_sm_resp' :{ 'hex': '80000004', 'name': 'submit_sm_resp'}, 'deliver_sm' :{ 'hex': '00000005', 'name': 'deliver_sm'}, 'deliver_sm_resp' :{ 'hex': '80000005', 'name': 'deliver_sm_resp'}, 'unbind' :{ 'hex': '00000006', 'name': 'unbind'}, 'unbind_resp' :{ 'hex': '80000006', 'name': 'unbind_resp'}, 'replace_sm' :{ 'hex': '00000007', 'name': 'replace_sm'}, 'replace_sm_resp' :{ 'hex': '80000007', 'name': 'replace_sm_resp'}, 'cancel_sm' :{ 'hex': '00000008', 'name': 'cancel_sm'}, 'cancel_sm_resp' :{ 'hex': '80000008', 'name': 'cancel_sm_resp'}, 'bind_transceiver' :{ 'hex': '00000009', 'name': 'bind_transceiver'}, 'bind_transceiver_resp' :{ 'hex': '80000009', 'name': 'bind_transceiver_resp'}, 'outbind' :{ 'hex': '0000000b', 'name': 'outbind'}, 'enquire_link' :{ 'hex': '00000015', 'name': 'enquire_link'}, 'enquire_link_resp' :{ 'hex': '80000015', 'name': 'enquire_link_resp'}, 'submit_multi' :{ 'hex': '00000021', 'name': 'submit_multi'}, 'submit_multi_resp' :{ 'hex': '80000021', 'name': 'submit_multi_resp'}, 'alert_notification' :{ 'hex': '00000102', 'name': 'alert_notification'}, 'data_sm' :{ 'hex': '00000103', 'name': 'data_sm'}, 'data_sm_resp' :{ 'hex': '80000103', 'name': 'data_sm_resp'}, # v4 codes 'generic_nack_v4' :{ 'hex': '80010000', 'name': 'generic_nack_v4'}, 'bind_receiver_v4' :{ 'hex': '00010001', 'name': 'bind_receiver_v4'}, 'bind_receiver_resp_v4' :{ 'hex': '80010001', 'name': 'bind_receiver_resp_v4'}, 'bind_transmitter_v4' :{ 'hex': '00010002', 'name': 'bind_transmitter_v4'}, 'bind_transmitter_resp_v4': {'hex': '80010002', 'name': 'bind_transmitter_resp_v4'}, 'query_sm_v4' :{ 'hex': '00010003', 'name': 'query_sm_v4'}, 'query_sm_resp_v4' :{ 'hex': '80010003', 'name': 'query_sm_resp_v4'}, 'submit_sm_v4' :{ 'hex': '00010004', 'name': 'submit_sm_v4'}, 'submit_sm_resp_v4' :{ 'hex': '80010004', 'name': 'submit_sm_resp_v4'}, 'deliver_sm_v4' :{ 'hex': '00010005', 'name': 'deliver_sm_v4'}, 'deliver_sm_resp_v4' :{ 'hex': '80010005', 'name': 'deliver_sm_resp_v4'}, 
'unbind_v4' :{ 'hex': '00010006', 'name': 'unbind_v4'}, 'unbind_resp_v4' :{ 'hex': '80010006', 'name': 'unbind_resp_v4'}, 'replace_sm_v4' :{ 'hex': '00010007', 'name': 'replace_sm_v4'}, 'replace_sm_resp_v4' :{ 'hex': '80010007', 'name': 'replace_sm_resp_v4'}, 'cancel_sm_v4' :{ 'hex': '00010008', 'name': 'cancel_sm_v4'}, 'cancel_sm_resp_v4' :{ 'hex': '80010008', 'name': 'cancel_sm_resp_v4'}, 'delivery_receipt_v4' :{ 'hex': '00010009', 'name': 'delivery_receipt_v4'}, 'delivery_receipt_resp_v4':{ 'hex': '80010009', 'name': 'delivery_receipt_resp_v4'}, 'enquire_link_v4' :{ 'hex': '0001000a', 'name': 'enquire_link_v4'}, 'enquire_link_resp_v4' :{ 'hex': '8001000a', 'name': 'enquire_link_resp_v4'}, 'outbind_v4' :{ 'hex': '0001000b', 'name': 'outbind_v4'} } def command_id_hex_by_name(n): return command_id_by_name.get(n, {}).get('hex') # SMPP Error Codes (ESME) - SMPP v3.4, section 5.1.3, table 5-2, page 112-114 command_status_by_hex = { '00000000': {'hex': '00000000', 'name': 'ESME_ROK', 'description': 'No error'}, '00000001': {'hex': '00000001', 'name': 'ESME_RINVMSGLEN', 'description': 'Message Length is invalid'}, '00000002': {'hex': '00000002', 'name': 'ESME_RINVCMDLEN', 'description': 'Command Length is invalid'}, '00000003': {'hex': '00000003', 'name': 'ESME_RINVCMDID', 'description': 'Invalid Command ID'}, '00000004': {'hex': '00000004', 'name': 'ESME_RINVBNDSTS', 'description': 'Incorrect BIND Status for given command'}, '00000005': {'hex': '00000005', 'name': 'ESME_RALYBND', 'description': 'ESME Already in bound state'}, '00000006': {'hex': '00000006', 'name': 'ESME_RINVPRTFLG', 'description': 'Invalid priority flag'}, '00000007': {'hex': '00000007', 'name': 'ESME_RINVREGDLVFLG', 'description': 'Invalid registered delivery flag'}, '00000008': {'hex': '00000008', 'name': 'ESME_RSYSERR', 'description': 'System Error'}, '0000000a': {'hex': '0000000a', 'name': 'ESME_RINVSRCADR', 'description': 'Invalid source address'}, '0000000b': {'hex': '0000000b', 'name': 'ESME_RINVDSTADR', 'description': 'Invalid destination address'}, '0000000c': {'hex': '0000000c', 'name': 'ESME_RINVMSGID', 'description': 'Message ID is invalid'}, '0000000d': {'hex': '0000000d', 'name': 'ESME_RBINDFAIL', 'description': 'Bind failed'}, '0000000e': {'hex': '0000000e', 'name': 'ESME_RINVPASWD', 'description': 'Invalid password'}, '0000000f': {'hex': '0000000f', 'name': 'ESME_RINVSYSID', 'description': 'Invalid System ID'}, '00000011': {'hex': '00000011', 'name': 'ESME_RCANCELFAIL', 'description': 'Cancel SM Failed'}, '00000013': {'hex': '00000013', 'name': 'ESME_RREPLACEFAIL', 'description': 'Replace SM Failed'}, '00000014': {'hex': '00000014', 'name': 'ESME_RMSGQFUL', 'description': 'Message queue full'}, '00000015': {'hex': '00000015', 'name': 'ESME_RINVSERTYP', 'description': 'Invalid service type'}, '00000033': {'hex': '00000033', 'name': 'ESME_RINVNUMDESTS', 'description': 'Invalid number of destinations'}, '00000034': {'hex': '00000034', 'name': 'ESME_RINVDLNAME', 'description': 'Invalid distribution list name'}, '00000040': {'hex': '00000040', 'name': 'ESME_RINVDESTFLAG', 'description': 'Destination flag is invalid (submit_multi)'}, '00000042': {'hex': '00000042', 'name': 'ESME_RINVSUBREP', 'description': "Invalid `submit with replace' request (i.e. 
submit_sm with replace_if_present_flag set)"}, '00000043': {'hex': '00000043', 'name': 'ESME_RINVESMCLASS', 'description': 'Invalid esm_class field data'}, '00000044': {'hex': '00000044', 'name': 'ESME_RCNTSUBDL', 'description': 'Cannot submit to distribution list'}, '00000045': {'hex': '00000045', 'name': 'ESME_RSUBMITFAIL', 'description': 'submit_sm or submit_multi failed'}, '00000048': {'hex': '00000048', 'name': 'ESME_RINVSRCTON', 'description': 'Invalid source address TON'}, '00000049': {'hex': '00000049', 'name': 'ESME_RINVSRCNPI', 'description': 'Invalid source address NPI'}, '00000050': {'hex': '00000050', 'name': 'ESME_RINVDSTTON', 'description': 'Invalid destination address TON'}, '00000051': {'hex': '00000051', 'name': 'ESME_RINVDSTNPI', 'description': 'Invalid destination address NPI'}, '00000053': {'hex': '00000053', 'name': 'ESME_RINVSYSTYP', 'description': 'Invalid system_type field'}, '00000054': {'hex': '00000054', 'name': 'ESME_RINVREPFLAG', 'description': 'Invalid replace_if_present flag'}, '00000055': {'hex': '00000055', 'name': 'ESME_RINVNUMMSGS', 'description': 'Invalid number of messages'}, '00000058': {'hex': '00000058', 'name': 'ESME_RTHROTTLED', 'description': 'Throttling error (ESME has exceeded allowed message limits)'}, '00000061': {'hex': '00000061', 'name': 'ESME_RINVSCHED', 'description': 'Invalid scheduled delivery time'}, '00000062': {'hex': '00000062', 'name': 'ESME_RINVEXPIRY', 'description': 'Invalid message validity period (expiry time)'}, '00000063': {'hex': '00000063', 'name': 'ESME_RINVDFTMSGID', 'description': 'Predefined message invalid or not found'}, '00000064': {'hex': '00000064', 'name': 'ESME_RX_T_APPN', 'description': 'ESME Receiver Temporary App Error Code'}, '00000065': {'hex': '00000065', 'name': 'ESME_RX_P_APPN', 'description': 'ESME Receiver Permanent App Error Code'}, '00000066': {'hex': '00000066', 'name': 'ESME_RX_R_APPN', 'description': 'ESME Receiver Reject Message Error Code'}, '00000067': {'hex': '00000067', 'name': 'ESME_RQUERYFAIL', 'description': 'query_sm request failed'}, '000000c0': {'hex': '000000c0', 'name': 'ESME_RINVOPTPARSTREAM', 'description': 'Error in the optional part of the PDU Body'}, '000000c1': {'hex': '000000c1', 'name': 'ESME_ROPTPARNOTALLWD', 'description': 'Optional paramenter not allowed'}, '000000c2': {'hex': '000000c2', 'name': 'ESME_RINVPARLEN', 'description': 'Invalid parameter length'}, '000000c3': {'hex': '000000c3', 'name': 'ESME_RMISSINGOPTPARAM', 'description': 'Expected optional parameter missing'}, '000000c4': {'hex': '000000c4', 'name': 'ESME_RINVOPTPARAMVAL', 'description': 'Invalid optional parameter value'}, '000000fe': {'hex': '000000fe', 'name': 'ESME_RDELIVERYFAILURE', 'description': 'Delivery Failure (used for data_sm_resp)'}, '000000ff': {'hex': '000000ff', 'name': 'ESME_RUNKNOWNERR', 'description': 'Unknown error'} } def command_status_name_by_hex(x): return command_status_by_hex.get(x, {}).get('name') command_status_by_name = { 'ESME_ROK' :{ 'hex': '00000000', 'name': 'ESME_ROK', 'description': 'No error'}, 'ESME_RINVMSGLEN' :{ 'hex': '00000001', 'name': 'ESME_RINVMSGLEN', 'description': 'Message Length is invalid'}, 'ESME_RINVCMDLEN' :{ 'hex': '00000002', 'name': 'ESME_RINVCMDLEN', 'description': 'Command Length is invalid'}, 'ESME_RINVCMDID' :{ 'hex': '00000003', 'name': 'ESME_RINVCMDID', 'description': 'Invalid Command ID'}, 'ESME_RINVBNDSTS' :{ 'hex': '00000004', 'name': 'ESME_RINVBNDSTS', 'description': 'Incorrect BIND Status for given command'}, 'ESME_RALYBND' :{ 'hex': 
'00000005', 'name': 'ESME_RALYBND', 'description': 'ESME Already in bound state'}, 'ESME_RINVPRTFLG' :{ 'hex': '00000006', 'name': 'ESME_RINVPRTFLG', 'description': 'Invalid priority flag'}, 'ESME_RINVREGDLVFLG' :{ 'hex': '00000007', 'name': 'ESME_RINVREGDLVFLG', 'description': 'Invalid registered delivery flag'}, 'ESME_RSYSERR' :{ 'hex': '00000008', 'name': 'ESME_RSYSERR', 'description': 'System Error'}, 'ESME_RINVSRCADR' :{ 'hex': '0000000a', 'name': 'ESME_RINVSRCADR', 'description': 'Invalid source address'}, 'ESME_RINVDSTADR' :{ 'hex': '0000000b', 'name': 'ESME_RINVDSTADR', 'description': 'Invalid destination address'}, 'ESME_RINVMSGID' :{ 'hex': '0000000c', 'name': 'ESME_RINVMSGID', 'description': 'Message ID is invalid'}, 'ESME_RBINDFAIL' :{ 'hex': '0000000d', 'name': 'ESME_RBINDFAIL', 'description': 'Bind failed'}, 'ESME_RINVPASWD' :{ 'hex': '0000000e', 'name': 'ESME_RINVPASWD', 'description': 'Invalid password'}, 'ESME_RINVSYSID' :{ 'hex': '0000000f', 'name': 'ESME_RINVSYSID', 'description': 'Invalid System ID'}, 'ESME_RCANCELFAIL' :{ 'hex': '00000011', 'name': 'ESME_RCANCELFAIL', 'description': 'Cancel SM Failed'}, 'ESME_RREPLACEFAIL' :{ 'hex': '00000013', 'name': 'ESME_RREPLACEFAIL', 'description': 'Replace SM Failed'}, 'ESME_RMSGQFUL' :{ 'hex': '00000014', 'name': 'ESME_RMSGQFUL', 'description': 'Message queue full'}, 'ESME_RINVSERTYP' :{ 'hex': '00000015', 'name': 'ESME_RINVSERTYP', 'description': 'Invalid service type'}, 'ESME_RINVNUMDESTS' :{ 'hex': '00000033', 'name': 'ESME_RINVNUMDESTS', 'description': 'Invalid number of destinations'}, 'ESME_RINVDLNAME' :{ 'hex': '00000034', 'name': 'ESME_RINVDLNAME', 'description': 'Invalid distribution list name'}, 'ESME_RINVDESTFLAG' :{ 'hex': '00000040', 'name': 'ESME_RINVDESTFLAG', 'description': 'Destination flag is invalid (submit_multi)'}, 'ESME_RINVSUBREP' :{ 'hex': '00000042', 'name': 'ESME_RINVSUBREP', 'description': "Invalid `submit with replace' request (i.e. 
submit_sm with replace_if_present_flag set)"}, 'ESME_RINVESMCLASS' :{ 'hex': '00000043', 'name': 'ESME_RINVESMCLASS', 'description': 'Invalid esm_class field data'}, 'ESME_RCNTSUBDL' :{ 'hex': '00000044', 'name': 'ESME_RCNTSUBDL', 'description': 'Cannot submit to distribution list'}, 'ESME_RSUBMITFAIL' :{ 'hex': '00000045', 'name': 'ESME_RSUBMITFAIL', 'description': 'submit_sm or submit_multi failed'}, 'ESME_RINVSRCTON' :{ 'hex': '00000048', 'name': 'ESME_RINVSRCTON', 'description': 'Invalid source address TON'}, 'ESME_RINVSRCNPI' :{ 'hex': '00000049', 'name': 'ESME_RINVSRCNPI', 'description': 'Invalid source address NPI'}, 'ESME_RINVDSTTON' :{ 'hex': '00000050', 'name': 'ESME_RINVDSTTON', 'description': 'Invalid destination address TON'}, 'ESME_RINVDSTNPI' :{ 'hex': '00000051', 'name': 'ESME_RINVDSTNPI', 'description': 'Invalid destination address NPI'}, 'ESME_RINVSYSTYP' :{ 'hex': '00000053', 'name': 'ESME_RINVSYSTYP', 'description': 'Invalid system_type field'}, 'ESME_RINVREPFLAG' :{ 'hex': '00000054', 'name': 'ESME_RINVREPFLAG', 'description': 'Invalid replace_if_present flag'}, 'ESME_RINVNUMMSGS' :{ 'hex': '00000055', 'name': 'ESME_RINVNUMMSGS', 'description': 'Invalid number of messages'}, 'ESME_RTHROTTLED' :{ 'hex': '00000058', 'name': 'ESME_RTHROTTLED', 'description': 'Throttling error (ESME has exceeded allowed message limits)'}, 'ESME_RINVSCHED' :{ 'hex': '00000061', 'name': 'ESME_RINVSCHED', 'description': 'Invalid scheduled delivery time'}, 'ESME_RINVEXPIRY' :{ 'hex': '00000062', 'name': 'ESME_RINVEXPIRY', 'description': 'Invalid message validity period (expiry time)'}, 'ESME_RINVDFTMSGID' :{ 'hex': '00000063', 'name': 'ESME_RINVDFTMSGID', 'description': 'Predefined message invalid or not found'}, 'ESME_RX_T_APPN' :{ 'hex': '00000064', 'name': 'ESME_RX_T_APPN', 'description': 'ESME Receiver Temporary App Error Code'}, 'ESME_RX_P_APPN' :{ 'hex': '00000065', 'name': 'ESME_RX_P_APPN', 'description': 'ESME Receiver Permanent App Error Code'}, 'ESME_RX_R_APPN' :{ 'hex': '00000066', 'name': 'ESME_RX_R_APPN', 'description': 'ESME Receiver Reject Message Error Code'}, 'ESME_RQUERYFAIL' :{ 'hex': '00000067', 'name': 'ESME_RQUERYFAIL', 'description': 'query_sm request failed'}, 'ESME_RINVOPTPARSTREAM': {'hex': '000000c0', 'name': 'ESME_RINVOPTPARSTREAM', 'description': 'Error in the optional part of the PDU Body'}, 'ESME_ROPTPARNOTALLWD' :{ 'hex': '000000c1', 'name': 'ESME_ROPTPARNOTALLWD', 'description': 'Optional paramenter not allowed'}, 'ESME_RINVPARLEN' :{ 'hex': '000000c2', 'name': 'ESME_RINVPARLEN', 'description': 'Invalid parameter length'}, 'ESME_RMISSINGOPTPARAM': {'hex': '000000c3', 'name': 'ESME_RMISSINGOPTPARAM', 'description': 'Expected optional parameter missing'}, 'ESME_RINVOPTPARAMVAL' :{ 'hex': '000000c4', 'name': 'ESME_RINVOPTPARAMVAL', 'description': 'Invalid optional parameter value'}, 'ESME_RDELIVERYFAILURE': {'hex': '000000fe', 'name': 'ESME_RDELIVERYFAILURE', 'description': 'Delivery Failure (used for data_sm_resp)'}, 'ESME_RUNKNOWNERR' :{ 'hex': '000000ff', 'name': 'ESME_RUNKNOWNERR', 'description': 'Unknown error'} } def command_status_hex_by_name(n): return command_status_by_name.get(n, {}).get('hex') # Type of Number (TON) - SMPP v3.4, section 5.2.5, table 5-3, page 117 maps['addr_ton_by_name'] = { 'unknown' : '00', 'international' : '01', 'national' : '02', 'network_specific' : '03', 'subscriber_number': '04', 'alphanumeric' : '05', 'abbreviated' : '06' } maps['addr_ton_by_hex'] = { '00': 'unknown', '01': 'international', '02': 'national', '03': 
'network_specific', '04': 'subscriber_number', '05': 'alphanumeric', '06': 'abbreviated' } # Numberic Plan Indicator (NPI) - SMPP v3.4, section 5.2.6, table 5-4, page 118 maps['addr_npi_by_name'] = { 'unknown' : '00', 'ISDN' : '01', 'data' : '03', 'telex' : '04', 'land_mobile': '06', 'national' : '08', 'private' : '09', 'ERMES' : '0a', 'internet' : '0e', 'WAP' : '12' } maps['addr_npi_by_hex'] = { '00': 'unknown', '01': 'ISDN', '03': 'data', '04': 'telex', '06': 'land_mobile', '08': 'national', '09': 'private', '0a': 'ERMES', '0e': 'internet', '12': 'WAP' } # ESM Class bits - SMPP v3.4, section 5.2.12, page 121 maps['esm_class_bits'] = { 'mode_mask' : '03', 'type_mask' : '3c', 'feature_mask' : 'c0', 'mode_default' : '00', 'mode_datagram' : '01', 'mode_forward' : '02', 'mode_store_and_forward' : '03', 'type_default' : '00', 'type_delivery_receipt' : '04', 'type_delivery_ack' : '08', 'type_0011' : '0a', 'type_user_ack' : '10', 'type_0101' : '14', 'type_conversation_abort' : '18', 'type_0111' : '1a', 'type_intermed_deliv_notif' : '20', 'type_1001' : '24', 'type_1010' : '28', 'type_1011' : '2a', 'type_1100' : '30', 'type_1101' : '34', 'type_1110' : '38', 'type_1111' : '3a', 'feature_none' : '00', 'feature_UDHI' : '40', 'feature_reply_path' : '80', 'feature_UDHI_and_reply_path': 'c0' } # Registered Delivery bits - SMPP v3.4, section 5.2.17, page 124 maps['registered_delivery_bits'] = { 'receipt_mask' : '03', 'ack_mask' : '0c', 'intermed_notif_mask' : '80', 'receipt_none' : '00', 'receipt_always' : '01', 'receipt_on_fail' : '02', 'receipt_res' : '03', 'ack_none' : '00', 'ack_delivery' : '04', 'ack_user' : '08', 'ack_delivery_and_user': '0c', 'intermed_notif_none' : '00', 'intermed_notif' : '10' } # submit_multi dest_flag constants - SMPP v3.4, section 5.2.25, page 129 # maps['dest_flag_by_name'] = { # 'SME Address' :1, # 'Distribution List Name': 2 # } # Message State codes returned in query_sm_resp PDUs - SMPP v3.4, section 5.2.28, table 5-6, page 130 maps['message_state_by_name'] = { 'ENROUTE' : 1, 'DELIVERED' : 2, 'EXPIRED' : 3, 'DELETED' : 4, 'UNDELIVERABLE': 5, 'ACCEPTED' : 6, 'UNKNOWN' : 7, 'REJECTED' : 8 } # Facility Code bits for SMPP v4 maps['facility_code_bits'] = { 'GF_PVCY' : '00000001', 'GF_SUBADDR': '00000002', 'NF_CC' : '00080000', 'NF_PDC' : '00010000', 'NF_IS136' : '00020000', 'NF_IS95A' : '00040000' } # Optional Parameter Tags - SMPP v3.4, section 5.3.2, Table 5-7, page 132-133 optional_parameter_tag_by_hex = { '0005': {'hex': '0005', 'name': 'dest_addr_subunit', 'type': 'integer', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.1, page 134 '0006': {'hex': '0006', 'name': 'dest_network_type', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.3, page 135 '0007': {'hex': '0007', 'name': 'dest_bearer_type', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.5, page 136 '0008': {'hex': '0008', 'name': 'dest_telematics_id', 'type': 'integer', 'tech': 'GSM', 'min': 2}, # SMPP v3.4, section 5.3.2.7, page 137 '000d': {'hex': '000d', 'name': 'source_addr_subunit', 'type': 'integer', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.2, page 134 '000e': {'hex': '000e', 'name': 'source_network_type', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.4, page 135 '000f': {'hex': '000f', 'name': 'source_bearer_type', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.6, page 136 '0010': {'hex': '0010', 'name': 'source_telematics_id', 'type': 'integer', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.8, page 137 '0017': {'hex': '0017', 'name': 
'qos_time_to_live', 'type': 'integer', 'tech': 'Generic', 'min': 4}, # SMPP v3.4, section 5.3.2.9, page 138 '0019': {'hex': '0019', 'name': 'payload_type', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.10, page 138 '001d': {'hex': '001d', 'name': 'additional_status_info_text', 'type': 'string', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.11, page 139 '001e': {'hex': '001e', 'name': 'receipted_message_id', 'type': 'string', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.12, page 139 '0030': {'hex': '0030', 'name': 'ms_msg_wait_facilities', 'type': 'bitmask', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.13, page 140 '0101': {'hex': '0101', 'name': 'PVCY_AuthenticationStr', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 58-62 '0201': {'hex': '0201', 'name': 'privacy_indicator', 'type': 'integer', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.14, page 141 '0202': {'hex': '0202', 'name': 'source_subaddress', 'type': 'hex', 'tech': 'CDMA, TDMA', 'min': 2}, # SMPP v3.4, section 5.3.2.15, page 142 '0203': {'hex': '0203', 'name': 'dest_subaddress', 'type': 'hex', 'tech': 'CDMA, TDMA', 'min': 2}, # SMPP v3.4, section 5.3.2.16, page 143 '0204': {'hex': '0204', 'name': 'user_message_reference', 'type': 'integer', 'tech': 'Generic', 'min': 2}, # SMPP v3.4, section 5.3.2.17, page 143 '0205': {'hex': '0205', 'name': 'user_response_code', 'type': 'integer', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.18, page 144 '020a': {'hex': '020a', 'name': 'source_port', 'type': 'integer', 'tech': 'Generic', 'min': 2}, # SMPP v3.4, section 5.3.2.20, page 145 '020b': {'hex': '020b', 'name': 'destination_port', 'type': 'integer', 'tech': 'Generic', 'min': 2}, # SMPP v3.4, section 5.3.2.21, page 145 '020c': {'hex': '020c', 'name': 'sar_msg_ref_num', 'type': 'integer', 'tech': 'Generic', 'min': 2}, # SMPP v3.4, section 5.3.2.22, page 146 '020d': {'hex': '020d', 'name': 'language_indicator', 'type': 'integer', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.19, page 144 '020e': {'hex': '020e', 'name': 'sar_total_segments', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.23, page 147 '020f': {'hex': '020f', 'name': 'sar_segment_seqnum', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.24, page 147 '0210': {'hex': '0210', 'name': 'sc_interface_version', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.25, page 148 '0301': {'hex': '0301', 'name': 'CC_CBN', 'type': None, 'tech': 'V4'}, # v4 page 70 '0302': {'hex': '0302', 'name': 'callback_num_pres_ind', 'type': 'bitmask', 'tech': 'TDMA'}, # SMPP v3.4, section 5.3.2.37, page 156 '0303': {'hex': '0303', 'name': 'callback_num_atag', 'type': 'hex', 'tech': 'TDMA'}, # SMPP v3.4, section 5.3.2.38, page 157 '0304': {'hex': '0304', 'name': 'number_of_messages', 'type': 'integer', 'tech': 'CDMA'}, # SMPP v3.4, section 5.3.2.39, page 158 '0381': {'hex': '0381', 'name': 'callback_num', 'type': 'hex', 'tech': 'CDMA, TDMA, GSM, iDEN', 'min': 4}, # SMPP v3.4, section 5.3.2.36, page 155 '0420': {'hex': '0420', 'name': 'dpf_result', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.28, page 149 '0421': {'hex': '0421', 'name': 'set_dpf', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.29, page 150 '0422': {'hex': '0422', 'name': 'ms_availability_status', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.30, page 151 '0423': {'hex': '0423', 'name': 'network_error_code', 'type': 'hex', 'tech': 'Generic', 'min': 3}, # SMPP v3.4, section 5.3.2.31, page 152 
'0424': {'hex': '0424', 'name': 'message_payload', 'type': 'hex', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.32, page 153 '0425': {'hex': '0425', 'name': 'delivery_failure_reason', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.33, page 153 '0426': {'hex': '0426', 'name': 'more_messages_to_send', 'type': 'integer', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.34, page 154 '0427': {'hex': '0427', 'name': 'message_state', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.35, page 154 '0428': {'hex': '0428', 'name': 'congestion_state', 'type': None, 'tech': 'Generic'}, '0501': {'hex': '0501', 'name': 'ussd_service_op', 'type': 'hex', 'tech': 'GSM (USSD)'}, # SMPP v3.4, section 5.3.2.44, page 161 '0600': {'hex': '0600', 'name': 'broadcast_channel_indicator', 'type': None, 'tech': 'GSM'}, '0601': {'hex': '0601', 'name': 'broadcast_content_type', 'type': None, 'tech': 'CDMA, TDMA, GSM'}, '0602': {'hex': '0602', 'name': 'broadcast_content_type_info', 'type': None, 'tech': 'CDMA, TDMA'}, '0603': {'hex': '0603', 'name': 'broadcast_message_class', 'type': None, 'tech': 'GSM'}, '0604': {'hex': '0604', 'name': 'broadcast_rep_num', 'type': None, 'tech': 'GSM'}, '0605': {'hex': '0605', 'name': 'broadcast_frequency_interval', 'type': None, 'tech': 'CDMA, TDMA, GSM'}, '0606': {'hex': '0606', 'name': 'broadcast_area_identifier', 'type': None, 'tech': 'CDMA, TDMA, GSM'}, '0607': {'hex': '0607', 'name': 'broadcast_error_status', 'type': None, 'tech': 'CDMA, TDMA, GSM'}, '0608': {'hex': '0608', 'name': 'broadcast_area_success', 'type': None, 'tech': 'GSM'}, '0609': {'hex': '0609', 'name': 'broadcast_end_time', 'type': None, 'tech': 'CDMA, TDMA, GSM'}, '060a': {'hex': '060a', 'name': 'broadcast_service_group', 'type': None, 'tech': 'CDMA, TDMA'}, '060b': {'hex': '060b', 'name': 'billing_identification', 'type': None, 'tech': 'Generic'}, '060d': {'hex': '060d', 'name': 'source_network_id', 'type': None, 'tech': 'Generic'}, '060e': {'hex': '060e', 'name': 'dest_network_id', 'type': None, 'tech': 'Generic'}, '060f': {'hex': '060f', 'name': 'source_node_id', 'type': None, 'tech': 'Generic'}, '0610': {'hex': '0610', 'name': 'dest_node_id', 'type': None, 'tech': 'Generic'}, '0611': {'hex': '0611', 'name': 'dest_addr_np_resolution', 'type': None, 'tech': 'CDMA, TDMA (US Only)'}, '0612': {'hex': '0612', 'name': 'dest_addr_np_information', 'type': None, 'tech': 'CDMA, TDMA (US Only)'}, '0613': {'hex': '0613', 'name': 'dest_addr_np_country', 'type': None, 'tech': 'CDMA, TDMA (US Only)'}, '1101': {'hex': '1101', 'name': 'PDC_MessageClass', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 75 '1102': {'hex': '1102', 'name': 'PDC_PresentationOption', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 76 '1103': {'hex': '1103', 'name': 'PDC_AlertMechanism', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 76 '1104': {'hex': '1104', 'name': 'PDC_Teleservice', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 77 '1105': {'hex': '1105', 'name': 'PDC_MultiPartMessage', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 77 '1106': {'hex': '1106', 'name': 'PDC_PredefinedMsg', 'type': None, 'tech': '? 
(J-Phone)'}, # v4 page 78 '1201': {'hex': '1201', 'name': 'display_time', 'type': 'integer', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.26, page 148 '1203': {'hex': '1203', 'name': 'sms_signal', 'type': 'integer', 'tech': 'TDMA', 'min': 2}, # SMPP v3.4, section 5.3.2.40, page 158 '1204': {'hex': '1204', 'name': 'ms_validity', 'type': 'integer', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.27, page 149 '1304': {'hex': '1304', 'name': 'IS95A_AlertOnDelivery', 'type': None, 'tech': 'CDMA'}, # v4 page 85 '1306': {'hex': '1306', 'name': 'IS95A_LanguageIndicator', 'type': None, 'tech': 'CDMA'}, # v4 page 86 '130c': {'hex': '130c', 'name': 'alert_on_message_delivery', 'type': None, 'tech': 'CDMA'}, # SMPP v3.4, section 5.3.2.41, page 159 '1380': {'hex': '1380', 'name': 'its_reply_type', 'type': 'integer', 'tech': 'CDMA'}, # SMPP v3.4, section 5.3.2.42, page 159 '1383': {'hex': '1383', 'name': 'its_session_info', 'type': 'hex', 'tech': 'CDMA', 'min': 2}, # SMPP v3.4, section 5.3.2.43, page 160 '1402': {'hex': '1402', 'name': 'operator_id', 'type': None, 'tech': 'vendor extension'}, '1403': {'hex': '1403', 'name': 'tariff', 'type': None, 'tech': 'Mobile Network Code vendor extension'}, '1450': {'hex': '1450', 'name': 'mcc', 'type': None, 'tech': 'Mobile Country Code vendor extension'}, '1451': {'hex': '1451', 'name': 'mnc', 'type': None, 'tech': 'Mobile Network Code vendor extension'} } def optional_parameter_tag_name_by_hex(x): return optional_parameter_tag_by_hex.get(x, {}).get('name') def optional_parameter_tag_type_by_hex(x): return optional_parameter_tag_by_hex.get(x, {}).get('type') def optional_parameter_tag_min_by_hex(x): return optional_parameter_tag_by_hex.get(x, {}).get('min', 0) optional_parameter_tag_by_name = { 'dest_addr_subunit' :{ 'hex': '0005', 'name': 'dest_addr_subunit', 'type': 'integer', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.1, page 134 'dest_network_type' :{ 'hex': '0006', 'name': 'dest_network_type', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.3, page 135 'dest_bearer_type' :{ 'hex': '0007', 'name': 'dest_bearer_type', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.5, page 136 'dest_telematics_id' :{ 'hex': '0008', 'name': 'dest_telematics_id', 'type': 'integer', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.7, page 137 'source_addr_subunit' :{ 'hex': '000d', 'name': 'source_addr_subunit', 'type': 'integer', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.2, page 134 'source_network_type' :{ 'hex': '000e', 'name': 'source_network_type', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.4, page 135 'source_bearer_type' :{ 'hex': '000f', 'name': 'source_bearer_type', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.6, page 136 'source_telematics_id' :{ 'hex': '0010', 'name': 'source_telematics_id', 'type': 'integer', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.8, page 137 'qos_time_to_live' :{ 'hex': '0017', 'name': 'qos_time_to_live', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.9, page 138 'payload_type' :{ 'hex': '0019', 'name': 'payload_type', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.10, page 138 'additional_status_info_text' :{ 'hex': '001d', 'name': 'additional_status_info_text', 'type': 'string', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.11, page 139 'receipted_message_id' :{ 'hex': '001e', 'name': 'receipted_message_id', 'type': 'string', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.12, page 139 'ms_msg_wait_facilities' :{ 'hex': 
'0030', 'name': 'ms_msg_wait_facilities', 'type': 'bitmask', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.13, page 140 'PVCY_AuthenticationStr' :{ 'hex': '0101', 'name': 'PVCY_AuthenticationStr', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 58-62 'privacy_indicator' :{ 'hex': '0201', 'name': 'privacy_indicator', 'type': 'integer', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.14, page 141 'source_subaddress' :{ 'hex': '0202', 'name': 'source_subaddress', 'type': 'hex', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.15, page 142 'dest_subaddress' :{ 'hex': '0203', 'name': 'dest_subaddress', 'type': 'hex', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.16, page 143 'user_message_reference' :{ 'hex': '0204', 'name': 'user_message_reference', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.17, page 143 'user_response_code' :{ 'hex': '0205', 'name': 'user_response_code', 'type': 'integer', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.18, page 144 'source_port' :{ 'hex': '020a', 'name': 'source_port', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.20, page 145 'destination_port' :{ 'hex': '020b', 'name': 'destination_port', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.21, page 145 'sar_msg_ref_num' :{ 'hex': '020c', 'name': 'sar_msg_ref_num', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.22, page 146 'language_indicator' :{ 'hex': '020d', 'name': 'language_indicator', 'type': 'integer', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.19, page 144 'sar_total_segments' :{ 'hex': '020e', 'name': 'sar_total_segments', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.23, page 147 'sar_segment_seqnum' :{ 'hex': '020f', 'name': 'sar_segment_seqnum', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.24, page 147 'sc_interface_version' :{ 'hex': '0210', 'name': 'sc_interface_version', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.25, page 148 'CC_CBN' :{ 'hex': '0301', 'name': 'CC_CBN', 'type': None, 'tech': 'V4'}, # v4 page 70 'callback_num_pres_ind' :{ 'hex': '0302', 'name': 'callback_num_pres_ind', 'type': 'bitmask', 'tech': 'TDMA'}, # SMPP v3.4, section 5.3.2.37, page 156 'callback_num_atag' :{ 'hex': '0303', 'name': 'callback_num_atag', 'type': 'hex', 'tech': 'TDMA'}, # SMPP v3.4, section 5.3.2.38, page 157 'number_of_messages' :{ 'hex': '0304', 'name': 'number_of_messages', 'type': 'integer', 'tech': 'CDMA'}, # SMPP v3.4, section 5.3.2.39, page 158 'callback_num' :{ 'hex': '0381', 'name': 'callback_num', 'type': 'hex', 'tech': 'CDMA, TDMA, GSM, iDEN'}, # SMPP v3.4, section 5.3.2.36, page 155 'dpf_result' :{ 'hex': '0420', 'name': 'dpf_result', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.28, page 149 'set_dpf' :{ 'hex': '0421', 'name': 'set_dpf', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.29, page 150 'ms_availability_status' :{ 'hex': '0422', 'name': 'ms_availability_status', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.30, page 151 'network_error_code' :{ 'hex': '0423', 'name': 'network_error_code', 'type': 'hex', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.31, page 152 'message_payload' :{ 'hex': '0424', 'name': 'message_payload', 'type': 'hex', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.32, page 153 'delivery_failure_reason' :{ 'hex': '0425', 'name': 'delivery_failure_reason', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.33, page 153 'more_messages_to_send' 
:{ 'hex': '0426', 'name': 'more_messages_to_send', 'type': 'integer', 'tech': 'GSM'}, # SMPP v3.4, section 5.3.2.34, page 154 'message_state' :{ 'hex': '0427', 'name': 'message_state', 'type': 'integer', 'tech': 'Generic'}, # SMPP v3.4, section 5.3.2.35, page 154 'congestion_state' :{ 'hex': '0428', 'name': 'congestion_state', 'type': None, 'tech': 'Generic'}, 'ussd_service_op' :{ 'hex': '0501', 'name': 'ussd_service_op', 'type': 'hex', 'tech': 'GSM (USSD)'}, # SMPP v3.4, section 5.3.2.44, page 161 'broadcast_channel_indicator' :{ 'hex': '0600', 'name': 'broadcast_channel_indicator', 'type': None, 'tech': 'GSM'}, 'broadcast_content_type' :{ 'hex': '0601', 'name': 'broadcast_content_type', 'type': None, 'tech': 'CDMA, TDMA, GSM'}, 'broadcast_content_type_info' :{ 'hex': '0602', 'name': 'broadcast_content_type_info', 'type': None, 'tech': 'CDMA, TDMA'}, 'broadcast_message_class' :{ 'hex': '0603', 'name': 'broadcast_message_class', 'type': None, 'tech': 'GSM'}, 'broadcast_rep_num' :{ 'hex': '0604', 'name': 'broadcast_rep_num', 'type': None, 'tech': 'GSM'}, 'broadcast_frequency_interval': {'hex': '0605', 'name': 'broadcast_frequency_interval', 'type': None, 'tech': 'CDMA, TDMA, GSM'}, 'broadcast_area_identifier' :{ 'hex': '0606', 'name': 'broadcast_area_identifier', 'type': None, 'tech': 'CDMA, TDMA, GSM'}, 'broadcast_error_status' :{ 'hex': '0607', 'name': 'broadcast_error_status', 'type': None, 'tech': 'CDMA, TDMA, GSM'}, 'broadcast_area_success' :{ 'hex': '0608', 'name': 'broadcast_area_success', 'type': None, 'tech': 'GSM'}, 'broadcast_end_time' :{ 'hex': '0609', 'name': 'broadcast_end_time', 'type': None, 'tech': 'CDMA, TDMA, GSM'}, 'broadcast_service_group' :{ 'hex': '060a', 'name': 'broadcast_service_group', 'type': None, 'tech': 'CDMA, TDMA'}, 'billing_identification' :{ 'hex': '060b', 'name': 'billing_identification', 'type': None, 'tech': 'Generic'}, 'source_network_id' :{ 'hex': '060d', 'name': 'source_network_id', 'type': None, 'tech': 'Generic'}, 'dest_network_id' :{ 'hex': '060e', 'name': 'dest_network_id', 'type': None, 'tech': 'Generic'}, 'source_node_id' :{ 'hex': '060f', 'name': 'source_node_id', 'type': None, 'tech': 'Generic'}, 'dest_node_id' :{ 'hex': '0610', 'name': 'dest_node_id', 'type': None, 'tech': 'Generic'}, 'dest_addr_np_resolution' :{ 'hex': '0611', 'name': 'dest_addr_np_resolution', 'type': None, 'tech': 'CDMA, TDMA (US Only)'}, 'dest_addr_np_information' :{ 'hex': '0612', 'name': 'dest_addr_np_information', 'type': None, 'tech': 'CDMA, TDMA (US Only)'}, 'dest_addr_np_country' :{ 'hex': '0613', 'name': 'dest_addr_np_country', 'type': None, 'tech': 'CDMA, TDMA (US Only)'}, 'PDC_MessageClass' :{ 'hex': '1101', 'name': 'PDC_MessageClass', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 75 'PDC_PresentationOption' :{ 'hex': '1102', 'name': 'PDC_PresentationOption', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 76 'PDC_AlertMechanism' :{ 'hex': '1103', 'name': 'PDC_AlertMechanism', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 76 'PDC_Teleservice' :{ 'hex': '1104', 'name': 'PDC_Teleservice', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 77 'PDC_MultiPartMessage' :{ 'hex': '1105', 'name': 'PDC_MultiPartMessage', 'type': None, 'tech': '? (J-Phone)'}, # v4 page 77 'PDC_PredefinedMsg' :{ 'hex': '1106', 'name': 'PDC_PredefinedMsg', 'type': None, 'tech': '? 
(J-Phone)'}, # v4 page 78 'display_time' :{ 'hex': '1201', 'name': 'display_time', 'type': 'integer', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.26, page 148 'sms_signal' :{ 'hex': '1203', 'name': 'sms_signal', 'type': 'integer', 'tech': 'TDMA'}, # SMPP v3.4, section 5.3.2.40, page 158 'ms_validity' :{ 'hex': '1204', 'name': 'ms_validity', 'type': 'integer', 'tech': 'CDMA, TDMA'}, # SMPP v3.4, section 5.3.2.27, page 149 'IS95A_AlertOnDelivery' :{ 'hex': '1304', 'name': 'IS95A_AlertOnDelivery', 'type': None, 'tech': 'CDMA'}, # v4 page 85 'IS95A_LanguageIndicator' :{ 'hex': '1306', 'name': 'IS95A_LanguageIndicator', 'type': None, 'tech': 'CDMA'}, # v4 page 86 'alert_on_message_delivery' :{ 'hex': '130c', 'name': 'alert_on_message_delivery', 'type': None, 'tech': 'CDMA'}, # SMPP v3.4, section 5.3.2.41, page 159 'its_reply_type' :{ 'hex': '1380', 'name': 'its_reply_type', 'type': 'integer', 'tech': 'CDMA'}, # SMPP v3.4, section 5.3.2.42, page 159 'its_session_info' :{ 'hex': '1383', 'name': 'its_session_info', 'type': 'hex', 'tech': 'CDMA'}, # SMPP v3.4, section 5.3.2.43, page 160 'operator_id' :{ 'hex': '1402', 'name': 'operator_id', 'type': None, 'tech': 'vendor extension'}, 'tariff' :{ 'hex': '1403', 'name': 'tariff', 'type': None, 'tech': 'Mobile Network Code vendor extension'}, 'mcc' :{ 'hex': '1450', 'name': 'mcc', 'type': None, 'tech': 'Mobile Country Code vendor extension'}, 'mnc' :{ 'hex': '1451', 'name': 'mnc', 'type': None, 'tech': 'Mobile Network Code vendor extension'} } def optional_parameter_tag_hex_by_name(n): return optional_parameter_tag_by_name.get(n, {}).get('hex') # Decoding functions ####################################################### def unpack_pdu(pdu_bin): return decode_pdu(binascii.b2a_hex(pdu_bin)) def decode_pdu(pdu_hex): hex_ref = [pdu_hex] pdu = {} pdu['header'] = decode_header(hex_ref) command = pdu['header'].get('command_id', None) if command is not None: body = decode_body(command, hex_ref) if len(body) > 0: pdu['body'] = body return pdu def decode_header(hex_ref): pdu_hex = hex_ref[0] header = {} (command_length, command_id, command_status, sequence_number, hex_ref[0]) = \ (pdu_hex[0:8], pdu_hex[8:16], pdu_hex[16:24], pdu_hex[24:32], pdu_hex[32:]) length = int(command_length, 16) command = command_id_name_by_hex(command_id) status = command_status_name_by_hex(command_status) sequence = int(sequence_number, 16) header = {} header['command_length'] = length header['command_id'] = command header['command_status'] = status header['sequence_number'] = sequence return header def decode_body(command, hex_ref): body = {} if command is not None: fields = mandatory_parameter_list_by_command_name(command) mandatory = decode_mandatory_parameters(fields, hex_ref) if len(mandatory) > 0: body['mandatory_parameters'] = mandatory optional = decode_optional_parameters(hex_ref) if len(optional) > 0: body['optional_parameters'] = optional return body def decode_mandatory_parameters(fields, hex_ref): mandatory_parameters = {} if len(hex_ref[0]) > 1: for field in fields: # old = len(hex_ref[0]) data = '' octet = '' count = 0 if field['var'] is True or field['var'] is False: while (len(hex_ref[0]) > 1 and (count < field['min'] or (field['var'] is True and count < field['max']+1 and octet != '00'))): octet = octpop(hex_ref) data += octet count += 1 elif field['type'] in ['string', 'xstring']: count = mandatory_parameters[field['var']] if count == 0: data = None else: for i in range(count): if len(hex_ref[0]) > 1: data += octpop(hex_ref) else: count = 
mandatory_parameters[field['var']] if field['map'] is not None: mandatory_parameters[field['name']] = maps[field['map']+'_by_hex'].get(data, None) if field['map'] is None or mandatory_parameters[field['name']] is None: mandatory_parameters[field['name']] = decode_hex_type(data, field['type'], count, hex_ref) # print field['type'], (old - len(hex_ref[0]))/2, repr(data), field['name'], mandatory_parameters[field['name']] return mandatory_parameters def decode_optional_parameters(hex_ref): optional_parameters = [] hex = hex_ref[0] while len(hex) > 0: if len(hex) < 8: # We don't have enough data here for this to be a valid param. # TODO: Something better than `print` here. print "Invalid optional param data, ignoring: %s" % (hex,) break (tag_hex, length_hex, rest) = (hex[0:4], hex[4:8], hex[8:]) tag = optional_parameter_tag_name_by_hex(tag_hex) if tag is None: tag = tag_hex length = int(length_hex, 16) (value_hex, tail) = (rest[0:length*2], rest[length*2:]) if len(value_hex) == 0: value = None else: value = decode_hex_type(value_hex, optional_parameter_tag_type_by_hex(tag_hex)) hex = tail optional_parameters.append({'tag': tag, 'length': length, 'value': value}) return optional_parameters def decode_hex_type(hex, type, count=0, hex_ref=['']): if hex is None: return hex elif type == 'integer': return int(hex, 16) elif type == 'string': return re.sub('00', '', hex).decode('hex') elif type == 'xstring': return hex.decode('hex') elif (type == 'dest_address' or type == 'unsuccess_sme'): list = [] fields = mandatory_parameter_list_by_command_name(type) for i in range(count): item = decode_mandatory_parameters(fields, hex_ref) if item.get('dest_flag', None) == 1: # 'dest_address' only subfields = mandatory_parameter_list_by_command_name('sme_dest_address') rest = decode_mandatory_parameters(subfields, hex_ref) item.update(rest) elif item.get('dest_flag', None) == 2: # 'dest_address' only subfields = mandatory_parameter_list_by_command_name('distribution_list') rest = decode_mandatory_parameters(subfields, hex_ref) item.update(rest) list.append(item) return list else: return hex def octpop(hex_ref): octet = None if len(hex_ref[0]) > 1: (octet, hex_ref[0]) = (hex_ref[0][0:2], hex_ref[0][2:]) return octet # Encoding functions def pack_pdu(pdu_obj): return binascii.a2b_hex(encode_pdu(pdu_obj)) def encode_pdu(pdu_obj): header = pdu_obj.get('header', {}) body = pdu_obj.get('body', {}) mandatory = body.get('mandatory_parameters', {}) optional = body.get('optional_parameters', []) body_hex = '' fields = mandatory_parameter_list_by_command_name(header['command_id']) body_hex += encode_mandatory_parameters(mandatory, fields) for opt in optional: body_hex += encode_optional_parameter(opt['tag'], opt['value']) actual_length = 16 + len(body_hex)/2 command_length = '%08x' % actual_length command_id = command_id_hex_by_name(header['command_id']) command_status = command_status_hex_by_name(header['command_status']) sequence_number = '%08x' % header['sequence_number'] pdu_hex = command_length + command_id + command_status + sequence_number + body_hex return pdu_hex def encode_mandatory_parameters(mandatory_obj, fields): mandatory_hex_array = [] index_names = {} index = 0 for field in fields: param = mandatory_obj.get(field['name'], None) param_length = None if param is not None or field['min'] > 0: map = None if field['map'] is not None: map = maps.get(field['map']+'_by_name', None) if isinstance(param, list): hex_list = [] for item in param: flagfields = mandatory_parameter_list_by_command_name(field['type']) 
plusfields = [] if item.get('dest_flag', None) == 1: plusfields = mandatory_parameter_list_by_command_name('sme_dest_address') elif item.get('dest_flag', None) == 2: plusfields = mandatory_parameter_list_by_command_name('distribution_list') hex_item = encode_mandatory_parameters(item, flagfields + plusfields) if isinstance(hex_item, str) and len(hex_item) > 0: hex_list.append(hex_item) param_length = len(hex_list) mandatory_hex_array.append(''.join(hex_list)) else: hex_param = encode_param_type( param, field['type'], field['min'], field['max'], map) param_length = len(hex_param)/2 mandatory_hex_array.append(hex_param) index_names[field['name']] = index length_index = index_names.get(field['var'], None) if length_index is not None and param_length is not None: mandatory_hex_array[length_index] = encode_param_type( param_length, 'integer', len(mandatory_hex_array[length_index])/2) index += 1 return ''.join(mandatory_hex_array) def encode_optional_parameter(tag, value): optional_hex_array = [] tag_hex = optional_parameter_tag_hex_by_name(tag) if tag_hex is not None: value_hex = encode_param_type( value, optional_parameter_tag_type_by_hex(tag_hex), optional_parameter_tag_min_by_hex(tag_hex), ) length_hex = '%04x' % (len(value_hex)/2) optional_hex_array.append(tag_hex + length_hex + value_hex) return ''.join(optional_hex_array) def encode_param_type(param, type, min=0, max=None, map=None): if param is None: hex = None elif map is not None: if type == 'integer' and isinstance(param, int): hex = ('%0'+str(min*2)+'x') % param else: hex = map.get(param, ('%0'+str(min*2)+'x') % 0) elif type == 'integer': hex = ('%0'+str(min*2)+'x') % int(param) elif type == 'string': hex = param.encode('hex') + '00' elif type == 'xstring': hex = param.encode('hex') elif type == 'bitmask': hex = param elif type == 'hex': hex = param else: hex = None if hex: if len(hex) % 2: # pad odd length hex strings hex = '0' + hex if None not in (max, hex) and len(hex) > 2 * max: raise ValueError("Value exceeds maximum size of %s." % (max,)) return hex
73.97561
188
0.513138
[ "BSD-3-Clause" ]
komuW/smpp_server
smpp/__init__.py
78,858
Python
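The SMPP codec above (from komuW/smpp_server, smpp/__init__.py) is Python 2 code: note the bare print statement in decode_optional_parameters and the str.encode('hex')/decode('hex') calls in the type helpers. A minimal round-trip sketch of how the encode/decode entry points are meant to be driven, assuming the package is importable as `smpp` and that mandatory_parameter_list_by_command_name (defined earlier in the file, not shown here) returns an empty field list for enquire_link:

import smpp

pdu = {
    'header': {
        'command_id': 'enquire_link',    # looked up in command_id_by_name
        'command_status': 'ESME_ROK',    # looked up in command_status_by_name
        'sequence_number': 1,
    },
    # enquire_link carries no mandatory or optional parameters, so 'body' is omitted
}

hex_pdu = smpp.encode_pdu(pdu)   # expected: '00000010000000150000000000000001'
binary = smpp.pack_pdu(pdu)      # the same PDU as raw bytes

decoded = smpp.unpack_pdu(binary)
assert decoded['header']['command_id'] == 'enquire_link'
assert decoded['header']['command_length'] == 16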
# Copyright 2018 Iguazio # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from base64 import b64encode from nuclio.build import mlrun_footer import mlrun from ..model import ModelObj from ..utils import generate_object_uri from .utils import enrich_function_from_dict class FunctionReference(ModelObj): """function reference/template, point to function and add/override resources""" def __init__( self, url=None, image=None, requirements=None, code=None, spec=None, kind=None, name=None, ): self.url = url self.kind = kind self.image = image self.requirements = requirements self.name = name if hasattr(spec, "to_dict"): spec = spec.to_dict() self.spec = spec self.code = code self._function = None self._address = None def is_empty(self): if self.url or self.code or self.spec: return False return True def fullname(self, parent): return f"{parent.metadata.name}-{self.name}" def uri(self, parent, tag=None, hash_key=None, fullname=True): name = self.fullname(parent) if fullname else self.name return generate_object_uri( parent.metadata.project, name, tag=tag or parent.metadata.tag, hash_key=hash_key, ) @property def function_object(self): """get the generated function object""" return self._function def to_function(self, default_kind=None): """generate a function object from the ref definitions""" if self.url and "://" not in self.url: if not os.path.isfile(self.url): raise OSError(f"{self.url} not found") kind = self.kind or default_kind if self.url: if ( self.url.endswith(".yaml") or self.url.startswith("db://") or self.url.startswith("hub://") ): func = mlrun.import_function(self.url) if self.image: func.spec.image = self.image elif self.url.endswith(".ipynb"): func = mlrun.code_to_function( self.name, filename=self.url, image=self.image, kind=kind ) elif self.url.endswith(".py"): # todo: support code text as input (for UI) if not self.image: raise ValueError( "image must be provided with py code files, " "use function object for more control/settings" ) func = mlrun.code_to_function( self.name, filename=self.url, image=self.image, kind=kind ) else: raise ValueError(f"unsupported function url {self.url} or no spec") if self.spec: func = enrich_function_from_dict(func, self.spec) elif self.code is not None: code = self.code if kind == mlrun.runtimes.RuntimeKinds.serving: code = code + mlrun_footer.format( mlrun.runtimes.serving.serving_subkind ) func = mlrun.new_function(self.name, kind=kind, image=self.image) data = b64encode(code.encode("utf-8")).decode("utf-8") func.spec.build.functionSourceCode = data if kind not in mlrun.runtimes.RuntimeKinds.nuclio_runtimes(): func.spec.default_handler = "handler" if self.spec: func = enrich_function_from_dict(func, self.spec) elif self.spec: func = mlrun.new_function(self.name, runtime=self.spec) else: raise ValueError("url or spec or code must be specified") if self.requirements: func.with_requirements(self.requirements) self._function = func return func @property def address(self): return self._address def deploy(self, **kwargs): """deploy the function""" 
self._address = self._function.deploy(**kwargs) return self._address
33.814286
83
0.586608
[ "Apache-2.0" ]
AlonMaor14/mlrun
mlrun/runtimes/function_reference.py
4,734
Python
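FunctionReference above resolves a URL, inline code, or a spec dict into an mlrun function object. A minimal sketch of the inline-code path, assuming mlrun is installed; the "job" kind, the image name, and the handler body are illustrative only:

from mlrun.runtimes.function_reference import FunctionReference

fn_ref = FunctionReference(
    kind="job",                          # illustrative runtime kind
    image="mlrun/mlrun",                 # illustrative image
    code="def handler(context):\n    context.logger.info('hello')\n",
    name="hello",
)

func = fn_ref.to_function()
# The source was base64-encoded into func.spec.build.functionSourceCode and,
# since "job" is not a nuclio runtime, spec.default_handler was set to "handler".
print(func.spec.default_handler)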
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.


class HypervBackupEnvParams(object):

    """Implementation of the 'HyperVBackupEnvParams' model.

    Message to capture any additional backup params for a HyperV environment.

    Attributes:
        allow_crash_consistent_snapshot (bool): Whether to fallback to take a
            crash-consistent snapshot in case taking an app-consistent
            snapshot fails.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "allow_crash_consistent_snapshot": 'allowCrashConsistentSnapshot'
    }

    def __init__(self, allow_crash_consistent_snapshot=None):
        """Constructor for the HypervBackupEnvParams class"""

        # Initialize members of the class
        self.allow_crash_consistent_snapshot = allow_crash_consistent_snapshot

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        allow_crash_consistent_snapshot = dictionary.get('allowCrashConsistentSnapshot')

        # Return an object of this model
        return cls(allow_crash_consistent_snapshot)
29.654545
88
0.67198
[ "Apache-2.0" ]
anoopbhat/management-sdk-python
cohesity_management_sdk/models/hyperv_backup_env_params.py
1,631
Python
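A short sketch of the deserialization helper defined above; the payload dict is hypothetical but uses the one API property name declared in _names:

payload = {'allowCrashConsistentSnapshot': True}

params = HypervBackupEnvParams.from_dictionary(payload)
print(params.allow_crash_consistent_snapshot)            # True

# A missing payload is tolerated and maps to None
assert HypervBackupEnvParams.from_dictionary(None) is None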
from {{ project_name }}.settings.base import * # noqa: F401, F403
33.5
66
0.686567
[ "MIT" ]
pennlabs/labs-django-startproject
project_name/settings/staging.py
67
Python
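The staging settings module above only re-exports the base settings; `{{ project_name }}` is filled in by Django's startproject templating. A hedged sketch of how such a module is usually selected at runtime ("myproject" stands in for the rendered project name):

# Typically placed in manage.py or wsgi.py of the generated project.
import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings.staging")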
""" This file contains SPRKKRAtoms - an enhanced version of Atoms to be used with SPRKKR """ from ase import Atoms from ..common.unique_values import UniqueValuesMapping import spglib from ase.spacegroup import Spacegroup import numpy as np from ..sprkkr.sites import Site from ..common.misc import numpy_index class SPRKKRAtoms(Atoms): """ ASE Atoms object extended by the data necessary for SPR-KKR calculations """ @staticmethod def promote_ase_atoms(obj, symmetry=None): """ Convert ASE Atoms object to the one usable by SPRKKR. For the case of the usability it is a bit ugly hack: The __class__ attribute is replaced so the extra methods and properties of the objects will be available. Parameters ---------- obj: ase.Atoms The atoms object to be promoted to be used for SPRKKR calculations symmetry: boolean or None The sites property of the resulting object will consider the symmetry of the structure. I.e., the by-symmetry-equal atomic sites will share the same sites object. Default None is the same as True, however it does not change the symmetry of the already promoted obj passed into the routine. """ if obj and not isinstance(obj, SPRKKRAtoms): if obj.__class__ is Atoms: obj.__class__ = SPRKKRAtoms else: if not isinstance(obj, Atoms): raise(f'Can not promote class {obj} of class {obj.__class__} to {SPRKKRAtoms}') class SprKKrAtomsEx(obj.__class__, SPRKKRAtoms): pass obj.__class__ = SprKKrAtomsEx obj._init(True if symmetry is None else symmetry) else: if symmetry is not None: obj.symmetry = symmetry return obj def __init__(self, *args, symmetry=True, potential=None, **kwargs): """ Creates SPRKKRAtoms atoms Parameters ---------- *args: list The positionals arguments of ase.Atoms.__init__ symmetry: boolean The symmetry will be computed when the sites property will be initialized. I.e., the by-symmetry-equal atomic sites will share the same sites object. **kwargs: dict The named arguments of ase.Atoms.__init__ """ self._init(symmetry, potential) super().__init__(*args, **kwargs) def _init(self, symmetry=True, potential=None): """ The initialization of the additional (not-in-ASE) properties. To be used by constructor and by promote_ase_atoms""" self._unique_sites = None self._potential = potential self._symmetry = symmetry @property def symmetry(self): """ Whether the sites property is/will be generated using symmetry, i.e. whether the Sites objects in the sites property will be shared among symmetric atomic sites. """ return self._symmetry @symmetry.setter def symmetry(self, value): """ Recomputes the sites with enabled/disabled symmetry if the value of the property has changed. """ if self._symmetry == value: return self._symmetry = value if self._unique_sites is not None: if value: self._compute_sites_symmetry() else: self._cancel_sites_symmetry() def compute_spacegroup_for_atomic_numbers(self, atomic_numbers=None, symprec=1e-5): """ Return spacegroup that suits to the atoms' cell structure and to the given atomic_numbers (not necessary the real ones, they can be just ''labels''). """ atomic_numbers = atomic_numbers if atomic_numbers is not None else self.get_atomic_numbers() sg = spglib.get_spacegroup((self.get_cell(), self.get_scaled_positions(), atomic_numbers), symprec=symprec) if sg is None: return None sg_no = int(sg[sg.find('(') + 1:sg.find(')')]) spacegroup = Spacegroup(sg_no) return spacegroup def compute_sites_symmetry(self, spacegroup=None, atomic_numbers=None, consider_old=False, symprec=1e-5): """ SPRKKR has some properties shared by all by-symmetry-equal sites. 
This method initializes _sites property, that holds these properties: makes identical all the atoms on the "symmetry identical positions" with the same atomic number. The method is called automatically when the sites property is firstly accessed. The effect of the method is the nearly same as setting the symmetry property. However, setting the symmetry property on an 'already symmetrized' object has no effect, while this methods always recompute the sites property. Parameters ---------- spacegroup: Spacegroup If not None, the given spacegroup is used for determining the symmetry, instead of the one determined by cell geometry. atomic_numbers: [ int ] Atomic numbers used to determine the spacegroup (if it is not given) to compute the symmetry. The atomic numbers can be ''virtual'', just to denote the equivalence of the sites. The array should have the same length as the number of atoms in the unit cell. If None, self.symbols are used. consider_old: bool If True, and _unique_sites is not None, the non-symmetry-equivalent sites won't be equivalent in the newly computed symmetry. symprec: float A threshold for spatial error for the symmetry computing. See spglib.get_spacegroup """ self._symmetry = True SPRKKRAtoms._compute_sites_symmetry(**locals()) def _compute_sites_symmetry(self, spacegroup=None, atomic_numbers=None, consider_old=False, symprec=1e-5): """ See compute_sites_symmetry - this metod does just the same, but it does not set the symmetry property.""" occupation = self.info.get('occupancy', {}) if not spacegroup and self._symmetry: if atomic_numbers: mapping = UniqueValuesMapping(atomic_numbers) else: mapping = UniqueValuesMapping(self.get_atomic_numbers()) if consider_old and self._unique_sites: mapping = mapping.merge(self._unique_sites) if occupation: def gen_occ(): for i in range(len(mapping)): val = occupation.get(i, None) if val is None: yield val else: yield tuple((k, val[k]) for k in val) mapping = mapping.merge(gen_occ()) spacegroup = self.compute_spacegroup_for_atomic_numbers(mapping.mapping, symprec=symprec) self.info['spacegroup'] = spacegroup if not spacegroup: return self.cancel_sites_symmetry() tags = spacegroup.tag_sites(self.get_scaled_positions()) mapping = mapping.merge( tags ) tags = mapping.mapping sites = np.empty(len(tags), dtype=object) uniq, umap = np.unique(tags, return_inverse = True) used = set() for i in range(len(uniq)): index = umap == i if self._unique_sites is not None: #first non-none of the given index possible = (i for i in self._unique_sites[index]) site = next(filter(None, possible), None) if site in used: site = site.copy() else: used.add(site) else: site = None if not site: symbol = self.symbols[ numpy_index(umap,i)] for ai in np.where(index)[0]: if ai in occupation and occupation[ai]: symbol = occupation[ai] site = Site(self, symbol) sites[index] = site self.sites = sites def cancel_sites_symmetry(self): """ Cancel the use of symmetry in the structure, i.e., makes the Site object uniqe (not shared) for each atomic site. Calling this method is nearly equivalent to the setting the symmetry property to False, however, this method always recompute the sites object, while setting symmetry=False recomputes the sites property only if it was previously set to False. 
""" self._symmetry = False self._cancel_sites_symmetry() def _cancel_sites_symmetry(self): """ See cancel_sites_symmetry - this metod does just the same, but it does not set the symmetry property.""" sites = np.empty(len(self), dtype=object) used = set() occupation = self.info.get('occupancy', {}) for i in range(len(self)): if self._unique_sites is not None: site=self._unique_sites[i] if site in used: site = site.copy() else: used.add(site) else: symbol = occupation[i] if i in occupation and occupation[i] else \ self.symbols[i] site = Site(self, symbol) sites[i] = site self.sites = sites @property def sites(self): """ The sites property holds all the information for the SPR-KKR package: atomic types (including number of semicore and valence electrons), occupancy, symmetries, meshes... Some of the properties are stored in the ASE atoms properties (e.g. occupancy, atomic symbol), however, ASE is not able to hold them all and/or to describe fully the SPR-KKR options; thus, these properties are hold in this array. The changes made on this array are reflected (as is possible) to the ASE properties, but the opposite does not hold - to reflect the changes in these properties please create a new Atoms object with given properties. """ if self._unique_sites is None: self._compute_sites_symmetry() return self._unique_sites @sites.setter def sites(self, v): """ Set the sites property and update all other dependent properties (symbols, occupancy) according to the sites """ an = np.zeros(len(v), dtype= int) occ = {} for i,j in enumerate(v): occ[i] = j.occupation.as_dict an[i] = j.occupation.primary_atomic_number self.set_atomic_numbers(an) self.info['occupancy'] = occ self._unique_sites = v @property def potential(self): if self._potential is None: self._potential = potentials.Potential.from_atoms(self) return self._potential @potential.setter def potential(self, potential): self._potential = potential def reset_sprkkr_potential(self): for i in self.sites: i.reset() if self._potential: self._potential.reset(update_atoms = False) self._potential.set_from_atoms() #at the last - to avoid circular imports from ..potentials import potentials
39.161512
117
0.612496
[ "MIT" ]
ase2sprkkr/ase2sprkkr
src/ase2sprkkr/sprkkr/sprkkr_atoms.py
11,396
Python
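A minimal usage sketch for SPRKKRAtoms above, assuming ase, spglib and ase2sprkkr are installed and the class is importable from ase2sprkkr.sprkkr.sprkkr_atoms (matching the path shown); the bcc-Fe cell is only an illustrative structure:

from ase.build import bulk
from ase2sprkkr.sprkkr.sprkkr_atoms import SPRKKRAtoms

atoms = bulk('Fe', 'bcc', a=2.87)             # a plain ase.Atoms object
atoms = SPRKKRAtoms.promote_ase_atoms(atoms)  # swaps __class__ and adds the SPR-KKR state

# First access to .sites runs _compute_sites_symmetry(), so by-symmetry-equal
# positions end up sharing one Site object (this step needs spglib).
print(len(atoms.sites), atoms.symmetry)

# Disabling symmetry recomputes the sites so every position gets its own Site.
atoms.symmetry = False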
""" Open Orchestrator Cloud Radio Access Network Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django import forms from .models import Image class ImageForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(ImageForm, self).__init__(*args, **kwargs) self.fields['architecture'] = forms.ChoiceField(required=True, choices=[('amd64', 'amd64'), ('i386', 'i386')]) self.fields['format'] = forms.ChoiceField(required=True, widget=forms.Select(attrs={"onChange": 'select(this);'}), choices=[('OpenStack', 'OpenStack'), ('Azure','Azure'), ('AWS','AWS'), ('GCE', 'GCE'), ("Libvirt", "Libvirt"), ("VirtualBox", "VirtualBox"), ("Docker", "Docker"),]) class Meta: model = Image fields = [ "name", "version", "format", "architecture", ]
41.531915
109
0.456967
[ "Apache-2.0", "BSD-3-Clause" ]
howls90/OOCRAN
oocran/django/images/forms.py
1,952
Python
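A minimal sketch of exercising the dynamically added choice fields with plain Django forms, assuming Django is installed; settings.configure() with bare defaults is enough outside a project, ImageChoicesSketch is a made-up class that mirrors the choices injected in ImageForm.__init__, and the submitted values are arbitrary.

import django
from django.conf import settings

if not settings.configured:
    settings.configure()   # bare defaults; no database needed for plain form validation
    django.setup()

from django import forms

class ImageChoicesSketch(forms.Form):
    # mirrors the ChoiceField overrides built in ImageForm.__init__
    architecture = forms.ChoiceField(choices=[('amd64', 'amd64'), ('i386', 'i386')])
    format = forms.ChoiceField(choices=[('OpenStack', 'OpenStack'), ('Docker', 'Docker')])

form = ImageChoicesSketch(data={'architecture': 'amd64', 'format': 'Docker'})
print(form.is_valid())     # True
print(form.cleaned_data)   # {'architecture': 'amd64', 'format': 'Docker'}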
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Jul 8 23:53:58 2019 @author: yanyanyu """ from spark import start_spark from pyspark import SparkConf from pyspark import SparkFiles from pyspark.sql import Row def main(): spark,conf=start_spark() steps_per_floor_=conf['steps_per_floor'] pass def extract(spark): df=spark.read.parquet('tests/test_data/employees') return df def transform(df,steps_per_floor_,spark): df.createOrReplaceTempView("table1") df_transformed=spark.sql("select id, concat(first_name,' ' , second_name) as name, floor* %s as steps_to_desk from table1"%steps_per_floor_) return df_transformed def load(df): df.coalesce(1).write.csv('loaded_data', mode='overwrite', header=True) def create_test_data(spark,conf): local_records=[ Row(id=1, first_name='nancy', second_name="yan", floor=1), Row(id=2, first_name='Dan', second_name='Sommerville', floor=1), Row(id=3, first_name='Alex', second_name='Ioannides', floor=2), Row(id=4, first_name='Ken', second_name='Lai', floor=2), Row(id=5, first_name='Stu', second_name='White', floor=3), Row(id=6, first_name='Mark', second_name='Sweeting', floor=3), Row(id=7, first_name='Phil', second_name='Bird', floor=4), Row(id=8, first_name='Kim', second_name='Suter', floor=4) ] df=spark.createDataFrame(local_records) df_tf=transform(df,conf['steps_per_floor'],spark) df_tf.coalesce(1).write.parquet('tests/test_data/employees_report',mode='overwrite')
27.87931
144
0.66914
[ "Apache-2.0" ]
nancyyanyu/mini_projects
examples_pyspark/pyspark_small_project/etl_job.py
1,617
Python
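The main() above stops after reading the config; here is a hedged sketch of how the extract/transform/load helpers could be wired together locally, building the input in memory instead of the tests/test_data/employees parquet and using 21 as an arbitrary steps_per_floor value. transform and load refer to the functions defined in etl_job.py above.

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master('local[1]').appName('etl_sketch').getOrCreate()

df = spark.createDataFrame([
    Row(id=1, first_name='nancy', second_name='yan', floor=1),
    Row(id=2, first_name='Dan', second_name='Sommerville', floor=2),
])

df_transformed = transform(df, 21, spark)   # transform() as defined above
df_transformed.show()                       # columns: id | name | steps_to_desk
load(df_transformed)                        # writes a single CSV under ./loaded_data
spark.stop()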
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('report_database', '0003_auto_20160501_1646'), ] operations = [ migrations.AddField( model_name='report', name='shared_users', field=models.ManyToManyField(related_name='shared_users', to=settings.AUTH_USER_MODEL), ), ]
25.590909
99
0.673179
[ "MIT" ]
MikeVerdicchio/SafeCollab
report_database/migrations/0004_report_shared_users.py
563
Python
from kafka import KafkaConsumer

# Consume the 'primeiro' topic as part of the 'my_favorite_group' consumer group.
consumer = KafkaConsumer('primeiro', bootstrap_servers='hdpdemo.local:6667', group_id='my_favorite_group')

# Block until messages arrive and print each ConsumerRecord as it comes in.
for msg in consumer:
    print(msg)
29.333333
106
0.789773
[ "MIT" ]
thiagonogueira/fiap
abd/ingestao de dados/demo_ingestao/kafka_python/consumer_group.py
176
Python
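For completeness, a matching producer sketch with kafka-python, using the same hypothetical broker and topic as the consumer above; run it from another process to see the consumer print the records.

from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers='hdpdemo.local:6667')
for i in range(10):
    producer.send('primeiro', value=f'mensagem {i}'.encode('utf-8'))
producer.flush()   # block until the broker has acknowledged the batch
producer.close()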
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # https://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import boto3 from boto3.exceptions import ResourceNotExistsError import botocore.session from tests import unittest def identity(self, x): return x class TestResourceCustomization(unittest.TestCase): def setUp(self): self.botocore_session = botocore.session.get_session() def add_new_method(self, name): def handler(class_attributes, **kwargs): class_attributes[name] = identity return handler def test_can_inject_method_onto_resource(self): session = boto3.Session(botocore_session=self.botocore_session) self.botocore_session.register('creating-resource-class.s3', self.add_new_method(name='my_method')) resource = session.resource('s3') self.assertTrue(hasattr(resource, 'my_method')) self.assertEqual(resource.my_method('anything'), 'anything') class TestSessionErrorMessages(unittest.TestCase): def test_has_good_error_message_when_no_resource(self): bad_resource_name = 'doesnotexist' err_regex = ( '%s.*resource does not exist.' % bad_resource_name ) with self.assertRaisesRegex(ResourceNotExistsError, err_regex): boto3.resource(bad_resource_name) class TestGetAvailableSubresources(unittest.TestCase): def test_s3_available_subresources_exists(self): s3 = boto3.resource('s3') self.assertTrue(hasattr(s3, 'get_available_subresources'))
36.232143
77
0.720059
[ "Apache-2.0" ]
GabrielFraga962/boto3
tests/functional/test_resource.py
2,029
Python
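The same creating-resource-class.s3 hook can be registered on an ordinary session outside the unittest harness. A hedged sketch follows: no AWS credentials or API calls are needed just to build the resource object, and shout is a made-up method name used only for illustration.

import boto3

def add_shout(class_attributes, **kwargs):
    # Runs while boto3 builds the s3 ServiceResource class; inject an extra method.
    class_attributes['shout'] = lambda self, text: text.upper()

session = boto3.Session(region_name='us-east-1')
session.events.register('creating-resource-class.s3', add_shout)

s3 = session.resource('s3')
print(s3.shout('hello'))   # 'HELLO'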
# -*- coding: utf-8 -*- #BEGIN_HEADER # The header block is where all import statements should live from __future__ import print_function import os import re import uuid import requests import json import psutil import subprocess import numpy as np import yaml import time from pprint import pformat from installed_clients.WorkspaceClient import Workspace from installed_clients.ReadsUtilsClient import ReadsUtils # @IgnorePep8 from installed_clients.baseclient import ServerError from installed_clients.AssemblyUtilClient import AssemblyUtil from installed_clients.KBaseReportClient import KBaseReport from installed_clients.kb_quastClient import kb_quast from installed_clients.kb_ea_utilsClient import kb_ea_utils from kb_SPAdes.utils.spades_assembler import SPAdesAssembler class ShockException(Exception): pass #END_HEADER class kb_SPAdes: ''' Module Name: kb_SPAdes Module Description: A KBase module: kb_SPAdes A wrapper for the SPAdes assembler with hybrid features supported. http://bioinf.spbau.ru/spades Always runs in careful mode. Runs 3 threads / CPU. Maximum memory use is set to available memory - 1G. Autodetection is used for the PHRED quality offset and k-mer sizes. A coverage cutoff is not specified. ''' ######## WARNING FOR GEVENT USERS ####### noqa # Since asynchronous IO can lead to methods - even the same method - # interrupting each other, you must be *very* careful when using global # state. A method could easily clobber the state set by another while # the latter method is running. ######################################### noqa VERSION = "1.2.0" GIT_URL = "https://github.com/qzzhang/kb_SPAdes" GIT_COMMIT_HASH = "5b7e88d6993728abc26c93cfef780ee7feb16c63" #BEGIN_CLASS_HEADER # Class variables and functions can be defined in this block DISABLE_SPADES_OUTPUT = False # should be False in production PARAM_IN_WS = 'workspace_name' PARAM_IN_LIB = 'read_libraries' PARAM_IN_CS_NAME = 'output_contigset_name' PARAM_IN_DNA_SOURCE = 'dna_source' PARAM_IN_SINGLE_CELL = 'single_cell' PARAM_IN_METAGENOME = 'metagenomic' PARAM_IN_PLASMID = 'plasmid' PARAM_IN_MIN_CONTIG_LENGTH = 'min_contig_length' PARAM_IN_KMER_SIZES = 'kmer_sizes' PARAM_IN_SKIP_ERR_CORRECT = 'skip_error_correction' INVALID_WS_OBJ_NAME_RE = re.compile('[^\\w\\|._-]') INVALID_WS_NAME_RE = re.compile('[^\\w:._-]') THREADS_PER_CORE = 3 MAX_THREADS = 64 # per email thread with Anton Korobeynikov MAX_THREADS_META = 128 # Increase threads for metagenomic assemblies MEMORY_OFFSET_GB = 1 # 1GB MIN_MEMORY_GB = 5 MAX_MEMORY_GB_SPADES = 500 MAX_MEMORY_GB_META_SPADES = 1000 GB = 1000000000 URL_WS = 'workspace-url' URL_SHOCK = 'shock-url' URL_KB_END = 'kbase-endpoint' TRUE = 'true' FALSE = 'false' def log(self, message, prefix_newline=False): print(('\n' if prefix_newline else '') + str(time.time()) + ': ' + str(message)) def check_shock_response(self, response, errtxt): if not response.ok: try: err = json.loads(response.content)['error'][0] except: # this means shock is down or not responding. self.log("Couldn't parse response error content from Shock: " + response.content) response.raise_for_status() raise ShockException(errtxt + str(err)) # Helper script borrowed from the transform service, logger removed def upload_file_to_shock(self, file_path, token): """ Use HTTP multi-part POST to save a file to a SHOCK instance. 
""" if token is None: raise Exception("Authentication token required!") header = {'Authorization': "Oauth {0}".format(token)} if file_path is None: raise Exception("No file given for upload to SHOCK!") with open(os.path.abspath(file_path), 'rb') as data_file: files = {'upload': data_file} response = requests.post( self.shockURL + '/node', headers=header, files=files, stream=True, allow_redirects=True) self.check_shock_response( response, ('Error trying to upload contig FASTA file {} to Shock: ' ).format(file_path)) return response.json()['data'] # spades is configured with yaml # def generate_spades_yaml(self, reads_data): left = [] # fwd in fr orientation right = [] # rev single = [] # single end reads pacbio = [] # pacbio CLR reads (for pacbio CCS use -s option.) interlaced = [] illumina_present = 0 iontorrent_present = 0 for read in reads_data: seq_tech = read['seq_tech'] if seq_tech == "PacBio CLR": pacbio.append(read['fwd_file']) elif read['type'] == "paired": if 'rev_file' in read and read['rev_file']: left.append(read['fwd_file']) right.append(read['rev_file']) else: interlaced.append(read['fwd_file']) elif read['type'] == "single": single.append(read['fwd_file']) if seq_tech == "IonTorrent": iontorrent_present = 1 elif seq_tech == "Illumina": illumina_present = 1 if (illumina_present == 1 and iontorrent_present == 1): raise ValueError('Both IonTorrent and Illumina read libraries exist. ' + 'SPAdes can not assemble them together.') yml = [] yml_index_counter = 0 # Pacbio CLR ahs to be run with at least one single end or paired end library other_reads_present_for_pacbio = 0 if left or interlaced: yml.append({'type': 'paired-end', 'orientation': 'fr'}) if left: yml[yml_index_counter]['left reads'] = left yml[yml_index_counter]['right reads'] = right if interlaced: yml[yml_index_counter]['interlaced reads'] = interlaced yml_index_counter += 1 other_reads_present_for_pacbio = 1 if single: yml.append({'type': "single"}) yml[yml_index_counter]['single reads'] = single yml_index_counter += 1 other_reads_present_for_pacbio = 1 if pacbio: if other_reads_present_for_pacbio == 1: yml.append({'type': "pacbio"}) yml[yml_index_counter]['single reads'] = pacbio yml_index_counter += 1 else: # RAISE AN ERROR AS PACBIO REQUIRES AT LEAST # ONE SINGLE OR PAIRED ENDS LIBRARY raise ValueError('Per SPAdes requirements : If doing PacBio CLR reads, you must ' + 'also supply at least one paired end or single end reads library') yml_path = os.path.join(self.scratch, 'run.yaml') with open(yml_path, 'w') as yml_file: yaml.safe_dump(yml, yml_file) return yml_path, iontorrent_present def exec_spades(self, dna_source, reads_data, phred_type, kmer_sizes, skip_error_correction): mem = (psutil.virtual_memory().available / self.GB - self.MEMORY_OFFSET_GB) if mem < self.MIN_MEMORY_GB: raise ValueError( 'Only ' + str(psutil.virtual_memory().available) + ' bytes of memory are available. 
The SPAdes wrapper will' + ' not run without at least ' + str(self.MIN_MEMORY_GB + self.MEMORY_OFFSET_GB) + ' gigabytes available') if dna_source == self.PARAM_IN_METAGENOME: max_mem = self.MAX_MEMORY_GB_META_SPADES max_threads = self.MAX_THREADS_META else: max_mem = self.MAX_MEMORY_GB_SPADES max_threads = self.MAX_THREADS threads = min(max_threads, psutil.cpu_count() * self.THREADS_PER_CORE) if mem > max_mem: mem = max_mem outdir = os.path.join(self.scratch, 'spades_output_dir') if not os.path.exists(outdir): os.makedirs(outdir) tmpdir = os.path.join(self.scratch, 'spades_tmp_dir') if not os.path.exists(tmpdir): os.makedirs(tmpdir) cmd = ['spades.py', '--threads', str(threads), '--memory', str(mem), '-o', outdir, '--tmp-dir', tmpdir] print("THE DNA SOURCE IS : " + str(dna_source)) if dna_source == self.PARAM_IN_SINGLE_CELL: cmd += ['--sc'] if dna_source == self.PARAM_IN_PLASMID: cmd += ['--plasmid'] # The plasmid assembly can only be run on a single library if len(reads_data) > 1: raise ValueError('Plasmid assembly requires that one ' + 'and only one library as input. ' + str(len(reads_data)) + ' libraries detected.') if dna_source == self.PARAM_IN_METAGENOME: cmd += ['--meta'] # The metagenome assembly can only be run on a single library # The library must be paired end. if len(reads_data) > 1 or reads_data[0]['type'] != 'paired': error_msg = 'Metagenome assembly requires that one and ' + \ 'only one paired end library as input.' if len(reads_data) > 1: error_msg += ' ' + str(len(reads_data)) + \ ' libraries detected.' raise ValueError(error_msg) else: cmd += ['--careful'] cmd += ['--phred-offset', phred_type] if kmer_sizes is not None: cmd += ['-k ' + kmer_sizes] if skip_error_correction == 1: cmd += ['--only-assembler'] # print("LENGTH OF READSDATA IN EXEC: " + str(len(reads_data))) # print("READS DATA: " + str(reads_data)) # print("SPADES YAML: " + str(self.generate_spades_yaml(reads_data))) spades_yaml_path, iontorrent_present = self.generate_spades_yaml(reads_data) if iontorrent_present == 1: cmd += ['--iontorrent'] cmd += ['--dataset', spades_yaml_path] self.log('Running SPAdes command line:') print("SPADES CMD:" + str(cmd)) self.log(cmd) if self.DISABLE_SPADES_OUTPUT: with open(os.devnull, 'w') as null: p = subprocess.Popen(cmd, cwd=self.scratch, shell=False, stdout=null) else: p = subprocess.Popen(cmd, cwd=self.scratch, shell=False) retcode = p.wait() self.log('Return code: ' + str(retcode)) if p.returncode != 0: raise ValueError('Error running SPAdes, return code: ' + str(retcode) + '\n') return outdir # adapted from # https://github.com/kbase/transform/blob/master/plugins/scripts/convert/trns_transform_KBaseFile_AssemblyFile_to_KBaseGenomes_ContigSet.py # which was adapted from an early version of # https://github.com/kbase/transform/blob/master/plugins/scripts/upload/trns_transform_FASTA_DNA_Assembly_to_KBaseGenomes_ContigSet.py def load_stats(self, input_file_name): self.log('Starting conversion of FASTA to KBaseGenomeAnnotations.Assembly') self.log('Building Object.') if not os.path.isfile(input_file_name): raise Exception('The input file name {0} is not a file!'.format( input_file_name)) with open(input_file_name, 'r') as input_file_handle: contig_id = None sequence_len = 0 fasta_dict = dict() first_header_found = False # Pattern for replacing white space pattern = re.compile(r'\s+') for current_line in input_file_handle: if (current_line[0] == '>'): # found a header line # Wrap up previous fasta sequence if not first_header_found: first_header_found = True else: 
fasta_dict[contig_id] = sequence_len sequence_len = 0 fasta_header = current_line.replace('>', '').strip() try: contig_id = fasta_header.strip().split(' ', 1)[0] except: contig_id = fasta_header.strip() else: sequence_len += len(re.sub(pattern, '', current_line)) # wrap up last fasta sequence, should really make this a method if not first_header_found: raise Exception("There are no contigs in this file") else: fasta_dict[contig_id] = sequence_len return fasta_dict def load_report(self, input_file_name, params, wsname): fasta_stats = self.load_stats(input_file_name) lengths = [fasta_stats[contig_id] for contig_id in fasta_stats] assembly_ref = params[self.PARAM_IN_WS] + '/' + params[self.PARAM_IN_CS_NAME] report = '' report += 'Assembly saved to: ' + assembly_ref + '\n' report += 'Assembled into ' + str(len(lengths)) + ' contigs.\n' report += 'Avg Length: ' + str(sum(lengths) / float(len(lengths))) + \ ' bp.\n' # compute a simple contig length distribution bins = 10 counts, edges = np.histogram(lengths, bins) # @UndefinedVariable report += 'Contig Length Distribution (# of contigs -- min to max ' +\ 'basepairs):\n' for c in range(bins): report += ' ' + str(counts[c]) + '\t--\t' + str(edges[c]) +\ ' to ' + str(edges[c + 1]) + ' bp\n' print('Running QUAST') kbq = kb_quast(self.callbackURL) quastret = kbq.run_QUAST({'files': [{'path': input_file_name, 'label': params[self.PARAM_IN_CS_NAME]}]}) print('Saving report') kbr = KBaseReport(self.callbackURL) report_info = kbr.create_extended_report({ 'message': report, 'objects_created': [{'ref': assembly_ref, 'description': 'Assembled contigs'}], 'direct_html_link_index': 0, 'html_links': [{'shock_id': quastret['shock_id'], 'name': 'report.html', 'label': 'QUAST report'}], 'report_object_name': 'kb_megahit_report_' + str(uuid.uuid4()), 'workspace_name': params['workspace_name'] }) reportName = report_info['name'] reportRef = report_info['ref'] return reportName, reportRef def make_ref(self, object_info): return str(object_info[6]) + '/' + str(object_info[0]) + \ '/' + str(object_info[4]) def determine_unknown_phreds(self, reads, phred64_reads, phred33_reads, unknown_phred_reads, reftoname): print("IN UNKNOWN CHECKING") eautils = kb_ea_utils(self.callbackURL) for ref in unknown_phred_reads: rds = reads[ref] obj_name = reftoname[ref] files_to_check = [] f = rds['files'] if f['type'] == 'interleaved': files_to_check.append(f['fwd']) elif f['type'] == 'paired': files_to_check.append(f['fwd']) files_to_check.append(f['rev']) elif f['type'] == 'single': files_to_check.append(f['fwd']) # print("FILES TO CHECK:" + str(files_to_check)) for file_path in files_to_check: ea_stats_dict = eautils.calculate_fastq_stats({'read_library_path': file_path}) # print("EA UTILS STATS : " + str(ea_stats_dict)) if ea_stats_dict['phred_type'] == '33': phred33_reads.add(obj_name) elif ea_stats_dict['phred_type'] == '64': phred64_reads.add(obj_name) else: raise ValueError(('Reads object {} ({}) phred type is not of the ' + 'expected value of 33 or 64. 
It had a phred type of ' + '{}').format(obj_name, rds, ea_stats_dict['phred_type'])) return phred64_reads, phred33_reads def check_reads(self, params, reads, reftoname): phred64_reads, phred33_reads, unknown_phred_reads = (set() for i in range(3)) for ref in reads: rds = reads[ref] obj_name = reftoname[ref] obj_ref = rds['ref'] if rds['phred_type'] == '33': phred33_reads.add(obj_name) elif rds['phred_type'] == '64': phred64_reads.add(obj_name) else: unknown_phred_reads.add(ref) if rds['read_orientation_outward'] == self.TRUE: raise ValueError( ('Reads object {} ({}) is marked as having outward ' + 'oriented reads, which SPAdes does not ' + 'support.').format(obj_name, obj_ref)) # ideally types would be firm enough that we could rely on the # metagenomic boolean. However KBaseAssembly doesn't have the field # and it's optional anyway. Ideally fix those issues and then set # the --meta command line flag automatically based on the type # Dylan: removing these requirements because too much work for user to go all the way # back and reimport reads with "single_genome" flag set opposite. Additionally, now # that "metagenomic" assembly is now an explicit App instead of an option, this check # is far less necessary # if (rds['single_genome'] == self.TRUE and # params[self.PARAM_IN_DNA_SOURCE] == # self.PARAM_IN_METAGENOME): # raise ValueError( # ('Reads object {} ({}) is marked as containing dna from ' + # 'a single genome but the assembly method was specified ' + # 'as metagenomic').format(obj_name, obj_ref)) if (rds['single_genome'] == self.FALSE and params[self.PARAM_IN_DNA_SOURCE] != self.PARAM_IN_METAGENOME): raise ValueError( ('Reads object {} ({}) is marked as containing ' + 'metagenomic data but the assembly method was not ' + 'specified as metagenomic').format(obj_name, obj_ref)) # IF UNKNOWN TYPE NEED TO DETERMINE PHRED TYPE USING EAUTILS if len(unknown_phred_reads) > 0: phred64_reads, phred33_reads = \ self.determine_unknown_phreds(reads, phred64_reads, phred33_reads, unknown_phred_reads, reftoname) # IF THERE ARE READS OF BOTH PHRED 33 and 64, throw an error if (len(phred64_reads) > 0) and (len(phred33_reads) > 0): raise ValueError( ('The set of Reads objects passed in have reads that have different ' + 'phred type scores. 
SPAdes does not support assemblies of ' + 'reads with different phred type scores.\nThe following read objects ' + 'have phred 33 scores : {}.\nThe following read objects have phred 64 ' + 'scores : {}').format(", ".join(phred33_reads), ", ".join(phred64_reads))) elif len(phred64_reads) > 0: return '64' elif len(phred33_reads) > 0: return '33' else: raise ValueError('The phred type of the read(s) was unable to be determined') def process_params(self, params): if (self.PARAM_IN_WS not in params or not params[self.PARAM_IN_WS]): raise ValueError(self.PARAM_IN_WS + ' parameter is required') if self.INVALID_WS_NAME_RE.search(params[self.PARAM_IN_WS]): raise ValueError('Invalid workspace name ' + params[self.PARAM_IN_WS]) if self.PARAM_IN_LIB not in params: raise ValueError(self.PARAM_IN_LIB + ' parameter is required') if type(params[self.PARAM_IN_LIB]) != list: raise ValueError(self.PARAM_IN_LIB + ' must be a list') if not params[self.PARAM_IN_LIB]: raise ValueError('At least one reads library must be provided') # for l in params[self.PARAM_IN_LIB]: # print("PARAM_IN_LIB : " + str(l)) # if self.INVALID_WS_OBJ_NAME_RE.search(l): # raise ValueError('Invalid workspace object name ' + l) if (self.PARAM_IN_CS_NAME not in params or not params[self.PARAM_IN_CS_NAME]): raise ValueError(self.PARAM_IN_CS_NAME + ' parameter is required') if self.INVALID_WS_OBJ_NAME_RE.search(params[self.PARAM_IN_CS_NAME]): raise ValueError('Invalid workspace object name ' + params[self.PARAM_IN_CS_NAME]) if self.PARAM_IN_DNA_SOURCE in params: s = params[self.PARAM_IN_DNA_SOURCE] # print("FOUND THE DNA SOURCE: " + str(params[self.PARAM_IN_DNA_SOURCE])) if s not in [self.PARAM_IN_SINGLE_CELL, self.PARAM_IN_METAGENOME, self.PARAM_IN_PLASMID]: params[self.PARAM_IN_DNA_SOURCE] = None else: params[self.PARAM_IN_DNA_SOURCE] = None # print("PARAMS ARE:" + str(params)) if self.PARAM_IN_MIN_CONTIG_LENGTH in params: if not isinstance(params[self.PARAM_IN_MIN_CONTIG_LENGTH], int): raise ValueError('min_contig_length must be of type int') if self.PARAM_IN_KMER_SIZES in params and params[self.PARAM_IN_KMER_SIZES] is not None: print("KMER_SIZES: " + ",".join(str(num) for num in params[self.PARAM_IN_KMER_SIZES])) if self.PARAM_IN_SKIP_ERR_CORRECT in params and params[self.PARAM_IN_SKIP_ERR_CORRECT] is not None: print("SKIP ERR CORRECTION: " + str(params[self.PARAM_IN_SKIP_ERR_CORRECT])) #END_CLASS_HEADER # config contains contents of config file in a hash or None if it couldn't # be found def __init__(self, config): #BEGIN_CONSTRUCTOR self.cfg = config self.cfg['SDK_CALLBACK_URL'] = os.environ['SDK_CALLBACK_URL'] self.cfg['KB_AUTH_TOKEN'] = os.environ['KB_AUTH_TOKEN'] self.callbackURL = self.cfg['SDK_CALLBACK_URL'] self.log('Callback URL: ' + self.callbackURL) self.workspaceURL = config[self.URL_WS] self.shockURL = config[self.URL_SHOCK] self.catalogURL = config[self.URL_KB_END] + '/catalog' self.scratch = os.path.abspath(config['scratch']) if not os.path.exists(self.scratch): os.makedirs(self.scratch) #END_CONSTRUCTOR pass def run_SPAdes(self, ctx, params): """ Run SPAdes on paired end libraries :param params: instance of type "SPAdesParams" (Input parameters for running SPAdes. workspace_name - the name of the workspace from which to take input and store output. output_contigset_name - the name of the output contigset read_libraries - a list of Illumina PairedEndLibrary files in FASTQ or BAM format. 
dna_source - (optional) the source of the DNA used for sequencing 'single_cell': DNA amplified from a single cell via MDA anything else: Standard DNA sample from multiple cells. Default value is None. min_contig_length - (optional) integer to filter out contigs with length < min_contig_length from the SPAdes output. Default value is 0 implying no filter. kmer_sizes - (optional) K-mer sizes, Default values: 33, 55, 77, 99, 127 (all values must be odd, less than 128 and listed in ascending order) In the absence of these values, K values are automatically selected. skip_error_correction - (optional) Assembly only (No error correction). By default this is disabled.) -> structure: parameter "workspace_name" of String, parameter "output_contigset_name" of String, parameter "read_libraries" of list of type "paired_end_lib" (The workspace object name of a PairedEndLibrary file, whether of the KBaseAssembly or KBaseFile type.), parameter "dna_source" of String, parameter "min_contig_length" of Long, parameter "kmer_sizes" of list of Long, parameter "skip_error_correction" of type "bool" (A boolean. 0 = false, anything else = true.) :returns: instance of type "SPAdesOutput" (Output parameters for SPAdes run. report_name - the name of the KBaseReport.Report workspace object. report_ref - the workspace reference of the report.) -> structure: parameter "report_name" of String, parameter "report_ref" of String """ # ctx is the context object # return variables are: output #BEGIN run_SPAdes # A whole lot of this is adapted or outright copied from # https://github.com/msneddon/MEGAHIT self.log('Running run_SPAdes with params:\n' + pformat(params)) token = ctx['token'] # the reads should really be specified as a list of absolute ws refs # but the narrative doesn't do that yet self.process_params(params) # get absolute refs from ws wsname = params[self.PARAM_IN_WS] obj_ids = [] for r in params[self.PARAM_IN_LIB]: obj_ids.append({'ref': r if '/' in r else (wsname + '/' + r)}) ws = Workspace(self.workspaceURL, token=token) ws_info = ws.get_object_info_new({'objects': obj_ids}) reads_params = [] reftoname = {} for wsi, oid in zip(ws_info, obj_ids): ref = oid['ref'] reads_params.append(ref) obj_name = wsi[1] reftoname[ref] = wsi[7] + '/' + obj_name readcli = ReadsUtils(self.callbackURL, token=ctx['token']) typeerr = ('Supported types: KBaseFile.SingleEndLibrary ' + 'KBaseFile.PairedEndLibrary ' + 'KBaseAssembly.SingleEndLibrary ' + 'KBaseAssembly.PairedEndLibrary') try: reads = readcli.download_reads({'read_libraries': reads_params, 'interleaved': 'false', 'gzipped': None })['files'] except ServerError as se: self.log('logging stacktrace from dynamic client error') self.log(se.data) if typeerr in se.message: prefix = se.message.split('.')[0] raise ValueError( prefix + '. 
Only the types ' + 'KBaseAssembly.PairedEndLibrary ' + 'and KBaseFile.PairedEndLibrary are supported') else: raise self.log('Got reads data from converter:\n' + pformat(reads)) phred_type = self.check_reads(params, reads, reftoname) reads_data = [] for ref in reads: reads_name = reftoname[ref] f = reads[ref]['files'] # print ("REF:" + str(ref)) # print ("READS REF:" + str(reads[ref])) seq_tech = reads[ref]["sequencing_tech"] if f['type'] == 'interleaved': reads_data.append({'fwd_file': f['fwd'], 'type': 'paired', 'seq_tech': seq_tech}) elif f['type'] == 'paired': reads_data.append({'fwd_file': f['fwd'], 'rev_file': f['rev'], 'type': 'paired', 'seq_tech': seq_tech}) elif f['type'] == 'single': reads_data.append({'fwd_file': f['fwd'], 'type': 'single', 'seq_tech': seq_tech}) else: raise ValueError('Something is very wrong with read lib' + reads_name) kmer_sizes = None if self.PARAM_IN_KMER_SIZES in params and params[self.PARAM_IN_KMER_SIZES] is not None: if (len(params[self.PARAM_IN_KMER_SIZES])) > 0: kmer_sizes = ",".join(str(num) for num in params[self.PARAM_IN_KMER_SIZES]) skip_error_correction = 0 if self.PARAM_IN_SKIP_ERR_CORRECT in params and params[self.PARAM_IN_SKIP_ERR_CORRECT] is not None: if params[self.PARAM_IN_SKIP_ERR_CORRECT] == 1: skip_error_correction = 1 spades_out = self.exec_spades(params[self.PARAM_IN_DNA_SOURCE], reads_data, phred_type, kmer_sizes, skip_error_correction) self.log('SPAdes output dir: ' + spades_out) # parse the output and save back to KBase output_contigs = os.path.join(spades_out, 'scaffolds.fasta') self.log('Uploading FASTA file to Assembly') assemblyUtil = AssemblyUtil(self.callbackURL, token=ctx['token'], service_ver='release') if params.get('min_contig_length', 0) > 0: assemblyUtil.save_assembly_from_fasta( {'file': {'path': output_contigs}, 'workspace_name': wsname, 'assembly_name': params[self.PARAM_IN_CS_NAME], 'min_contig_length': params['min_contig_length'] }) # load report from scaffolds.fasta.filtered.fa report_name, report_ref = self.load_report( output_contigs + '.filtered.fa', params, wsname) else: assemblyUtil.save_assembly_from_fasta( {'file': {'path': output_contigs}, 'workspace_name': wsname, 'assembly_name': params[self.PARAM_IN_CS_NAME] }) # load report from scaffolds.fasta report_name, report_ref = self.load_report( output_contigs, params, wsname) output = {'report_name': report_name, 'report_ref': report_ref } #END run_SPAdes # At some point might do deeper type checking... if not isinstance(output, dict): raise ValueError('Method run_SPAdes return value ' + 'output is not type dict as required.') # return the results return [output] def run_HybridSPAdes(self, ctx, params): """ Run HybridSPAdes on paired end libraries with PacBio CLR and Oxford Nanopore reads :param params: instance of type "HybridSPAdesParams" (------To run HybridSPAdes 3.13.0 you need at least one library of the following types:------ 1) Illumina paired-end/high-quality mate-pairs/unpaired reads 2) IonTorrent paired-end/high-quality mate-pairs/unpaired reads 3) PacBio CCS reads Version 3.13.0 of SPAdes supports paired-end reads, mate-pairs and unpaired reads. SPAdes can take as input several paired-end and mate-pair libraries simultaneously. workspace_name - the name of the workspace from which to take input and store output. 
output_contigset_name - the name of the output contigset read_libraries - a list of Illumina or IonTorrent paired-end/high-quality mate-pairs/unpaired reads long_reads_libraries - a list of PacBio, Oxford Nanopore Sanger reads and/or additional contigs dna_source - the source of the DNA used for sequencing 'single_cell': DNA amplified from a single cell via MDA anything else: Standard DNA sample from multiple cells. Default value is None. pipeline_options - a list of string specifying how the SPAdes pipeline should be run kmer_sizes - (optional) K-mer sizes, Default values: 21, 33, 55, 77, 99, 127 (all values must be odd, less than 128 and listed in ascending order) In the absence of these values, K values are automatically selected. min_contig_length - integer to filter out contigs with length < min_contig_length from the HybridSPAdes output. Default value is 0 implying no filter. @optional dna_source @optional pipeline_options @optional kmer_sizes @optional min_contig_length) -> structure: parameter "workspace_name" of String, parameter "output_contigset_name" of String, parameter "reads_libraries" of list of type "ReadsParams" (parameter groups--define attributes for specifying inputs with YAML data set file (advanced) The following attributes are available: - orientation ("fr", "rf", "ff") - type ("paired-end", "mate-pairs", "hq-mate-pairs", "single", "pacbio", "nanopore", "sanger", "trusted-contigs", "untrusted-contigs") - interlaced reads (comma-separated list of files with interlaced reads) - left reads (comma-separated list of files with left reads) - right reads (comma-separated list of files with right reads) - single reads (comma-separated list of files with single reads or unpaired reads from paired library) - merged reads (comma-separated list of files with merged reads)) -> structure: parameter "lib_ref" of type "obj_ref" (An X/Y/Z style KBase object reference), parameter "orientation" of String, parameter "lib_type" of String, parameter "long_reads_libraries" of list of type "LongReadsParams" -> structure: parameter "long_reads_ref" of type "obj_ref" (An X/Y/Z style KBase object reference), parameter "long_reads_type" of String, parameter "dna_source" of String, parameter "pipeline_options" of list of String, parameter "kmer_sizes" of list of Long, parameter "min_contig_length" of Long, parameter "create_report" of type "bool" (A boolean. 0 = false, anything else = true.) :returns: instance of type "SPAdesOutput" (Output parameters for SPAdes run. report_name - the name of the KBaseReport.Report workspace object. report_ref - the workspace reference of the report.) -> structure: parameter "report_name" of String, parameter "report_ref" of String """ # ctx is the context object # return variables are: output #BEGIN run_HybridSPAdes self.log('Running run_HybridSPAdes with params:\n{}'.format( json.dumps(params, indent=1))) spades_assembler = SPAdesAssembler(self.cfg, ctx.provenance()) output = spades_assembler.run_hybrid_spades(params) #END run_HybridSPAdes # At some point might do deeper type checking... if not isinstance(output, dict): raise ValueError('Method run_HybridSPAdes return value ' + 'output is not type dict as required.') # return the results return [output] def run_metaSPAdes(self, ctx, params): """ Run SPAdes on paired end libraries for metagenomes :param params: instance of type "SPAdesParams" (Input parameters for running SPAdes. workspace_name - the name of the workspace from which to take input and store output. 
output_contigset_name - the name of the output contigset read_libraries - a list of Illumina PairedEndLibrary files in FASTQ or BAM format. dna_source - (optional) the source of the DNA used for sequencing 'single_cell': DNA amplified from a single cell via MDA anything else: Standard DNA sample from multiple cells. Default value is None. min_contig_length - (optional) integer to filter out contigs with length < min_contig_length from the SPAdes output. Default value is 0 implying no filter. kmer_sizes - (optional) K-mer sizes, Default values: 33, 55, 77, 99, 127 (all values must be odd, less than 128 and listed in ascending order) In the absence of these values, K values are automatically selected. skip_error_correction - (optional) Assembly only (No error correction). By default this is disabled.) -> structure: parameter "workspace_name" of String, parameter "output_contigset_name" of String, parameter "read_libraries" of list of type "paired_end_lib" (The workspace object name of a PairedEndLibrary file, whether of the KBaseAssembly or KBaseFile type.), parameter "dna_source" of String, parameter "min_contig_length" of Long, parameter "kmer_sizes" of list of Long, parameter "skip_error_correction" of type "bool" (A boolean. 0 = false, anything else = true.) :returns: instance of type "SPAdesOutput" (Output parameters for SPAdes run. report_name - the name of the KBaseReport.Report workspace object. report_ref - the workspace reference of the report.) -> structure: parameter "report_name" of String, parameter "report_ref" of String """ # ctx is the context object # return variables are: output #BEGIN run_metaSPAdes output = self.run_SPAdes(ctx,params)[0] #END run_metaSPAdes # At some point might do deeper type checking... if not isinstance(output, dict): raise ValueError('Method run_metaSPAdes return value ' + 'output is not type dict as required.') # return the results return [output] def status(self, ctx): #BEGIN_STATUS returnVal = {'state': "OK", 'message': "", 'version': self.VERSION, 'git_url': self.GIT_URL, 'git_commit_hash': self.GIT_COMMIT_HASH} del ctx # shut up pep8 #END_STATUS return [returnVal]
46.824096
143
0.594097
[ "MIT" ]
mclark58/kb_SPAdes
lib/kb_SPAdes/kb_SPAdesImpl.py
38,864
Python
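A standalone sketch of the run.yaml structure that generate_spades_yaml() assembles before handing it to spades.py via --dataset. The file names are invented; the example shows one fr-oriented paired-end library plus PacBio CLR reads, which is the combination the method special-cases (PacBio CLR must be accompanied by at least one paired-end or single-end library).

import yaml

dataset = [
    {'type': 'paired-end', 'orientation': 'fr',
     'left reads': ['lib1_fwd.fastq'],
     'right reads': ['lib1_rev.fastq']},
    {'type': 'pacbio',
     'single reads': ['pacbio_clr.fastq']},
]

with open('run.yaml', 'w') as yml_file:
    yaml.safe_dump(dataset, yml_file)

# The assembler is then invoked roughly as the wrapper does it:
#   spades.py --careful --dataset run.yaml -o spades_output_dir --threads N --memory M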
import os import tensorflow as tf from tensorkit.log import logger, Color class Restore(object): def __init__(self): self._var_list = None self._restore_saver = None self._restore_optimistic = False self.restore_ckpt_file = None self._inited = False def init(self, var_list=None, ckpt_dir=None, ckpt_file=None, optimistic=False): """ :param var_list: vars for restore :param ckpt_dir: prefix of model files. :param ckpt_file: exact name of model file, priority is higher than `ckpt_dir` :param optimistic: only restore weights of same names with model. :return: """ assert (var_list is None) or (len(var_list) > 0), 'invalid var_list: {}'.format(var_list) assert ckpt_dir is not None or ckpt_file is not None, 'ckpt_dir and ckpt_file are both None' self._var_list = var_list self._restore_optimistic = optimistic if ckpt_file is None: assert os.path.exists(ckpt_dir), 'invalid checkpoint dir: %s' % ckpt_dir # get ckpt file. self.restore_ckpt_file = tf.train.latest_checkpoint(os.path.dirname(ckpt_dir + os.sep)) else: self.restore_ckpt_file = ckpt_file self._inited = True return self def restore(self, sess): assert self._inited, 'make sure init() before restore()' if self._restore_vars(sess): logger.info('- succeed restore variables from: {}'.format(self.restore_ckpt_file)) return True return False def _restore_vars(self, sess): """ :param sess: :return: boolean for successful or not """ if not self._restore_optimistic: if self.restore_ckpt_file is None: logger.warn( Color.yellow('No checkpoint file for restore vars, checkpoint file is None', bold=True)) return False self._restore_saver = tf.train.Saver(self._var_list, name='tk_restore') self._restore_saver.restore(sess, self.restore_ckpt_file) return True else: return self._optimistic_restore_model(sess) def _optimistic_restore_model(self, sess): """ restore weights of same names with model. 
:param sess: :return: """ if self.restore_ckpt_file is None: logger.warn(Color.yellow('No ckpt file for restore vars, ckpt file is None')) return False reader = tf.train.NewCheckpointReader(self.restore_ckpt_file) saved_shapes = reader.get_variable_to_shape_map() if self._var_list is None: restore_key2vars = {var.name.split(':')[0]: var for var in tf.global_variables()} elif isinstance(self._var_list, list): restore_key2vars = {var.name.split(':')[0]: var for var in self._var_list} elif isinstance(self._var_list, dict): restore_key2vars = self._var_list else: raise RuntimeError('type error {}'.format(self._var_list)) assert len(restore_key2vars) > 0 restore_key2vars = sorted([(k, v) for k, v in restore_key2vars.items() if k in saved_shapes]) msg = [] var_list = dict() with tf.variable_scope('', reuse=True): for key, var in restore_key2vars: var_shape = var.get_shape().as_list() if var_shape == saved_shapes[key]: var_list[key] = var var_name = var.name[:var.name.index(':')] msg.append('- restoring variable: {}'.format(var_name) if var_name == key else '- restoring variable {} from {}'.format(var_name, key)) else: msg.append(Color.yellow( '- variable({}) with inconsistent shape: {}(graph) != {}(ckpt)'.format( key, var_shape, saved_shapes[key]) )) if len(var_list) != 0: msg += ['- total variable count: {}'.format(len(var_list))] logger.info('\n'.join(msg)) saver = tf.train.Saver(var_list, name='tk_restore') saver.restore(sess, self.restore_ckpt_file) return True else: logger.warn(Color.yellow('No vars need to restore from file: {}'.format(self.restore_ckpt_file))) return False def __str__(self): content = 'RESTORE_OPTIMISTIC: %s' \ '\nRESTORE_CHECKPOINT_FILE: %s' % (self._restore_optimistic, self.restore_ckpt_file) return content
42.225225
109
0.588436
[ "MIT" ]
nonu116/HDR-GAN
tensorkit/restore.py
4,687
Python
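A hedged TF1-style usage sketch for the Restore helper above. The checkpoint directory and the import path tensorkit.restore are assumptions, and optimistic=True restores only the variables whose names and shapes match the checkpoint.

import tensorflow as tf
from tensorkit.restore import Restore   # assumes the module above is importable like this

# ... build the model graph here ...

restorer = Restore().init(ckpt_dir='./checkpoints/', optimistic=True)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    if not restorer.restore(sess):
        print('nothing restored from', restorer.restore_ckpt_file)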
import config as c import random as r def print_map(map_grid): print("= " * (len(map_grid) + 2)) for row in map_grid: print("||", end='') print(*row, sep=" ", end='') print("||") print("= " * (len(map_grid) + 2)) # Builds map with all of one type of tile # Should be WALL or FLOOR def init_empty_map(dimension, default_tile): map_grid = [] for i in range(dimension): map_grid.append([default_tile] * dimension) return map_grid # def build_ruins(dimension, p_mod): # map_grid = init_empty_map(dimension, c.FLOOR) # build_dungeon_walls(map_grid, p_mod) # return map_grid # Randomly populate wall tiles across an empty dungeon floor def build_dungeon_walls(map_grid, p_mod): for y in range(0, len(map_grid)): for x in range(0, len(map_grid)): # Determine if wall tile will be populated if r.randint(0,100) / 100 < p_mod: map_grid[y][x] = c.WALL def build_wall_clusters(map_grid, p_mod): for y in range(0, len(map_grid) - 1): for x in range(0, len(map_grid) - 1): # Determine if a few tiles will be populated if r.randint(0,100) / 100 < p_mod: build_cluster(map_grid, y, x) # Populate a cluster of 2-3 tiles on the map # Does not check for overlap of existing wall tiles def build_cluster(map_grid, row, column): itr = r.randint(1,3) while itr > 0: map_grid[row][column] = c.WALL next_direction = r.choice(get_valid_cardinals(map_grid, row, column, False)) row += c.CARDINAL_VECTORS[next_direction][c.Y_INDEX] column += c.CARDINAL_VECTORS[next_direction][c.X_INDEX] itr -= 1 # Returns a subset of cardinal directions which you could move from a given tile on a map # 'diaganol' is a flag for whether or not to consider diaganol adjacency def get_valid_cardinals(map_grid, row, column, diaganol): valid_cardinals = [] if row > 0: valid_cardinals.append(c.NORTH) if column > 0: valid_cardinals.append(c.WEST) if row < len(map_grid) - 1: valid_cardinals.append(c.SOUTH) if column < len(map_grid) - 1: valid_cardinals.append(c.EAST) if diaganol: if row > 0 and column > 0: valid_cardinals.append(c.NORTHWEST) if row > 0 and column < len(map_grid) - 1: valid_cardinals.append(c.NORTHEAST) if row < len(map_grid) - 1 and column > 0: valid_cardinals.append(c.SOUTHWEST) if row < len(map_grid) - 1 and column < len(map_grid) - 1: valid_cardinals.append(c.SOUTHEAST) return valid_cardinals # Clears all tiles of a given type, which have no adjacent matching tiles # Default clear state is a FLOOR tile # This considers diagonal adjacency def remove_adjacentless_tiles(map_grid, tile_type): for y in range(0, len(map_grid)): for x in range(0, len(map_grid)): if map_grid[y][x] == tile_type and has_adjacent_tile(map_grid, y, x) is not True: map_grid[y][x] = c.FLOOR # TODO Debug def has_adjacent_tile(map_grid, y, x): tile_type = map_grid[y][x] cardinals = get_valid_cardinals(map_grid, y, x, True) for cardinal in cardinals: y_adj = y + c.CARDINAL_VECTORS[cardinal][c.Y_INDEX] x_adj = x + c.CARDINAL_VECTORS[cardinal][c.X_INDEX] if map_grid[y_adj][x_adj] == tile_type: return True return False
37.315217
93
0.645791
[ "MIT" ]
MBogert/dungeon_maker
map.py
3,433
Python
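A hedged driver sketch for the map helpers above. The tile constants live in the config module that is not shown, so c.FLOOR and c.WALL are assumptions, and the module is imported under an alias since the file is named map.py.

import config as c          # assumed to define FLOOR, WALL and the CARDINAL_* tables
import map as dungeon_map   # the module above (map.py)

grid = dungeon_map.init_empty_map(12, c.FLOOR)
dungeon_map.build_wall_clusters(grid, p_mod=0.08)      # sprinkle small 1-3 tile clusters
dungeon_map.remove_adjacentless_tiles(grid, c.WALL)    # drop isolated wall tiles
dungeon_map.print_map(grid)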
# Nodes represent a definition of a value in our graph of operators. from typing import TYPE_CHECKING, Union, Callable, Any, Tuple, List, Optional, Dict, Set from ._compatibility import compatibility from .immutable_collections import immutable_dict, immutable_list import torch import builtins import types from torch.fx.operator_schemas import normalize_function, normalize_module, ArgsKwargsPair if TYPE_CHECKING: from .graph import Graph BaseArgumentTypes = Union[str, int, float, bool, torch.dtype, torch.Tensor, torch.device, torch.memory_format] base_types = BaseArgumentTypes.__args__ # type: ignore[attr-defined] Target = Union[Callable[..., Any], str] Argument = Optional[Union[ Tuple[Any, ...], # actually Argument, but mypy can't represent recursive types List[Any], # actually Argument Dict[str, Any], # actually Argument slice, # Slice[Argument, Argument, Argument], but slice is not a templated type in typing 'Node', BaseArgumentTypes ]] _side_effectful_functions: Set[Callable] = { torch._assert, torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit} # this is fixed on master, WAR for 1.5 def _find_module_of_method(orig_method: Callable[..., Any]) -> str: name = orig_method.__name__ module = orig_method.__module__ if module is not None: return module for guess in [torch, torch.nn.functional]: if getattr(guess, name, None) is orig_method: return guess.__name__ raise RuntimeError(f'cannot find module for {orig_method}') # Borrowed from CPython typing module # https://github.com/python/cpython/blob/f90dc36c15d7fee0efaf6d39e97be0bdf2683e93/Lib/typing.py#L156 def _type_repr(obj): """Return the repr() of an object, special-casing types (internal helper). If obj is a type, we return a shorter version than the default type.__repr__, based on the module and qualified name, which is typically enough to uniquely identify a type. For everything else, we fall back on repr(obj). """ # HACK: In Python 3.6, type aliases from ``typing`` are instances of ``type``, but in # later Python versions, type aliases are not instances of ``type``!! We want # all type aliases to fall through to ``repr``, so if we have a type that is # in the module typing, don't go down this path. if isinstance(obj, type) and obj.__module__ != 'typing': if obj.__module__ == 'builtins': return obj.__qualname__ return f'{obj.__module__}.{obj.__qualname__}' if obj is ...: return('...') if isinstance(obj, types.FunctionType): return obj.__name__ return repr(obj) def _get_qualified_name(func: Callable[..., Any]) -> str: # things like getattr just appear in builtins if getattr(builtins, func.__name__, None) is func: return func.__name__ name = func.__name__ module = _find_module_of_method(func) module = module.replace('torch._ops', 'torch.ops') # WAR for bug in how torch.ops assigns module return f'{module}.{name}' def _format_arg(arg) -> str: if isinstance(arg, list): items = ', '.join(_format_arg(a) for a in arg) return f'[{items}]' elif isinstance(arg, tuple): items = ', '.join(_format_arg(a) for a in arg) maybe_comma = ',' if len(arg) == 1 else '' return f'({items}{maybe_comma})' elif isinstance(arg, dict): items_str = ', '.join(f'{k}: {_format_arg(v)}' for k, v in arg.items()) return f'{{{items_str}}}' if isinstance(arg, Node): return '%' + str(arg) else: return str(arg) @compatibility(is_backward_compatible=True) class Node: """ ``Node`` is the data structure that represents individual operations within a ``Graph``. 
For the most part, Nodes represent callsites to various entities, such as operators, methods, and Modules (some exceptions include nodes that specify function inputs and outputs). Each ``Node`` has a function specified by its ``op`` property. The ``Node`` semantics for each value of ``op`` are as follows: - ``placeholder`` represents a function input. The ``name`` attribute specifies the name this value will take on. ``target`` is similarly the name of the argument. ``args`` holds either: 1) nothing, or 2) a single argument denoting the default parameter of the function input. ``kwargs`` is don't-care. Placeholders correspond to the function parameters (e.g. ``x``) in the graph printout. - ``get_attr`` retrieves a parameter from the module hierarchy. ``name`` is similarly the name the result of the fetch is assigned to. ``target`` is the fully-qualified name of the parameter's position in the module hierarchy. ``args`` and ``kwargs`` are don't-care - ``call_function`` applies a free function to some values. ``name`` is similarly the name of the value to assign to. ``target`` is the function to be applied. ``args`` and ``kwargs`` represent the arguments to the function, following the Python calling convention - ``call_module`` applies a module in the module hierarchy's ``forward()`` method to given arguments. ``name`` is as previous. ``target`` is the fully-qualified name of the module in the module hierarchy to call. ``args`` and ``kwargs`` represent the arguments to invoke the module on, *including the self argument*. - ``call_method`` calls a method on a value. ``name`` is as similar. ``target`` is the string name of the method to apply to the ``self`` argument. ``args`` and ``kwargs`` represent the arguments to invoke the module on, *including the self argument* - ``output`` contains the output of the traced function in its ``args[0]`` attribute. This corresponds to the "return" statement in the Graph printout. """ @compatibility(is_backward_compatible=True) def __init__(self, graph: 'Graph', name: str, op: str, target: 'Target', args: Tuple['Argument', ...], kwargs: Dict[str, 'Argument'], return_type : Optional[Any] = None) -> None: """ Instantiate an instance of ``Node``. Note: most often, you want to use the Graph APIs, i.e. ``Graph.call_module``, ``Graph.call_method``, etc. rather than instantiating a ``Node`` directly. Args: graph (Graph): The ``Graph`` to which this ``Node`` should belong. name (str): The name to which the output of this ``Node`` should be assigned op (str): The opcode for this ``Node``. Can be one of 'placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output' target ('Target'): The target this op should call. See the broader ``Node`` docstring for more details. args (Tuple['Argument']): The args to be passed to ``target`` kwargs (Dict[str, 'Argument']): The kwargs to be passed to ``target`` return_type (Optional[Any]): The python type expression representing the type of the output of this node. This field can be used for annotation of values in the generated code or for other types of analyses. 
""" self.graph = graph self.name = name # unique name of value being created assert op in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output', 'root'] self.op = op # the kind of operation = placeholder|call_method|call_module|call_function|get_attr if op == 'call_function': if not callable(target): raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} ' 'but a Callable is expected') else: if not isinstance(target, str): raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} ' 'but a str is expected') self.target = target # for method/module/function, the name of the method/module/function/attr # being invoked, e.g add, layer1, or torch.add # All `Node`-valued inputs. Key is the Node, value is don't-care. # The public API for this is `all_input_nodes`, this private attribute # should not be accessed directly. self._input_nodes : Dict[Node, None] = {} self.__update_args_kwargs(map_arg(args, lambda x: x), map_arg(kwargs, lambda x: x)) # type: ignore[arg-type] # All of the nodes that use the value produced by this Node # Note one user may correspond to several uses, e.g. the node fo ``x + x`` # would appear once here, but represents two uses. # # Is a dict to act as an "ordered set". Keys are significant, value dont-care self.users : Dict['Node', None] = {} # Type expression representing the output value of this node. # This should contain the same class of Type objects that would appear # as type annotations for function inputs/outputs. # # For placeholder nodes, this value will be used to type-annotate the # generated function parameters. # For the return node, this value will be used to type-annotate the # generated function return type. (Note this is a special case. ``return`` # does not produce a value, it's more of a notation. Thus, this value # describes the type of args[0] in the ``return`` node. self.type : Optional[Any] = return_type self._prev = self self._next = self self._erased = False # If set, use this fn to print this node self._repr_fn : Optional[Callable[[Node], str]] = None self._stack_trace : Optional[str] = None # Dictionary to store metadata passes need to do their # transformations. This metadata is preserved across node copies self.meta : Dict[str, Any] = {} @property def next(self) -> 'Node': """ Returns the next ``Node`` in the linked list of Nodes. Returns: The next ``Node`` in the linked list of Nodes. """ return self._next @property def prev(self) -> 'Node': """ Returns the previous ``Node`` in the linked list of Nodes. Returns: The previous ``Node`` in the linked list of Nodes. """ return self._prev @compatibility(is_backward_compatible=True) def prepend(self, x: 'Node') -> None: """ Insert x before this node in the list of nodes in the graph. Example:: Before: p -> self bx -> x -> ax After: p -> x -> self bx -> ax Args: x (Node): The node to put before this node. Must be a member of the same graph. """ assert self.graph == x.graph, "Attempting to move a Node into a different Graph" x._remove_from_list() p = self._prev p._next, x._prev = x, p x._next, self._prev = self, x @compatibility(is_backward_compatible=True) def append(self, x: 'Node') -> None: """ Insert x after this node in the list of nodes in the graph. Equvalent to ``self.next.prepend(x)`` Args: x (Node): The node to put after this node. Must be a member of the same graph. 
""" self._next.prepend(x) def _remove_from_list(self): p, n = self._prev, self._next p._next, n._prev = n, p @property def args(self) -> Tuple[Argument, ...]: """ The tuple of arguments to this ``Node``. The interpretation of arguments depends on the node's opcode. See the :class:`Node` docstring for more information. Assignment to this property is allowed. All accounting of uses and users is updated automatically on assignment. """ return self._args @args.setter def args(self, a : Tuple[Argument, ...]): """ Set the tuple of arguments to this Node. The interpretation of arguments depends on the node's opcode. See the ``fx.Graph`` docstring for more information. """ # DO NOT CALL `__update_args_kwargs` directly. The correct way to # set `args` is via direct assignment, i.e. `node.args = new_args` self.__update_args_kwargs(map_arg(a, lambda x: x), self._kwargs) # type: ignore[arg-type] @property def kwargs(self) -> Dict[str, Argument]: """ The dict of keyword arguments to this ``Node``. The interpretation of arguments depends on the node's opcode. See the :class:`Node` docstring for more information. Assignment to this property is allowed. All accounting of uses and users is updated automatically on assignment. """ return self._kwargs @kwargs.setter def kwargs(self, k : Dict[str, Argument]): """ Set the dict of kwargs to this Node. The interpretation of arguments depends on the node's opcode. See the ``fx.Graph`` docstring for more information. """ # DO NOT CALL `__update_args_kwargs` directly. The correct way to # set `args` is via direct assignment, i.e. `node.kwargs = new_kwargs` self.__update_args_kwargs(self._args, map_arg(k, lambda x: x)) # type: ignore[arg-type] @property def all_input_nodes(self) -> List['Node']: """ Return all Nodes that are inputs to this Node. This is equivalent to iterating over ``args`` and ``kwargs`` and only collecting the values that are Nodes. Returns: List of ``Nodes`` that appear in the ``args`` and ``kwargs`` of this ``Node``, in that order. """ return list(self._input_nodes.keys()) @compatibility(is_backward_compatible=True) def update_arg(self, idx : int, arg : Argument) -> None: """ Update an existing positional argument to contain the new value ``arg``. After calling, ``self.args[idx] == arg``. Args: idx (int): The index into ``self.args`` of the element to update arg (Argument): The new argument value to write into ``args`` """ args = list(self.args) args[idx] = arg self.args = tuple(args) @compatibility(is_backward_compatible=True) def update_kwarg(self, key : str, arg : Argument) -> None: """ Update an existing keyword argument to contain the new value ``arg``. After calling, ``self.kwargs[key] == arg``. Args: key (str): The key in ``self.kwargs`` of the element to update arg (Argument): The new argument value to write into ``kwargs`` """ kwargs = dict(self.kwargs) kwargs[key] = arg self.kwargs = kwargs @property def stack_trace(self) -> Optional[str]: """ Return the Python stack trace that was recorded during tracing, if any. This property is usually populated by `Tracer.create_proxy`. To record stack traces during tracing for debug purposes, set `record_stack_traces = True` on the `Tracer` instance. """ return self._stack_trace @stack_trace.setter def stack_trace(self, trace : Optional[str]): self._stack_trace = trace def __update_args_kwargs(self, new_args : Tuple['Argument', ...], new_kwargs : Dict[str, 'Argument']): """ This API is internal. Do *not* call it directly. 
""" self._args = new_args self._kwargs = new_kwargs for old_use in self._input_nodes.keys(): old_use.users.pop(self) self._input_nodes = {} map_arg(self._args, lambda n: self._input_nodes.setdefault(n)) map_arg(self._kwargs, lambda n: self._input_nodes.setdefault(n)) for new_use in self._input_nodes.keys(): new_use.users.setdefault(self) def __repr__(self) -> str: if self._repr_fn: return self._repr_fn(self) return self.name def _pretty_print_target(self, target): """ Make target printouts more user-friendly. 1) builtins will be printed as `builtins.xyz` 2) operators will be printed as `operator.xyz` 3) other callables will be printed with qualfied name, e.g. torch.add """ if isinstance(target, str): return target if hasattr(target, '__module__'): if not hasattr(target, '__name__'): # Just to be defensive, if we don't have `__name__`, get the # qualname. Not sure if this happens for any members of `operator` # or `builtins`. This fallback path is not as good, since e.g. # things in `operator` have `_operator` as their __module__. return _get_qualified_name(target) if target.__module__ == 'builtins': return f'builtins.{target.__name__}' elif target.__module__ == '_operator': return f'operator.{target.__name__}' return _get_qualified_name(target) @compatibility(is_backward_compatible=True) def format_node(self, placeholder_names: List[str] = None, maybe_return_typename: List[str] = None) -> Optional[str]: """ Return a descriptive string representation of ``self``. This method can be used with no arguments as a debugging utility. This function is also used internally in the ``__str__`` method of ``Graph``. Together, the strings in ``placeholder_names`` and ``maybe_return_typename`` make up the signature of the autogenerated ``forward`` function in this Graph's surrounding GraphModule. ``placeholder_names`` and ``maybe_return_typename`` should not be used otherwise. Args: placeholder_names: A list that will store formatted strings representing the placeholders in the generated ``forward`` function. Internal use only. maybe_return_typename: A single-element list that will store a formatted string representing the output of the generated ``forward`` function. Internal use only. Returns: str: If 1) we're using ``format_node`` as an internal helper in the ``__str__`` method of ``Graph``, and 2) ``self`` is a placeholder Node, return ``None``. Otherwise, return a descriptive string representation of the current Node. 
""" if self.op == 'placeholder': assert isinstance(self.target, str) arg_str = self.target arg_str += arg_str + f': {_type_repr(self.type)}' if self.type else '' if placeholder_names: placeholder_names.append(arg_str) return None maybe_typename = f'{_type_repr(self.type)} ' if self.type else '' default_val = '(default=' + str(self.args[0]) + ')' if self.args else '' return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = {self.op}[target={self.target}]{default_val}' elif self.op == 'get_attr': maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else '' return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = ' \ f'{self.op}[target={self._pretty_print_target(self.target)}]' elif self.op == 'output': if self.type and maybe_return_typename: maybe_return_typename[0] = f' -> {_type_repr(self.type)}' return f'return {self.args[0]}' else: maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else '' return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = ' \ f'{self.op}[target={self._pretty_print_target(self.target)}](' \ f'args = {_format_arg(self.args)}, kwargs = {_format_arg(self.kwargs)})' @compatibility(is_backward_compatible=True) def replace_all_uses_with(self, replace_with : 'Node') -> List['Node']: """ Replace all uses of ``self`` in the Graph with the Node ``replace_with``. Args: replace_with (Node): The node to replace all uses of ``self`` with. Returns: The list of Nodes on which this change was made. """ to_process = list(self.users) for use_node in to_process: def maybe_replace_node(n : Node) -> Node: if n == self: return replace_with else: return n new_args = map_arg(use_node.args, maybe_replace_node) new_kwargs = map_arg(use_node.kwargs, maybe_replace_node) assert isinstance(new_args, tuple) assert isinstance(new_kwargs, dict) use_node.__update_args_kwargs(new_args, new_kwargs) assert len(self.users) == 0 return to_process @compatibility(is_backward_compatible=False) def is_impure(self): """ Returns whether this op is impure, i.e. if its op is a placeholder or output, or if a call_function or call_module which is impure. Returns: bool: If the op is impure or not. """ if self.op in {"placeholder", "output"}: return True # Check if an impure function. if self.op == "call_function": return self.target in _side_effectful_functions # Check if an impure module. if self.op == "call_module": assert ( self.graph.owning_module is not None ), "self.graph.owning_module not set for purity check" target_mod = self.graph.owning_module.get_submodule(self.target) assert ( target_mod is not None ), f"Did not find expected submodule target {self.target}" return getattr(target_mod, "_is_impure", False) return False @compatibility(is_backward_compatible=False) def normalized_arguments( self, root : torch.nn.Module, arg_types : Optional[Tuple[Any]] = None, kwarg_types : Optional[Dict[str, Any]] = None, normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]: """ Returns normalized arguments to Python targets. This means that `args/kwargs` will be matched up to the module/functional's signature and return exclusively kwargs in positional order if `normalize_to_only_use_kwargs` is true. Also populates default values. Does not support positional-only parameters or varargs parameters. Supports module calls. May require `arg_types` and `kwarg_types` in order to disambiguate overloads. Args: root (torch.nn.Module): Module upon which to resolve module targets. 
arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. Returns: Returns NamedTuple ArgsKwargsPair, or `None` if not successful. """ if self.op == 'call_function': assert callable(self.target) return normalize_function(self.target, self.args, self.kwargs, arg_types, kwarg_types) # type: ignore[arg-type] elif self.op == 'call_module': assert isinstance(self.target, str) return normalize_module(root, self.target, self.args, self.kwargs) # type: ignore[arg-type] return None @compatibility(is_backward_compatible=True) def replace_input_with(self, old_input: 'Node', new_input: 'Node'): """ Loop through input nodes of ``self``, and replace all instances of ``old_input`` with ``new_input``. Args: old_input (Node): The old input node to be replaced. new_input (Node): The new input node to replace ``old_input``. """ def maybe_replace_node(n : Node) -> Node: return new_input if n == old_input else n new_args = map_arg(self.args, maybe_replace_node) new_kwargs = map_arg(self.kwargs, maybe_replace_node) assert isinstance(new_args, tuple) assert isinstance(new_kwargs, dict) self.__update_args_kwargs(new_args, new_kwargs) @compatibility(is_backward_compatible=True) def map_arg(a: Argument, fn: Callable[[Node], Argument]) -> Argument: """ Apply fn to each Node appearing arg. arg may be a list, tuple, slice, or dict with string keys. """ assert callable(fn), "torch.fx.map_arg(a, fn): fn must be a callable" return map_aggregate(a, lambda x: fn(x) if isinstance(x, Node) else x) @compatibility(is_backward_compatible=True) def map_aggregate(a: Argument, fn: Callable[[Argument], Argument]) -> Argument: """ Apply fn to each Node appearing arg. arg may be a list, tuple, slice, or dict with string keys. """ if isinstance(a, tuple): return tuple(map_aggregate(elem, fn) for elem in a) elif isinstance(a, list): return immutable_list(map_aggregate(elem, fn) for elem in a) elif isinstance(a, dict): return immutable_dict((k, map_aggregate(v, fn)) for k, v in a.items()) elif isinstance(a, slice): return slice(map_aggregate(a.start, fn), map_aggregate(a.stop, fn), map_aggregate(a.step, fn)) else: return fn(a)
44.253333
133
0.611216
[ "MIT" ]
Westlanderz/AI-Plat1
venv/Lib/site-packages/torch/fx/node.py
26,552
Python
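The torch/fx/node.py excerpt above documents how a Node's ``args``/``kwargs`` are read and rewritten and how ``replace_all_uses_with`` rewires users. A minimal sketch of driving those APIs from a traced GraphModule follows; the module ``M`` and the add-to-mul rewrite are illustrative and not part of the file.

# Hypothetical usage sketch for the Node APIs documented above.
import operator
import torch
import torch.fx


class M(torch.nn.Module):
    def forward(self, x, y):
        return x + y


gm = torch.fx.symbolic_trace(M())

for node in gm.graph.nodes:
    # call_function nodes keep their callable in `target` and their inputs
    # in `args`/`kwargs`, as described in the docstrings above.
    if node.op == 'call_function' and node.target in (operator.add, torch.add):
        with gm.graph.inserting_after(node):
            new_node = gm.graph.call_function(operator.mul, node.args, node.kwargs)
        # Rewire every user of the old node, then drop it from the graph.
        node.replace_all_uses_with(new_node)
        gm.graph.erase_node(node)

gm.recompile()
print(gm(torch.tensor(2.0), torch.tensor(3.0)))  # 6.0 after the rewrite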
""" Copyright 2021 Anderson Faustino da Silva Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import sys from absl import app, flags, logging from tqdm import tqdm from optcache.essentials import IO from optcache.essentials import Goals from optcache.algorithms import SGA flags.DEFINE_integer('generations', 100, 'Number of generations') flags.DEFINE_integer('seed', None, 'The seed') flags.DEFINE_integer('dimension', 100, 'Poblem dimension (individual length)') flags.DEFINE_integer('population', 100, 'Population size') flags.DEFINE_integer('param_s', 1, 'Number of best individuals to use or size of the tournament') flags.DEFINE_float('param_m', 1.0, 'Distribution index') flags.DEFINE_float('cr', 0.9, 'Crossover probability') flags.DEFINE_float('m', 0.1, 'Mutation probability') flags.DEFINE_enum('mutation', 'polynomial', ['polynomial', 'gaussian', 'uniform'], 'Mutation') flags.DEFINE_enum('selection', 'tournament', ['tournament', 'truncated'], 'Selection') flags.DEFINE_enum('crossover', 'exponential', ['exponential', 'binomial', 'single'], 'Cossover') flags.DEFINE_string('passes_filename', None, 'Filename (yaml) that describes the passes to use') flags.mark_flag_as_required('passes_filename') def execute(argv): """Generate genetic sequences for each benchmark""" del argv FLAGS = flags.FLAGS # The benchmarks benchmarks = IO.load_yaml(FLAGS.benchmarks_filename) if not benchmarks: logging.error('There are no benchmarks to process') sys.exit(1) # Verify benchmark directory if not os.path.isdir(FLAGS.benchmarks_directory): logging.error('Benchmarks directory {} does not exist.'.format( FLAGS.benchmarks_directory) ) sys.exit(1) # Create the results directory try: os.makedirs(FLAGS.results_directory) except FileExistsError: pass # Initialize a SGA object sga = SGA(FLAGS.generations, FLAGS.population, FLAGS.cr, FLAGS.m, FLAGS.param_m, FLAGS.param_s, FLAGS.crossover, FLAGS.mutation, FLAGS.selection, FLAGS.seed, FLAGS.dimension, FLAGS.passes_filename, Goals.prepare_goals(FLAGS.goals, FLAGS.weights), 'opt', FLAGS.benchmarks_directory, FLAGS.working_set, FLAGS.times, FLAGS.tool, FLAGS.verify_output) # Process each benchmark for benchmark in tqdm(benchmarks, desc='Processing'): index = benchmark.find('.') bench_dir = benchmark[:index] bench_name = benchmark[index+1:] bench_dir = os.path.join(FLAGS.results_directory, bench_dir) # Create the results directory for the suite try: os.makedirs(bench_dir) except FileExistsError: pass filename = '{}/{}.yaml'.format(bench_dir, bench_name) if FLAGS.verify_report and os.path.isfile(filename): continue sga.run(benchmark) if sga.results: IO.dump_yaml(sga.results, filename, FLAGS.report_only_the_best) # Execute if __name__ == '__main__': flags.DEFINE_list('goals', None, 'Goals') flags.DEFINE_list('weights', None, 'Weights') flags.DEFINE_string('benchmarks_directory', None, 'Benchmarks directory') flags.DEFINE_integer('working_set', 0, 'Working set', lower_bound=0) flags.DEFINE_integer('times', 3, 'Execution/compile times', lower_bound=3) flags.DEFINE_enum('tool', 'perf', ['perf', 'hyperfine'], 
'Execution tool') flags.DEFINE_boolean('verify_output', False, 'The value of the goal is only valid if the ouput is correct') # app flags.DEFINE_string('benchmarks_filename', None, 'Benchmarks') flags.DEFINE_string('results_directory', None, 'Results directory') flags.DEFINE_boolean('verify_report', True, 'Do not process the benchmark if a report exists') flags.DEFINE_boolean('report_only_the_best', False, 'Store only the best result') flags.mark_flag_as_required('goals') flags.mark_flag_as_required('weights') flags.mark_flag_as_required('benchmarks_filename') flags.mark_flag_as_required('benchmarks_directory') flags.mark_flag_as_required('results_directory') app.run(execute)
31.732984
87
0.550074
[ "Apache-2.0" ]
ComputerSystemsLab/OptimizationCache
examples/algorithms/sga.py
6,061
Python
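The sga.py script above assumes benchmark identifiers of the form "<suite>.<name>" and writes one YAML report per benchmark under the results directory. A small standalone sketch of that path convention, with a hypothetical benchmark id and results directory:

# Sketch of the per-benchmark results path derived in sga.py above;
# the benchmark id and directory used here are placeholders.
import os

def result_filename(results_directory: str, benchmark: str) -> str:
    index = benchmark.find('.')
    suite, name = benchmark[:index], benchmark[index + 1:]
    bench_dir = os.path.join(results_directory, suite)
    os.makedirs(bench_dir, exist_ok=True)
    return os.path.join(bench_dir, '{}.yaml'.format(name))

print(result_filename('/tmp/results', 'MiBench.automotive-bitcount'))
# /tmp/results/MiBench/automotive-bitcount.yaml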
from flask import Flask app = Flask(__name__) app.config.from_object('instapurge.settings') app.secret_key = app.config['SECRET_KEY'] import instapurge.views
20
45
0.79375
[ "MIT" ]
kushmansingh/instapurge
instapurge/__init__.py
160
Python
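The instapurge package initializer above loads configuration with ``app.config.from_object('instapurge.settings')``. As a hypothetical sketch (not taken from the repository), a compatible settings module only needs uppercase attributes, since Flask's config loader ignores lowercase names:

# instapurge/settings.py -- hypothetical contents, values are placeholders.
SECRET_KEY = 'change-me'   # read explicitly as app.secret_key above
DEBUG = False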
#!/usr/bin/env python import versioneer from setuptools import setup long_description = """\ pg8000 ------ pg8000 is a Pure-Python interface to the PostgreSQL database engine. It is \ one of many PostgreSQL interfaces for the Python programming language. pg8000 \ is somewhat distinctive in that it is written entirely in Python and does not \ rely on any external libraries (such as a compiled python module, or \ PostgreSQL's libpq library). pg8000 supports the standard Python DB-API \ version 2.0. pg8000's name comes from the belief that it is probably about the 8000th \ PostgreSQL interface for Python.""" cmdclass = dict(versioneer.get_cmdclass()) version = versioneer.get_version() setup( name="pg8000", version=version, cmdclass=cmdclass, description="PostgreSQL interface library", long_description=long_description, author="Mathieu Fenniak", author_email="[email protected]", url="https://github.com/tlocke/pg8000", license="BSD", python_requires='>=3.5', install_requires=['scramp==1.1.0'], classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: Jython", "Programming Language :: Python :: Implementation :: PyPy", "Operating System :: OS Independent", "Topic :: Database :: Front-Ends", "Topic :: Software Development :: Libraries :: Python Modules", ], keywords="postgresql dbapi", packages=("pg8000",) )
34.818182
79
0.679373
[ "BSD-3-Clause" ]
JonathanRRogers/pg8000
setup.py
1,915
Python
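Since the setup.py above packages pg8000 as a DB-API 2.0 driver, a minimal usage sketch looks like the following; the connection parameters are placeholders, not real credentials.

# Minimal DB-API 2.0 sketch for pg8000; credentials below are placeholders.
import pg8000

conn = pg8000.connect(user='postgres', password='secret',
                      host='localhost', database='example')
cur = conn.cursor()
cur.execute('SELECT 1 + 1')
print(cur.fetchone())  # row containing 2
conn.close()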
import os def app_config(app): app.config.from_object('src.conf.flask_api_conf') if os.environ.get('FLASK_API_CONF') is not None: app.config.from_envvar('FLASK_API_CONF') sqlalchemy_database_uri_str = os.environ.get('FLASK_API_SQLALCHEMY_DATABASE_URI') if sqlalchemy_database_uri_str is not None: app.config['SQLALCHEMY_DATABASE_URI'] = sqlalchemy_database_uri_str init_sample_data_str = os.environ.get('FLASK_API_INIT_SAMPLE_DATA') if init_sample_data_str is not None: app.config['INIT_SAMPLE_DATA'] = eval(init_sample_data_str) cleanup_invalidated_tokens_interval_seconds_str = os.environ.get('FLASK_API_CLEANUP_INVALIDATED_TOKENS_INTERVAL_SECONDS') if cleanup_invalidated_tokens_interval_seconds_str is not None: app.config['CLEANUP_INVALIDATED_TOKENS_INTERVAL_SECONDS'] = eval(cleanup_invalidated_tokens_interval_seconds_str) return app
43.52381
125
0.793217
[ "Apache-2.0" ]
chaalia/flask-api
src/conf/app_config.py
914
Python
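The app_config() function above turns string environment values into Python objects with ``eval``. A self-contained sketch of the same override idea, using ``ast.literal_eval`` in place of ``eval`` (a deliberate substitution; the config key shown is taken from the function, the default value is illustrative):

# Sketch of the env-var override pattern used in app_config() above.
import ast
import os

config = {'INIT_SAMPLE_DATA': False}

raw = os.environ.get('FLASK_API_INIT_SAMPLE_DATA')
if raw is not None:
    config['INIT_SAMPLE_DATA'] = ast.literal_eval(raw)  # e.g. "True" -> True

print(config)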
import numpy as np import matplotlib.pyplot as plt import os, sys sys.path.append(os.path.join(os.path.dirname(__file__))) import plot_settings from test_utilities import gausspuls_coeff, gausspulse, gauss_ft # time domain plot fc = 5e6 bandwidth = 2/3 bwr = -6 t_vals = np.linspace(-3/fc, 3/fc, 200) h = gausspulse(t_vals, fc, bandwidth, bwr) plt.figure() plt.plot(t_vals, h) plt.xlim([-6e-7, 6e-7]) plt.grid() plt.xlabel("Time [seconds]") ax = plt.gca() ax.axes.yaxis.set_ticklabels([]) plt.tight_layout() fp = os.path.join(os.path.dirname(__file__), "figures", "_fig1p6a.pdf") plt.savefig(fp, dpi=300) # frequency domain pulse f_vals = np.linspace(-3*fc-1e3, 3*fc+1e3, 1000) a = gausspuls_coeff(fc, bandwidth, bwr) H = gauss_ft(f_vals, a, fc=fc) H = H / max(H) plt.figure() plt.semilogx(f_vals, 20*np.log10(np.abs(H))) plt.axvline(x=fc, c='r', label="$f_c$") plt.grid() plt.autoscale(enable=True, axis='x', tight=True) plt.ylabel("[dB]") plt.legend(loc=3) plt.xlabel("Frequency [Hz]") plt.ylim([-40,0]) plt.tight_layout() fp = os.path.join(os.path.dirname(__file__), "figures", "_fig1p6b.pdf") plt.savefig(fp, dpi=300) plt.show()
22.411765
71
0.702537
[ "MIT" ]
ebezzam/frius
report_results/fig1p6_pulse_shape.py
1,143
Python
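The plotting script above builds a 5 MHz Gaussian-modulated pulse with project helpers from test_utilities. Assuming those helpers mirror scipy's definition (an assumption; the exact coefficients live in the project), an equivalent standalone time-domain sketch is:

# Standalone sketch of the same pulse using scipy.signal.gausspulse.
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

fc = 5e6                       # 5 MHz centre frequency, as above
t = np.linspace(-3 / fc, 3 / fc, 200)
h = signal.gausspulse(t, fc=fc, bw=2 / 3, bwr=-6)

plt.plot(t, h)
plt.xlabel('Time [seconds]')
plt.show()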
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apibox.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
28.782609
73
0.678248
[ "MIT" ]
woodonggyu/apibox
manage.py
662
Python
#!/usr/bin/python3 import subprocess import sys import json import math import os from os.path import expanduser from tempfile import TemporaryFile def get_workspace(): handle = subprocess.Popen( ["i3-msg", "-t", "get_workspaces"], stdout=subprocess.PIPE) output = handle.communicate()[0] data = json.loads(output.decode()) data = sorted(data, key=lambda k: k['name']) for i in data: if(i['focused']): return i['name'] def get_workspaces(): handle = subprocess.Popen( ["i3-msg", "-t", "get_workspaces"], stdout=subprocess.PIPE) output = handle.communicate()[0] data = json.loads(output.decode()) data = sorted(data, key=lambda k: k['name']) arr = [] for i in data: arr.append(i['name']) return arr def move_to(num): subprocess.Popen( ["i3-msg", "move container to workspace " + str(num)], stdout=subprocess.PIPE) def go_to(num): subprocess.Popen(["i3-msg", "workspace "+str(num)], stdout=subprocess.PIPE) def dmenu_fetch(inputstr): t = TemporaryFile() t.write(bytes(inputstr, 'UTF-8')) t.seek(0) dmenu_run = subprocess.Popen( ["dmenu", "-b"], stdout=subprocess.PIPE, stdin=t) output = (dmenu_run.communicate()[0]).decode().strip() return output def open_app(workspace): home = expanduser("~") cache = home+"/.cache/dmenu_run" check_new_programs(home, cache) applications = open(cache) dmenu_run = subprocess.Popen( ["dmenu", "-b"], stdout=subprocess.PIPE, stdin=applications) output = (dmenu_run.communicate()[0]).decode().strip() subprocess.Popen( ["i3-msg", "workspace " + workspace + "; exec " + output], stdout=subprocess.PIPE) def check_new_programs(home, cachefile): PATH = os.environ.get('PATH') check = subprocess.Popen( [home + "/.i3/scripts/dmenu_update"], stdout=subprocess.PIPE) check.communicate() if len(sys.argv) < 1: print("Error not enough arguements") else: command = sys.argv[1] switch_number = 1 # default switch number if len(sys.argv) == 3: # they passed in a number to move to try: switch_number = int(sys.argv[2]) except ValueError: pass # get the workspace number workspace_name = get_workspace() workspace_val = 1 # default value if name parseing fails workspace_prefix = '' try: match_set = '0123456789-' # only look for digits in the number workspace_val = int( ''.join( filter( lambda x: x in match_set, workspace_name))) # include - in the ignore list incase it is a negative number workspace_prefix = ''.join( filter( lambda x: x not in match_set, workspace_name)) except ValueError: pass print(workspace_prefix) # handle the commands if command == 'up': workspace_val += 10 elif command == 'down': workspace_val -= 10 elif command == 'next': workspace_val += 1 elif command == 'prev': workspace_val -= 1 elif command == 'go': # go to workspace in block workspace_rounded = int(math.floor(workspace_val/10))*10 workspace_rounded += switch_number go_to(workspace_prefix + str(workspace_rounded)) elif command == 'move': # move the current container to the selected workspace workspace_rounded = int(math.floor(workspace_val/10))*10 workspace_rounded += switch_number move_to(workspace_prefix + str(workspace_rounded)) elif command == 'open': open_app(workspace_name) elif command == 'dynamic': # dynamic tagging command2 = sys.argv[2] workspaces = get_workspaces() inputstr = '\n'.join(workspaces) result = dmenu_fetch(inputstr) if command2 == 'go': go_to(result) elif command2 == 'move': move_to(result) if len(sys.argv) == 3: # not a go or move, command2 is argv2 command2 = sys.argv[2] if command == 'up' or command == 'down' or command == 'prev' or command == 'next': if command2 == 'go': go_to(workspace_prefix + 
str(workspace_val)) elif command2 == 'move': move_to(workspace_prefix + str(workspace_val))
30.791667
90
0.600361
[ "MIT" ]
SecLion77/i3_config
scripts/workspace_controller.py
4,434
Python
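The 'go' and 'move' commands in the i3 workspace controller above round the current workspace number down to its block of ten and then add the requested slot. A pure-Python sketch of that arithmetic, with illustrative inputs:

# Block arithmetic used by the 'go'/'move' commands above.
import math

def target_workspace(current_val: int, switch_number: int) -> int:
    block_start = int(math.floor(current_val / 10)) * 10
    return block_start + switch_number

print(target_workspace(23, 4))   # 24 -> stays in the 20s block
print(target_workspace(41, 9))   # 49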
# this is a library that can be used to update and create parts # of the bulkdata import urllib import json import time from django.conf import settings from apps.bulk.models import Character, Corporation, Alliance from apps.static.models import Crpnpccorporations from connection import connection #Make sure not to many requests are made to evewho.com def who_connect(): timestamp = int(time.time()) if who_connect.timestamp + 30 >= timestamp: who_connect.counter += 1 if who_connect.counter == settings.EVE_WHO_REQUESTS: time.sleep(30) who_connect.counter = 0 who_connect.timestamp = timestamp else: who_connect.timestamp = timestamp who_connect.counter = 0 who_connect.timestamp = int(time.time()) who_connect.counter = 0 #get the basepart of the api url def get_url(category, pk, page=0): return "http://evewho.com/api.php?type=%s&id=%d&page=%d" % ( category, pk, page ) # get the data from url def json_object(url): response = urllib.urlopen(url) data = json.loads(response.read()) return data #temp function def remaining_alliances(): id_list = [] for alli in Alliance.objects.all(): if not Corporation.objects.filter(allianceid=alli.allianceid).exists(): id_list.append(alli.allianceid) for pk in id_list: pages = True page = 0 while pages: who_connect() data = json_object(get_url("allilist", pk, page=page)) for char in data['characters']: if not Character.objects.filter( characterid=char['character_id'] ).exists(): Character.objects.create( characterid=char["character_id"], corporationid=char["corporation_id"], allianceid=char["alliance_id"], name=char["name"], ) if not Corporation.objects.filter( corporationid=char["corporation_id"] ).exists(): corp = getattr(connection, "corporationsheet")( char["corporation_id"] ) try: corp = Corporation( corporationid=corp.corporationID, corporationname=corp.corporationName, ticker=corp.ticker, ceoid=corp.ceoID, ceoname=corp.ceoName, allianceid=corp.allianceID, alliancename=corp.allianceName, stationid=corp.stationID, description=unicode(corp.description), url=corp.url, taxrate=int(corp.taxRate), membercount=corp.memberCount, ) corp.save() print corp.corporationname except Exception, e: print e if len(data['characters']) == 200: page += 1 else: pages = False
32.460784
79
0.522501
[ "MIT" ]
Sult/evehub
utils/whoapi.py
3,311
Python
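who_connect() in the whoapi module above throttles evewho.com requests to a fixed budget per 30-second window. A Python 3 sketch of the same idea (the request limit is illustrative; the original reads it from Django settings):

# Sketch of the 30-second-window throttling idea behind who_connect().
import time

class Throttle:
    def __init__(self, max_requests: int, window: float = 30.0):
        self.max_requests = max_requests
        self.window = window
        self.counter = 0
        self.window_start = time.time()

    def wait(self) -> None:
        now = time.time()
        if now - self.window_start < self.window:
            self.counter += 1
            if self.counter >= self.max_requests:
                time.sleep(self.window)      # budget used up: back off
                self.counter = 0
                self.window_start = time.time()
        else:
            self.window_start = now          # new window, reset the budget
            self.counter = 0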
# Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The VMware API utility module. """ from oslo.config import cfg from oslo.vmware import vim_util as vutil import suds from nova.i18n import _ from nova.openstack.common import log as logging vmware_opts = cfg.IntOpt('maximum_objects', default=100, help='The maximum number of ObjectContent data ' 'objects that should be returned in a single ' 'result. A positive value will cause the ' 'operation to suspend the retrieval when the ' 'count of objects reaches the specified ' 'maximum. The server may still limit the count ' 'to something less than the configured value. ' 'Any remaining objects may be retrieved with ' 'additional requests.') CONF = cfg.CONF CONF.register_opt(vmware_opts, 'vmware') LOG = logging.getLogger(__name__) def object_to_dict(obj, list_depth=1): """Convert Suds object into serializable format. The calling function can limit the amount of list entries that are converted. """ d = {} for k, v in suds.sudsobject.asdict(obj).iteritems(): if hasattr(v, '__keylist__'): d[k] = object_to_dict(v, list_depth=list_depth) elif isinstance(v, list): d[k] = [] used = 0 for item in v: used = used + 1 if used > list_depth: break if hasattr(item, '__keylist__'): d[k].append(object_to_dict(item, list_depth=list_depth)) else: d[k].append(item) else: d[k] = v return d def get_moref(value, type): return vutil.get_moref(value, type) def get_object_properties(vim, collector, mobj, type, properties): """Gets the properties of the Managed object specified.""" client_factory = vim.client.factory if mobj is None: return None usecoll = collector if usecoll is None: usecoll = vim.service_content.propertyCollector property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = (properties is None or len(properties) == 0) property_spec.pathSet = properties property_spec.type = type object_spec = client_factory.create('ns0:ObjectSpec') object_spec.obj = mobj object_spec.skip = False property_filter_spec.propSet = [property_spec] property_filter_spec.objectSet = [object_spec] options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx(usecoll, specSet=[property_filter_spec], options=options) def get_dynamic_property(vim, mobj, type, property_name): """Gets a particular property of the Managed Object.""" property_dict = get_dynamic_properties(vim, mobj, type, [property_name]) return property_dict.get(property_name) def get_dynamic_properties(vim, mobj, type, property_names): """Gets the specified properties of the Managed Object.""" obj_content = get_object_properties(vim, None, mobj, type, property_names) if obj_content is None: return {} if hasattr(obj_content, 'token'): cancel_retrieve(vim, obj_content.token) property_dict = {} if obj_content.objects: if hasattr(obj_content.objects[0], 'propSet'): dynamic_properties = 
obj_content.objects[0].propSet if dynamic_properties: for prop in dynamic_properties: property_dict[prop.name] = prop.val # The object may have information useful for logging if hasattr(obj_content.objects[0], 'missingSet'): for m in obj_content.objects[0].missingSet: LOG.warning(_("Unable to retrieve value for %(path)s " "Reason: %(reason)s"), {'path': m.path, 'reason': m.fault.localizedMessage}) return property_dict def get_objects(vim, type, properties_to_collect=None, all=False): """Gets the list of objects of the type specified.""" return vutil.get_objects(vim, type, CONF.vmware.maximum_objects, properties_to_collect, all) def get_inner_objects(vim, base_obj, path, inner_type, properties_to_collect=None, all=False): """Gets the list of inner objects of the type specified.""" client_factory = vim.client.factory base_type = base_obj._type traversal_spec = vutil.build_traversal_spec(client_factory, 'inner', base_type, path, False, []) object_spec = vutil.build_object_spec(client_factory, base_obj, [traversal_spec]) property_spec = vutil.build_property_spec(client_factory, type_=inner_type, properties_to_collect=properties_to_collect, all_properties=all) property_filter_spec = vutil.build_property_filter_spec(client_factory, [property_spec], [object_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx( vim.service_content.propertyCollector, specSet=[property_filter_spec], options=options) def cancel_retrieve(vim, token): """Cancels the retrieve operation.""" return vim.CancelRetrievePropertiesEx( vim.service_content.propertyCollector, token=token) def continue_to_get_objects(vim, token): """Continues to get the list of objects of the type specified.""" return vim.ContinueRetrievePropertiesEx( vim.service_content.propertyCollector, token=token) def get_prop_spec(client_factory, spec_type, properties): """Builds the Property Spec Object.""" prop_spec = client_factory.create('ns0:PropertySpec') prop_spec.type = spec_type prop_spec.pathSet = properties return prop_spec def get_obj_spec(client_factory, obj, select_set=None): """Builds the Object Spec object.""" obj_spec = client_factory.create('ns0:ObjectSpec') obj_spec.obj = obj obj_spec.skip = False if select_set is not None: obj_spec.selectSet = select_set return obj_spec def get_prop_filter_spec(client_factory, obj_spec, prop_spec): """Builds the Property Filter Spec Object.""" prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec') prop_filter_spec.propSet = prop_spec prop_filter_spec.objectSet = obj_spec return prop_filter_spec def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties): """Gets the list of properties for the collection of objects of the type specified. """ client_factory = vim.client.factory if len(obj_list) == 0: return [] prop_spec = get_prop_spec(client_factory, type, properties) lst_obj_specs = [] for obj in obj_list: lst_obj_specs.append(get_obj_spec(client_factory, obj)) prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs, [prop_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx( vim.service_content.propertyCollector, specSet=[prop_filter_spec], options=options) def get_about_info(vim): """Get the About Info from the service content.""" return vim.service_content.about
39.431193
79
0.646696
[ "Apache-2.0" ]
Hybrid-Cloud/badam
fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py
8,596
Python
#################### # ES-DOC CIM Questionnaire # Copyright (c) 2017 ES-DOC. All rights reserved. # # University of Colorado, Boulder # http://cires.colorado.edu/ # # This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT]. #################### from django.contrib import messages from django.core.exceptions import ObjectDoesNotExist from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.shortcuts import render_to_response from django.utils.translation import ugettext_lazy as _ from Q.questionnaire.models.models_customizations import QModelCustomization from Q.questionnaire.models.models_realizations import QModelRealization, get_new_realizations, get_existing_realizations, set_owner from Q.questionnaire.models.models_users import is_member_of, is_user_of from Q.questionnaire.views.views_base import validate_view_arguments as validate_view_arguments_base, add_parameters_to_context, get_key_from_request, get_or_create_cached_object from Q.questionnaire.views.views_errors import q_error from Q.questionnaire.views.views_legacy import redirect_legacy_projects from Q.questionnaire.q_utils import evaluate_lazy_object, add_parameters_to_url def validate_view_arguments(project_name=None, ontology_key=None, document_type=None): """ extends the "validate_view_arguments" fn in "views_base" by adding a check that there is a default customization associated w/ this project/ontology/proxy :param project_name: :param ontology_key: :param document_type: :return: """ model_customization = None validity, project, ontology, model_proxy, msg = validate_view_arguments_base( project_name=project_name, ontology_key=ontology_key, document_type=document_type ) if not validity: return validity, project, ontology, model_proxy, model_customization, msg try: model_customization = QModelCustomization.objects.get( project=project, proxy=model_proxy, is_default=True, ) except ObjectDoesNotExist: msg = _( "There is no default customization associated with this document type for this project." "<br/>Please <a href='mailto:{0}?subject=Missing%20Customization&body=Please%20create%20a%20customization%20for%20the%20%22{1}%22%20document%20type.'>contact</a>" " the project administrator for assistance." ).format(project.email, model_proxy.fully_qualified_name) validity = False return validity, project, ontology, model_proxy, model_customization, msg return validity, project, ontology, model_proxy, model_customization, msg @redirect_legacy_projects def q_edit_new(request, project_name=None, ontology_key=None, document_type=None): # save any request parameters... # (in case of redirection) context = add_parameters_to_context(request) # check the arguments... validity, project, ontology, model_proxy, model_customization, msg = validate_view_arguments( project_name=project_name, ontology_key=ontology_key, document_type=document_type ) if not validity: return q_error(request, msg) # check authentication... # (not using "@login_required" b/c some projects ignore authentication) current_user = request.user if project.authenticated: if not current_user.is_authenticated(): next_page = add_parameters_to_url(reverse("account_login"), next=request.path) return HttpResponseRedirect(next_page) if not is_user_of(current_user, project): next_page = reverse("project", kwargs={"project_name": project_name}) msg = "You have tried to view a restricted resource for this project. Please consider joining." 
messages.add_message(request, messages.WARNING, msg) return HttpResponseRedirect(next_page) # get (or set) realization objects from the cache... session_key = get_key_from_request(request) cached_realizations_key = "{0}_realizations".format(session_key) model_realization = get_or_create_cached_object(request.session, cached_realizations_key, get_new_realizations, **{ "project": project, "ontology": ontology, "model_proxy": model_proxy, "key": model_proxy.name, } ) if current_user.is_authenticated(): set_owner(model_realization, evaluate_lazy_object(current_user)) model_realization.is_root = True # TODO: COME UP W/ A BETTER WAY OF DEALING W/ "is_root" # no forms are created here, # instead the load-on-demand paradigm is used, # work out various paths, so that ng can reload things as needed... view_url_dirname = request.path.rsplit('/', 1)[0] api_url_dirname = reverse("realization-list").rsplit('/', 1)[0] # gather all the extra information required by the template... template_context = { "project": project, "ontology": ontology, "proxy": model_proxy, "view_url_dirname": view_url_dirname, "api_url_dirname": api_url_dirname, "session_key": session_key, "customization": model_customization, "realization": model_realization, "read_only": "false", # passing "false" instead of False b/c this is a JS variable } return render_to_response('questionnaire/q_edit.html', template_context, context_instance=context) @redirect_legacy_projects def q_edit_existing(request, project_name=None, ontology_key=None, document_type=None, realization_pk=None): # save any request parameters... # (in case of redirection) context = add_parameters_to_context(request) # check the arguments... validity, project, ontology, model_proxy, model_customization, msg = validate_view_arguments( project_name=project_name, ontology_key=ontology_key, document_type=document_type ) if not validity: return q_error(request, msg) # check authentication... # (not using "@login_required" b/c some projects ignore authentication) current_user = request.user if project.authenticated: if not current_user.is_authenticated(): next_page = add_parameters_to_url(reverse("account_login"), next=request.path) return HttpResponseRedirect(next_page) if not is_user_of(current_user, project): next_page = reverse("project", kwargs={"project_name": project_name}) msg = "You have tried to view a restricted resource for this project. Please consider joining." messages.add_message(request, messages.WARNING, msg) return HttpResponseRedirect(next_page) # get (or set) realization objects from the cache... # note that unlike in "q_edit_new" above, this bit is enclosed in a try/catch block try: session_key = get_key_from_request(request) cached_realizations_key = "{0}_realizations".format(session_key) model_realization = get_or_create_cached_object(request.session, cached_realizations_key, get_existing_realizations, **{ "project": project, "ontology": ontology, "model_proxy": model_proxy, "model_id": realization_pk } ) except ObjectDoesNotExist: msg = "Cannot find a document with an id of '{0}' for that project/ontology/document type combination.".format(realization_pk) return q_error(request, msg) # no forms are created here, # instead the load-on-demand paradigm is used, # work out various paths, so that ng can reload things as needed... 
# (notice these are slightly different than in "q_edit_new" above view_url_dirname = request.path.rsplit('/', 1)[0] api_url_dirname = reverse("realization-detail", kwargs={"pk": model_realization.pk}).rsplit('/', 2)[0] # gather all the extra information required by the template... template_context = { "project": project, "ontology": ontology, "proxy": model_proxy, "view_url_dirname": view_url_dirname, "api_url_dirname": api_url_dirname, "session_key": session_key, "customization": model_customization, "realization": model_realization, "read_only": "false", # passing "false" instead of False b/c this is a JS variable } return render_to_response('questionnaire/q_edit.html', template_context, context_instance=context) @redirect_legacy_projects def q_view_new(request, project_name=None, ontology_key=None, document_type=None): """ this is never exposed by templates but a user might still try to navigate explicitly to this URL just return an error telling them not to try that :param request: :param project_name: :param ontology_key: :param document_type: :return: """ # save any request parameters... # (in case of redirection) context = add_parameters_to_context(request) # check the arguments... validity, project, ontology, model_proxy, model_customization, msg = validate_view_arguments( project_name=project_name, ontology_key=ontology_key, document_type=document_type ) if not validity: return q_error(request, msg) # and then let the user know that they can't vew a _new_ document... msg = "The ES-DOC Questionnaire only supports viewing of <em>existing</em> documents." return q_error(request, msg) @redirect_legacy_projects def q_view_existing(request, project_name=None, ontology_key=None, document_type=None, realization_pk=None): """ this is exactly the same as "q_edit_existing" except: there are no authentication checks, the template_context & template are different. :param request: :param project_name: :param ontology_key: :param document_type: :param realization_pk: :return: """ # save any request parameters... # (in case of redirection) context = add_parameters_to_context(request) # check the arguments... validity, project, ontology, model_proxy, model_customization, msg = validate_view_arguments( project_name=project_name, ontology_key=ontology_key, document_type=document_type ) if not validity: return q_error(request, msg) # no need to check authentication # get (or set) realization objects from the cache... # note that unlike in "q_edit_new" above, this bit is enclosed in a try/catch block try: session_key = get_key_from_request(request) cached_realizations_key = "{0}_realizations".format(session_key) model_realization = get_or_create_cached_object(request.session, cached_realizations_key, get_existing_realizations, **{ "project": project, "ontology": ontology, "model_proxy": model_proxy, "model_id": realization_pk } ) except ObjectDoesNotExist: msg = "Cannot find a document with an id of '{0}' for that project/ontology/document type combination.".format(realization_pk) return q_error(request, msg) # no forms are created here, # instead the load-on-demand paradigm is used, # work out various paths, so that ng can reload things as needed... # (notice these are slightly different than in "q_edit_new" above view_url_dirname = request.path.rsplit('/', 1)[0] api_url_dirname = reverse("realization-detail", kwargs={"pk": model_realization.pk}).rsplit('/', 2)[0] # gather all the extra information required by the template... 
template_context = { "project": project, "ontology": ontology, "proxy": model_proxy, "view_url_dirname": view_url_dirname, "api_url_dirname": api_url_dirname, "session_key": session_key, "customization": model_customization, "realization": model_realization, "read_only": "true", # passing "true" instead of True b/c this is a JS variable } return render_to_response('questionnaire/q_view.html', template_context, context_instance=context) @redirect_legacy_projects def q_get_existing(request, project_name=None, ontology_key=None, document_type=None): """ this is meant to be used from external requests (ie: further_info_url) where uniquely identifying model fields (including pk) are passed if a unique realization cannot be found then an error is returned otherwise the response is routed to "q_edit_existing" :param request: :param project_name: :param ontology_key: :param document_type: :param realization_pk: :return: """ # check the arguments... validity, project, ontology, model_proxy, model_customization, msg = validate_view_arguments( project_name=project_name, ontology_key=ontology_key, document_type=document_type ) if not validity: return q_error(request, msg) model_realizations = QModelRealization.objects.filter(project=project, proxy=model_proxy) additional_parameters = request.GET.copy() for key, value in additional_parameters.iteritems(): if key == "pk" or key == "guid": try: return HttpResponseRedirect(reverse("edit_existing", kwargs={ "project_name": project_name, "ontology_key": ontology_key, "document_type": document_type, "realization_pk": model_realizations.get(**{key: value}).pk })) except (ObjectDoesNotExist, ValueError): msg = "There is no '{0}' document with a {1} of '{2}'".format(model_proxy, key, value) return q_error(request, msg) else: try: property_proxy = model_proxy.property_proxies.get(name=key) if property_proxy.field_type == "ATOMIC": model_realizations = model_realizations.filter(properties__proxy=property_proxy).has_atomic_value(value) elif property_proxy.field_type == "ENUMERATION": formatted_values = [fv for fv in map(lambda v: v.strip(), value.split(',')) if fv] model_realizations = model_realizations.filter(properties__proxy=property_proxy).has_enumeration_values(formatted_values) else: # property_proxy_field_type == "RELATIONSHIP" # TODO: msg = "Unable to support getting a document by relationship_field" return q_error(request, msg) except ObjectDoesNotExist: msg = "There is no '{0}' property for the '{0}' document_type".format(key, model_proxy) return q_error(request, msg) if model_realizations.count() != 1: msg = "Unable to uniquely identify '{0}' document_type with the following properties: '{1}'".format( model_proxy, ", ".join(["{0}: {1}".format(p[0], p[1]) for p in additional_parameters.items()]) ) return q_error(request, msg) return HttpResponseRedirect(reverse("edit_existing", kwargs={ "project_name": project_name, "ontology_key": ontology_key, "document_type": document_type, "realization_pk": model_realizations.first().pk }))
41.569519
178
0.684827
[ "MIT" ]
ES-DOC/esdoc-questionnaire
Q/questionnaire/views/views_realizations.py
15,547
Python
# -*- coding: utf-8 -*- # Copyright 2015 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from tests import unittest from synapse.events.builder import EventBuilder from synapse.crypto.event_signing import add_hashes_and_signatures from unpaddedbase64 import decode_base64 import nacl.signing # Perform these tests using given secret key so we get entirely deterministic # signatures output that we can test against. SIGNING_KEY_SEED = decode_base64( "YJDBA9Xnr2sVqXD9Vj7XVUnmFZcZrlw8Md7kMW+3XA1" ) KEY_ALG = "ed25519" KEY_VER = 1 KEY_NAME = "%s:%d" % (KEY_ALG, KEY_VER) HOSTNAME = "domain" class EventSigningTestCase(unittest.TestCase): def setUp(self): self.signing_key = nacl.signing.SigningKey(SIGNING_KEY_SEED) self.signing_key.alg = KEY_ALG self.signing_key.version = KEY_VER def test_sign_minimal(self): builder = EventBuilder( { 'event_id': "$0:domain", 'origin': "domain", 'origin_server_ts': 1000000, 'signatures': {}, 'type': "X", 'unsigned': {'age_ts': 1000000}, }, ) add_hashes_and_signatures(builder, HOSTNAME, self.signing_key) event = builder.build() self.assertTrue(hasattr(event, 'hashes')) self.assertIn('sha256', event.hashes) self.assertEquals( event.hashes['sha256'], "6tJjLpXtggfke8UxFhAKg82QVkJzvKOVOOSjUDK4ZSI", ) self.assertTrue(hasattr(event, 'signatures')) self.assertIn(HOSTNAME, event.signatures) self.assertIn(KEY_NAME, event.signatures["domain"]) self.assertEquals( event.signatures[HOSTNAME][KEY_NAME], "2Wptgo4CwmLo/Y8B8qinxApKaCkBG2fjTWB7AbP5Uy+" "aIbygsSdLOFzvdDjww8zUVKCmI02eP9xtyJxc/cLiBA", ) def test_sign_message(self): builder = EventBuilder( { 'content': { 'body': "Here is the message content", }, 'event_id': "$0:domain", 'origin': "domain", 'origin_server_ts': 1000000, 'type': "m.room.message", 'room_id': "!r:domain", 'sender': "@u:domain", 'signatures': {}, 'unsigned': {'age_ts': 1000000}, } ) add_hashes_and_signatures(builder, HOSTNAME, self.signing_key) event = builder.build() self.assertTrue(hasattr(event, 'hashes')) self.assertIn('sha256', event.hashes) self.assertEquals( event.hashes['sha256'], "onLKD1bGljeBWQhWZ1kaP9SorVmRQNdN5aM2JYU2n/g", ) self.assertTrue(hasattr(event, 'signatures')) self.assertIn(HOSTNAME, event.signatures) self.assertIn(KEY_NAME, event.signatures["domain"]) self.assertEquals( event.signatures[HOSTNAME][KEY_NAME], "Wm+VzmOUOz08Ds+0NTWb1d4CZrVsJSikkeRxh6aCcUw" "u6pNC78FunoD7KNWzqFn241eYHYMGCA5McEiVPdhzBA" )
31.469565
77
0.622824
[ "Apache-2.0" ]
iot-factory/synapse
tests/crypto/test_event_signing.py
3,619
Python
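The signing test above gets deterministic signatures by building the ed25519 key pair from a fixed base64 seed. A small standalone sketch of that construction (the printed verify key is not asserted here):

# The fixed seed always yields the same ed25519 key pair, which is what
# keeps the expected signatures in the test stable.
import nacl.encoding
import nacl.signing
from unpaddedbase64 import decode_base64

seed = decode_base64("YJDBA9Xnr2sVqXD9Vj7XVUnmFZcZrlw8Md7kMW+3XA1")
signing_key = nacl.signing.SigningKey(seed)
print(signing_key.verify_key.encode(encoder=nacl.encoding.Base64Encoder))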
from core.datasource import DataSource from core.model.member import KarmaMember, Member # karma database service class, perform operations on the configured mongodb. from util.config import config, profile class KarmaService: def __init__(self): self._karma = DataSource(config['database']['host'], config['database']['port'], config['database']['username'], config['database']['password'], config['database']['name']).db.karma self._filter_query = dict(guild_id="", member_id="") self._channel_query = dict(guild_id="", member_id="", channel_id="", message_id="") self._increase_karma = {"$inc": {'karma': 1}} self._decrease_karma = {"$inc": {'karma': -1}} # update or insert karma member if not exist on first karma # check on inc if inc or dec query should be applied. def upsert_karma_member(self, member: KarmaMember, inc: bool) -> None: self._channel_query['guild_id'] = member.guild_id self._channel_query['member_id'] = member.member_id self._channel_query['channel_id'] = member.channel_id self._channel_query['message_id'] = member.message_id if inc: self._karma.update_one(filter=self._channel_query, update=self._increase_karma, upsert=True) else: self._karma.delete_one(filter=self._channel_query) # remove all karma, regardless of channel def delete_all_karma(self, guild_id: str, member_id: str) -> None: filter_member = dict(guild_id=guild_id, member_id=member_id) self._karma.delete_many(filter=filter_member) # aggregate overall karma of a member def aggregate_member_by_karma(self, member: KarmaMember) -> int: self._filter_query['guild_id'] = member.guild_id self._filter_query['member_id'] = member.member_id pipeline = [{"$unwind": "$karma"}, {"$match": self._filter_query}, {"$group": {"_id": {"member_id": "$member_id"}, "karma": {"$sum": "$karma"}}}] doc_cursor = self._karma.aggregate(pipeline) for doc in doc_cursor: return doc['karma'] def aggregate_member_by_channels(self, member: KarmaMember): self._filter_query['guild_id'] = member.guild_id self._filter_query['member_id'] = member.member_id pipeline = [{"$unwind": "$karma"}, {"$match": self._filter_query}, {"$group": {"_id": {"member_id": "$member_id", "channel_id": "$channel_id"}, "karma": {"$sum": "$karma"}}}, {"$limit": profile()['channels']}, {"$sort": {"karma": -1}}] doc_cursor = self._karma.aggregate(pipeline) return doc_cursor class BlockerService: def __init__(self): self._blacklist = DataSource(config['database']['host'], config['database']['port'], config['database']['username'], config['database']['password'], config['database']['name']).db.blacklist self._filter_query = dict(guild_id="", member_id="") def blacklist(self, member: Member): self._filter_query['guild_id'] = member.guild_id self._filter_query['member_id'] = member.member_id self._blacklist.update_one(filter=self._filter_query, update={'$set': { 'guild_id': '{}'.format(member.guild_id), 'member_id': '{}'.format(member.member_id) }}, upsert=True) def whitelist(self, member: Member): self._filter_query['guild_id'] = member.guild_id self._filter_query['member_id'] = member.member_id self._blacklist.delete_one(filter=self._filter_query) def find_member(self, member: Member): self._filter_query['guild_id'] = member.guild_id self._filter_query['member_id'] = member.member_id return self._blacklist.find_one(filter=self._filter_query)
47.428571
100
0.622239
[ "MIT" ]
ianagbip1oti/aura
core/service/karma_service.py
3,984
Python
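aggregate_member_by_karma() above sums a member's karma across all of their per-channel documents with a MongoDB aggregation pipeline. A standalone pymongo sketch of that per-member aggregation; the host, database name, and ids are placeholders:

# Standalone pymongo sketch of the per-member karma aggregation above.
from pymongo import MongoClient

karma = MongoClient('mongodb://localhost:27017')['aura']['karma']

pipeline = [
    {'$match': {'guild_id': '123', 'member_id': '456'}},
    {'$group': {'_id': {'member_id': '$member_id'},
                'karma': {'$sum': '$karma'}}},
]

for doc in karma.aggregate(pipeline):
    print(doc['karma'])   # total karma across all channels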
# Copyright 2021 solo-learn development team. # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies # or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE # FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. import torch from solo.utils.misc import gather def test_gather_layer(): X = torch.randn(10, 30, requires_grad=True) X_gathered = gather(X) assert isinstance(X, torch.Tensor) dummy_loss = torch.mm(X_gathered, X_gathered.T).sum() dummy_loss.backward() assert X.grad is not None
43.84375
91
0.769066
[ "MIT" ]
Froskekongen/solo-learn
tests/utils/test_gather.py
1,403
Python
from pdb import set_trace as T from nmmo import Task from collections import namedtuple class Tier: REWARD_SCALE = 15 EASY = 4 / REWARD_SCALE NORMAL = 6 / REWARD_SCALE HARD = 11 / REWARD_SCALE def player_kills(realm, player): return player.history.playerKills def equipment(realm, player): return player.loadout.defense def exploration(realm, player): return player.history.exploration def foraging(realm, player): return (player.skills.fishing.level + player.skills.hunting.level) / 2.0 PlayerKills = [ Task(player_kills, 1, Tier.EASY), Task(player_kills, 3, Tier.NORMAL), Task(player_kills, 6, Tier.HARD)] Equipment = [ Task(equipment, 1, Tier.EASY), Task(equipment, 10, Tier.NORMAL), Task(equipment, 20, Tier.HARD)] Exploration = [ Task(exploration, 32, Tier.EASY), Task(exploration, 64, Tier.NORMAL), Task(exploration, 127, Tier.HARD)] Foraging = [ Task(foraging, 20, Tier.EASY), Task(foraging, 35, Tier.NORMAL), Task(foraging, 50, Tier.HARD)] All = PlayerKills + Equipment + Exploration + Foraging
25.977778
76
0.652695
[ "MIT" ]
NeuralMMO/baselines
tasks.py
1,169
Python
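The tasks module above pairs each metric function with a target and a tier reward. As an illustrative sketch only (independent of the nmmo Task class, whose evaluation logic is not shown here), such a triple could be scored as a threshold check:

# Illustrative threshold scoring of a (metric, target, reward) triple.
def score(metric_value: float, target: float, reward: float) -> float:
    return reward if metric_value >= target else 0.0

REWARD_SCALE = 15
print(score(metric_value=3, target=3, reward=6 / REWARD_SCALE))   # NORMAL kill tier met
print(score(metric_value=2, target=3, reward=6 / REWARD_SCALE))   # not yet reached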
import numpy as np import sys import os import re import ntpath from subprocess import call #### DISCLAIMER: This script uses the `pythonSubmit.py` format #### that has been replaced by the `runSubmit.py` and #### `compasConfigDefault.yaml` combo as of v02.25.10. #### The `pythonSubmit.py` format will eventually become deprecated. # Check if we are using python 3 python_version = sys.version_info[0] print("python_version =", python_version) class pythonProgramOptions: """ A class to store and access COMPAS program options in python """ # Do './COMPAS --help' to see all options #-- Define variables # environment variable COMPAS_EXECUTABLE_PATH is used for docker runs # if COMPAS_EXECUTABLE_PATH is not set (== None) we assume this is an # interactive run with python3 # if COMPAS_EXECUTABLE_PATH is set (!= None) we assume this is a run # inside a docker container - we have different directories inside a # docker container (src, obj, bin), and the COMPAS executable resides # in the bin directory (rather than the src directory) compas_executable_override = os.environ.get('COMPAS_EXECUTABLE_PATH') if (compas_executable_override is None): # we should fix this one day - we should not assume that the COMPAS executable # is in the 'src' directory. The standard is to put the object files created # by the compile into the 'obj' directory, and the executable files created by # the link in the 'bin' directory. # # for now though, because this is how everybody expects it to be, we'll just check # that the path to the root directory (the parent directory of the directory in # which we expect the executable to reside - for now, 'src') is set to something. compas_root_dir = os.environ.get('COMPAS_ROOT_DIR') assert compas_root_dir is not None, "Unable to locate the COMPAS executable: check that the environment variable COMPAS_ROOT_DIR is set correctly, and the COMPAS executable exists." # construct path to executable # # ideally we wouldn't have the 'src' directory name (or any other directory name) # prepended to the executable name - if we just execute the executable name on its # own, as long as the user navigates to the directory in which the executable resides # they don't need to set the COMPAS_ROOT_DIR environment variable compas_executable = os.path.join(compas_root_dir, 'src/COMPAS') else: compas_executable = compas_executable_override # check that a file with the correct name exists where we expect it to assert os.path.isfile(compas_executable), "Unable to locate the COMPAS executable: check that the environment variable COMPAS_ROOT_DIR is set correctly, and the COMPAS executable exists." enable_warnings = False # option to enable/disable warning messages number_of_systems = 10 # number of systems per batch populationPrinting = False randomSeedFileName = 'randomSeed.txt' if os.path.isfile(randomSeedFileName): random_seed = int(np.loadtxt(randomSeedFileName)) else: random_seed = 0 # If you want a random seed, use: np.random.randint(2,2**63-1) # environment variable COMPAS_LOGS_OUTPUT_DIR_PATH is used primarily for docker runs # if COMPAS_LOGS_OUTPUT_DIR_PATH is set (!= None) it is used as the value for the # --output-path option # if COMPAS_LOGS_OUTPUT_DIR_PATH is not set (== None) the current working directory # is used as the value for the --output-path option compas_logs_output_override = os.environ.get('COMPAS_LOGS_OUTPUT_DIR_PATH') if (compas_logs_output_override is None): output = os.getcwd() output_container = None # names the directory to be created and in which log files are created. 
Default in COMPAS is "COMPAS_Output" else: output = compas_logs_output_override output_container = None # environment variable COMPAS_INPUT_DIR_PATH is used primarily for docker runs # if COMPAS_INPUT_DIR_PATH is set (!= None) it is prepended to input filenames # (such as grid_filename and logfile_definitions) # if COMPAS_INPUT_DIR_PATH is not set (== None) the current working directory # is prepended to input filenames compas_input_path_override = os.environ.get('COMPAS_INPUT_DIR_PATH') #-- option to make a grid of hyperparameter values at which to produce populations. #-- If this is set to true, it will divide the number_of_binaries parameter equally #-- amoungst the grid points (as closely as possible). See the hyperparameterGrid method below #-- for more details. If this is set to True, some hyperparameter values defined in this method'gridOutputs/'+str(i) #-- will be overwritten hyperparameterGrid = False hyperparameterList = False shareSeeds = False notes_hdrs = None # no annotations header strings (no annotations) notes = None # no annotations mode = 'BSE' # evolving single stars (SSE) or binaries (BSE)? grid_filename = 'grid.txt' # grid file name (e.g. 'mygrid.txt') if grid_filename != None: # if the grid filename supplied is already fully-qualified, leave it as is head, tail = ntpath.split(grid_filename) # split into pathname and base filename if head == '' or head == '.': # no path (or CWD) - add path as required grid_filename = tail or ntpath.basename(head) if compas_input_path_override == None: grid_filename = os.getcwd() + '/' + grid_filename.strip("'\"") else: grid_filename = compas_input_path_override + '/' + grid_filename.strip("'\"") logfile_definitions = None # logfile record definitions file name (e.g. 'logdefs.txt') if logfile_definitions != None: # if the grid filename supplied is already fully-qualified, leave it as is head, tail = ntpath.split(logfile_definitions) # split into pathname and base filename if head == '' or head == '.': # no path (or CWD) - add path as required logfile_definitions = tail or ntpath.basename(head) if compas_input_path_override == None: logfile_definitions = os.getcwd() + '/' + logfile_definitions.strip("'\"") else: logfile_definitions = compas_input_path_override + '/' + logfile_definitions.strip("'\"") initial_mass = None # initial mass for SSE initial_mass_1 = None # primary initial mass for BSE initial_mass_2 = None # secondary initial mass for BSE mass_ratio = None eccentricity = None # eccentricity for BSE semi_major_axis = None # semi-major axis for BSE orbital_period = None # orbital period for BSE use_mass_loss = True mass_transfer = True detailed_output = True # WARNING: this creates a data heavy file RLOFPrinting = True evolve_unbound_systems = False quiet = False metallicity = 0.0142 # metallicity for both SSE and BSE - Solar metallicity Asplund+2010 allow_rlof_at_birth = True # allow binaries that have one or both stars in RLOF at birth to evolve? allow_touching_at_birth = False # record binaries that have stars touching at birth in output files? chemically_homogeneous_evolution = 'PESSIMISTIC' # chemically homogeneous evolution. 
Options are 'NONE', 'OPTIMISTIC' and 'PESSIMISTIC' switch_log = False common_envelope_alpha = 1.0 common_envelope_lambda = 0.1 # Only if using 'LAMBDA_FIXED' common_envelope_lambda_prescription = 'LAMBDA_NANJING' # Xu & Li 2010 common_envelope_slope_Kruckow = -5.0/6.0 stellar_zeta_prescription = 'SOBERMAN' common_envelope_revised_energy_formalism = False common_envelope_maximum_donor_mass_revised_energy_formalism = 2.0 common_envelope_recombination_energy_density = 1.5E13 common_envelope_alpha_thermal = 1.0 # lambda = alpha_th*lambda_b + (1-alpha_th)*lambda_g common_envelope_lambda_multiplier = 1.0 # Multiply common envelope lambda by some constant common_envelope_allow_main_sequence_survive = True # Allow main sequence stars to survive CE. Was previously False by default common_envelope_mass_accretion_prescription = 'ZERO' common_envelope_mass_accretion_min = 0.04 # For 'MACLEOD+2014' [Msol] common_envelope_mass_accretion_max = 0.10 # For 'MACLEOD+2014' [Msol] envelope_state_prescription = 'LEGACY' common_envelope_allow_radiative_envelope_survive = False common_envelope_allow_immediate_RLOF_post_CE_survive = False mass_loss_prescription = 'VINK' luminous_blue_variable_prescription = 'HURLEY_ADD' luminous_blue_variable_multiplier = 1.5 overall_wind_mass_loss_multiplier = 1.0 wolf_rayet_multiplier = 1.0 cool_wind_mass_loss_multiplier = 1.0 check_photon_tiring_limit = False circularise_binary_during_mass_transfer = True angular_momentum_conservation_during_circularisation = False mass_transfer_angular_momentum_loss_prescription = 'ISOTROPIC' mass_transfer_accretion_efficiency_prescription = 'THERMAL' mass_transfer_fa = 0.5 # Only if using mass_transfer_accretion_efficiency_prescription = 'FIXED' mass_transfer_jloss = 1.0 # Only if using mass_transfer_angular_momentum_loss_prescription = 'FIXED' mass_transfer_rejuvenation_prescription = 'STARTRACK' mass_transfer_thermal_limit_accretor= 'CFACTOR' mass_transfer_thermal_limit_C= 10.0 eddington_accretion_factor = 1 # multiplication Factor for eddington accretion onto NS&BH case_BB_stability_prescription = 'ALWAYS_STABLE' zeta_Main_Sequence = 2.0 zeta_Radiative_Envelope_Giant = 6.5 maximum_evolution_time = 13700.0 # Maximum physical time a system can be evolved [Myrs] maximum_number_timesteps = 99999 timestep_multiplier = 0.1 # Optional multiplier relative to default time step duration initial_mass_function = 'KROUPA' initial_mass_min = 5.0 # Use 1.0 for LRNe, 5.0 for DCOs [Msol] initial_mass_max = 150.0 # Stellar tracks extrapolated above 50 Msol (Hurley+2000) [Msol] initial_mass_power = 0.0 semi_major_axis_distribution = 'FLATINLOG' semi_major_axis_min = 0.01 # [AU] semi_major_axis_max = 1000.0 # [AU] orbital_period_distribution = 'FLATINLOG' orbital_period_min = 1.1 # [days] orbital_period_max = 1000 # [days] mass_ratio_distribution = 'FLAT' mass_ratio_min = 0.01 mass_ratio_max = 1.0 minimum_secondary_mass = 0.1 # Brown dwarf limit [Msol] eccentricity_distribution = 'ZERO' eccentricity_min = 0.0 eccentricity_max = 1.0 metallicity_distribution = 'ZSOLAR' metallicity_min = 0.0001 metallicity_max = 0.03 pulsar_birth_magnetic_field_distribution = 'ZERO' pulsar_birth_magnetic_field_min = 11.0 # [log10(B/G)] pulsar_birth_magnetic_field_max = 13.0 # [log10(B/G)] pulsar_birth_spin_period_distribution = "ZERO" pulsar_birth_spin_period_min = 10.0 # [ms] pulsar_birth_spin_period_max = 100.0 # [ms] pulsar_magnetic_field_decay_timescale = 1000.0 # [Myr] pulsar_magnetic_field_decay_massscale = 0.025 # [Msol] pulsar_minimum_magnetic_field = 8.0 # [log10(B/G)] 
evolvePulsars = False rotational_velocity_distribution = 'ZERO' neutron_star_equation_of_state = 'SSE' neutrino_mass_loss_BH_formation = "FIXED_MASS" # "FIXED_FRACTION" neutrino_mass_loss_BH_formation_value = 0.1 # Either fraction or mass (Msol) to lose remnant_mass_prescription = 'FRYER2012' # fryer_supernova_engine = 'DELAYED' black_hole_kicks = 'FALLBACK' kick_magnitude_distribution = 'MAXWELLIAN' kick_magnitude_sigma_CCSN_NS = 265.0 # [km/s] kick_magnitude_sigma_CCSN_BH = 265.0 # [km/s] kick_magnitude_sigma_ECSN = 30.0 # [km/s] kick_magnitude_sigma_USSN = 30.0 # [km/s] fix_dimensionless_kick_magnitude = -1 kick_direction = 'ISOTROPIC' kick_direction_power = 0.0 kick_scaling_factor = 1.0 kick_magnitude_maximum = -1.0 kick_magnitude_random = None # (SSE) used to draw the kick magnitude for the star should it undergo a supernova event kick_magnitude = None # (SSE) (drawn) kick magnitude for the star should it undergo a supernova event [km/s] kick_magnitude_random_1 = None # (BSE) used to draw the kick magnitude for the primary star should it undergo a supernova event kick_magnitude_1 = None # (BSE) (drawn) kick magnitude for the primary star should it undergo a supernova event [km/s] kick_theta_1 = None # (BSE) angle between the orbital plane and the 'z' axis of the supernova vector for the primary star should it undergo a supernova event [radians] kick_phi_1 = None # (BSE) angle between 'x' and 'y', both in the orbital plane of the supernova vector, for the primary star should it undergo a supernova event [radians] kick_mean_anomaly_1 = None # (BSE) mean anomaly at the instant of the supernova for the primary star should it undergo a supernova event - should be uniform in [0, 2pi) [radians] kick_magnitude_random_2 = None # (BSE) used to draw the kick velocity for the secondary star should it undergo a supernova event kick_magnitude_2 = None # (BSE) (drawn) kick magnitude for the secondary star should it undergo a supernova event [km/s] kick_theta_2 = None # (BSE) angle between the orbital plane and the 'z' axis of the supernova vector for the secondary star should it undergo a supernova event [radians] kick_phi_2 = None # (BSE) angle between 'x' and 'y', both in the orbital plane of the supernova vector, for the secondary star should it undergo a supernova event [radians] kick_mean_anomaly_2 = None # (BSE) mean anomaly at the instant of the supernova for the secondary star should it undergo a supernova event - should be uniform in [0, 2pi) [radians] muller_mandel_kick_multiplier_BH = 200.0 # scaling prefactor for BH kicks when using the 'MULLERMANDEL' kick magnitude distribution muller_mandel_kick_multiplier_NS = 400.0 # scaling prefactor for NS kicks when using the 'MULLERMANDEL' kick magnitude distribution pair_instability_supernovae = True PISN_lower_limit = 60.0 # Minimum core mass for PISN [Msol] PISN_upper_limit = 135.0 # Maximum core mass for PISN [Msol] pulsation_pair_instability = True PPI_lower_limit = 35.0 # Minimum core mass for PPI [Msol] PPI_upper_limit = 60.0 # Maximum core mass for PPI [Msol] pulsational_pair_instability_prescription = 'MARCHANT' maximum_neutron_star_mass = 2.5 # [Msol] add_options_to_sysparms = 'GRID' # should all option values be added to system parameters files? options are 'ALWAYS', 'GRID', and 'NEVER' log_level = 0 log_classes = [] debug_level = 0 debug_classes = [] logfile_name_prefix = None logfile_type = 'HDF5' hdf5_chunk_size = 100000 hdf5_buffer_size = 1 # set the logfile names here # # set to None (e.g. 
logfile_BSE_supernovae = None) to use the default filename # set to a string (e.g. logfile_BSE_supernovae = 'mySNfilename') to use that string as the filename # set to empty string (e.g. logfile_BSE_supernovae = '""') to disable logging for that file (the file will not be created) # # We don't really need the 'BSE' or 'SSE' prefixes any more - they were put there because # prior to the implementation of the containing folder it was too hard to locate the files # created by a COMPAS run - especially the detailed output files. Now that the output # files are created inside a containing folder for each run there is really no need for # the prefixes - and if we don't have the prefixes we can share some of the options # (e.g. specifying the supernovae filename doesn't need to have separate options for # SSE and BSE - we really just need one (we only ever run in one mode or the other)) # # For now though, I'll leave them as is - we can change this when (if) we decide to # drop the prefixes logfile_common_envelopes = None logfile_detailed_output = None logfile_double_compact_objects = None logfile_rlof_parameters = None logfile_pulsar_evolution = None logfile_supernovae = None logfile_switch_log = None logfile_system_parameters = None debug_to_file = False errors_to_file = False def booleanChoices(self): booleanChoices = [ self.enable_warnings, self.use_mass_loss, self.mass_transfer, self.detailed_output, self.evolve_unbound_systems, self.populationPrinting, self.RLOFPrinting, self.circularise_binary_during_mass_transfer, self.angular_momentum_conservation_during_circularisation, self.pair_instability_supernovae, self.pulsation_pair_instability, self.quiet, self.common_envelope_allow_main_sequence_survive, self.common_envelope_allow_radiative_envelope_survive, self.common_envelope_allow_immediate_RLOF_post_CE_survive, self.evolvePulsars, self.debug_to_file, self.errors_to_file, self.allow_rlof_at_birth, self.allow_touching_at_birth, self.switch_log, self.check_photon_tiring_limit ] return booleanChoices def booleanCommands(self): booleanCommands = [ '--enable-warnings', '--use-mass-loss', '--mass-transfer', '--detailed-output', '--evolve-unbound-systems', '--population-data-printing', '--rlof-printing', '--circularise-binary-during-mass-transfer', '--angular-momentum-conservation-during-circularisation', '--pair-instability-supernovae', '--pulsational-pair-instability', '--quiet', '--common-envelope-allow-main-sequence-survive', '--common-envelope-allow-radiative-envelope-survive', '--common-envelope-allow-immediate-rlof-post-ce-survive', '--evolve-pulsars', '--debug-to-file', '--errors-to-file', '--allow-rlof-at-birth', '--allow-touching-at-birth', '--switch-log', '--check-photon-tiring-limit' ] return booleanCommands def numericalChoices(self): numericalChoices = [ self.number_of_systems, self.initial_mass, self.initial_mass_1, self.initial_mass_2, self.eccentricity, self.semi_major_axis, self.orbital_period, self.metallicity, self.common_envelope_alpha, self.common_envelope_lambda, self.common_envelope_slope_Kruckow, self.common_envelope_alpha_thermal, self.common_envelope_lambda_multiplier, self.luminous_blue_variable_multiplier, self.overall_wind_mass_loss_multiplier, self.wolf_rayet_multiplier, self.cool_wind_mass_loss_multiplier, self.mass_transfer_fa, self.mass_transfer_jloss, self.maximum_evolution_time, self.maximum_number_timesteps, self.timestep_multiplier, self.initial_mass_min, self.initial_mass_max, self.initial_mass_power, self.semi_major_axis_min, self.semi_major_axis_max, 
self.mass_ratio, self.mass_ratio_min, self.mass_ratio_max, self.minimum_secondary_mass, self.eccentricity_min, self.eccentricity_max, self.metallicity_min, self.metallicity_max, self.pulsar_birth_magnetic_field_min, self.pulsar_birth_magnetic_field_max, self.pulsar_birth_spin_period_min, self.pulsar_birth_spin_period_max, self.pulsar_magnetic_field_decay_timescale, self.pulsar_magnetic_field_decay_massscale, self.pulsar_minimum_magnetic_field, self.orbital_period_min, self.orbital_period_max, self.kick_magnitude_sigma_CCSN_NS, self.kick_magnitude_sigma_CCSN_BH, self.fix_dimensionless_kick_magnitude, self.kick_direction_power, self.random_seed, self.mass_transfer_thermal_limit_C, self.eddington_accretion_factor, self.PISN_lower_limit, self.PISN_upper_limit, self.PPI_lower_limit, self.PPI_upper_limit, self.maximum_neutron_star_mass, self.kick_magnitude_sigma_ECSN, self.kick_magnitude_sigma_USSN, self.kick_scaling_factor, self.common_envelope_maximum_donor_mass_revised_energy_formalism, self.common_envelope_recombination_energy_density, self.common_envelope_mass_accretion_max, self.common_envelope_mass_accretion_min, self.zeta_Main_Sequence, self.zeta_Radiative_Envelope_Giant, self.kick_magnitude_maximum, self.kick_magnitude_random, self.kick_magnitude, self.kick_magnitude_random_1, self.kick_magnitude_1, self.kick_theta_1, self.kick_phi_1, self.kick_mean_anomaly_1, self.kick_magnitude_random_2, self.kick_magnitude_2, self.kick_theta_2, self.kick_phi_2, self.kick_mean_anomaly_2, self.muller_mandel_kick_multiplier_BH, self.muller_mandel_kick_multiplier_NS, self.log_level, self.debug_level, self.hdf5_chunk_size, self.hdf5_buffer_size, self.neutrino_mass_loss_BH_formation_value ] return numericalChoices def numericalCommands(self): numericalCommands = [ '--number-of-systems', '--initial-mass', '--initial-mass-1', '--initial-mass-2', '--eccentricity', '--semi-major-axis', '--orbital-period', '--metallicity', '--common-envelope-alpha', '--common-envelope-lambda', '--common-envelope-slope-kruckow', '--common-envelope-alpha-thermal', '--common-envelope-lambda-multiplier', '--luminous-blue-variable-multiplier', '--overall-wind-mass-loss-multiplier', '--wolf-rayet-multiplier', '--cool-wind-mass-loss-multiplier', '--mass-transfer-fa', '--mass-transfer-jloss', '--maximum-evolution-time', '--maximum-number-timestep-iterations', '--timestep-multiplier', '--initial-mass-min', '--initial-mass-max', '--initial-mass-power', '--semi-major-axis-min', '--semi-major-axis-max', '--mass-ratio', '--mass-ratio-min', '--mass-ratio-max', '--minimum-secondary-mass', '--eccentricity-min', '--eccentricity-max', '--metallicity-min', '--metallicity-max', '--pulsar-birth-magnetic-field-distribution-min', '--pulsar-birth-magnetic-field-distribution-max', '--pulsar-birth-spin-period-distribution-min', '--pulsar-birth-spin-period-distribution-max', '--pulsar-magnetic-field-decay-timescale', '--pulsar-magnetic-field-decay-massscale', '--pulsar-minimum-magnetic-field', '--orbital-period-min', '--orbital-period-max', '--kick-magnitude-sigma-CCSN-NS', '--kick-magnitude-sigma-CCSN-BH', '--fix-dimensionless-kick-magnitude', '--kick-direction-power', '--random-seed', '--mass-transfer-thermal-limit-C', '--eddington-accretion-factor', '--pisn-lower-limit', '--pisn-upper-limit', '--ppi-lower-limit', '--ppi-upper-limit', '--maximum-neutron-star-mass', '--kick-magnitude-sigma-ECSN', '--kick-magnitude-sigma-USSN', '--kick-scaling-factor', '--maximum-mass-donor-nandez-ivanova', '--common-envelope-recombination-energy-density', 
'--common-envelope-mass-accretion-max', '--common-envelope-mass-accretion-min', '--zeta-main-sequence', '--zeta-radiative-envelope-giant', '--kick-magnitude-max', '--kick-magnitude-random', '--kick-magnitude', '--kick-magnitude-random-1', '--kick-magnitude-1', '--kick-theta-1', '--kick-phi-1', '--kick-mean-anomaly-1', '--kick-magnitude-random-2', '--kick-magnitude-2', '--kick-theta-2', '--kick-phi-2', '--kick-mean-anomaly-2', '--muller-mandel-kick-multiplier-BH', '--muller-mandel-kick-multiplier-NS', '--log-level', '--debug-level', '--hdf5-chunk-size', '--hdf5-buffer-size', '--neutrino-mass-loss-BH-formation-value' ] return numericalCommands def stringChoices(self): stringChoices = [ self.notes_hdrs, self.notes, self.mode, self.case_BB_stability_prescription, self.chemically_homogeneous_evolution, self.luminous_blue_variable_prescription, self.mass_loss_prescription, self.mass_transfer_angular_momentum_loss_prescription, self.mass_transfer_accretion_efficiency_prescription, self.mass_transfer_rejuvenation_prescription, self.initial_mass_function, self.semi_major_axis_distribution, self.orbital_period_distribution, self.mass_ratio_distribution, self.eccentricity_distribution, self.metallicity_distribution, self.rotational_velocity_distribution, self.remnant_mass_prescription, self.fryer_supernova_engine, self.black_hole_kicks, self.kick_magnitude_distribution, self.kick_direction, self.output, self.output_container, self.common_envelope_lambda_prescription, self.stellar_zeta_prescription, self.mass_transfer_thermal_limit_accretor, self.pulsational_pair_instability_prescription, self.neutron_star_equation_of_state, self.pulsar_birth_magnetic_field_distribution, self.pulsar_birth_spin_period_distribution, self.common_envelope_mass_accretion_prescription, self.envelope_state_prescription, self.logfile_name_prefix, self.logfile_type, self.logfile_definitions, self.grid_filename, self.logfile_common_envelopes, self.logfile_detailed_output, self.logfile_double_compact_objects, self.logfile_pulsar_evolution, self.logfile_rlof_parameters, self.logfile_supernovae, self.logfile_switch_log, self.logfile_system_parameters, self.neutrino_mass_loss_BH_formation, self.add_options_to_sysparms ] return stringChoices def stringCommands(self): stringCommands = [ '--notes-hdrs', '--notes', '--mode', '--case-BB-stability-prescription', '--chemically-homogeneous-evolution', '--luminous-blue-variable-prescription', '--mass-loss-prescription', '--mass-transfer-angular-momentum-loss-prescription', '--mass-transfer-accretion-efficiency-prescription', '--mass-transfer-rejuvenation-prescription', '--initial-mass-function', '--semi-major-axis-distribution', '--orbital-period-distribution', '--mass-ratio-distribution', '--eccentricity-distribution', '--metallicity-distribution', '--rotational-velocity-distribution', '--remnant-mass-prescription', '--fryer-supernova-engine', '--black-hole-kicks', '--kick-magnitude-distribution', '--kick-direction', '--output-path', '--output-container', '--common-envelope-lambda-prescription', '--stellar-zeta-prescription', '--mass-transfer-thermal-limit-accretor', '--pulsational-pair-instability-prescription', '--neutron-star-equation-of-state', '--pulsar-birth-magnetic-field-distribution', '--pulsar-birth-spin-period-distribution', '--common-envelope-mass-accretion-prescription', '--envelope-state-prescription', '--logfile-name-prefix', '--logfile-type', '--logfile-definitions', '--grid', '--logfile-common-envelopes', '--logfile-detailed-output', '--logfile-double-compact-objects', 
'--logfile-pulsar-evolution', '--logfile-rlof-parameters', '--logfile-supernovae', '--logfile-switch-log', '--logfile-system-parameters', '--neutrino-mass-loss-BH-formation', '--add-options-to-sysparms' ] return stringCommands def listChoices(self): listChoices = [ self.log_classes, self.debug_classes ] return listChoices def listCommands(self): listCommands = [ '--log-classes', '--debug-classes' ] return listCommands def generateCommandLineOptionsDict(self): """ This function generates a dictionary mapping COMPAS options to their specified values (or empty strings for boolean options). These can be combined into a string and run directly as a terminal command, or passed to the stroopwafel interface where some of them may be overwritten. Options not to be included in the command line should be set to pythons None (except booleans, which should be set to False) Parameters ----------- self : pythonProgramOptions Contains program options Returns -------- commands : str or list of strs """ booleanChoices = self.booleanChoices() booleanCommands = self.booleanCommands() nBoolean = len(booleanChoices) assert len(booleanCommands) == nBoolean numericalChoices = self.numericalChoices() numericalCommands = self.numericalCommands() nNumerical = len(numericalChoices) assert len(numericalCommands) == nNumerical stringChoices = self.stringChoices() stringCommands = self.stringCommands() nString = len(stringChoices) assert len(stringCommands) == nString listChoices = self.listChoices() listCommands = self.listCommands() nList = len(listChoices) assert len(listCommands) == nList ### Collect all options into a dictionary mapping option name to option value command = {'compas_executable' : self.compas_executable} for i in range(nBoolean): if booleanChoices[i] == True: command.update({booleanCommands[i] : ''}) elif booleanChoices[i] == False: command.update({booleanCommands[i] : 'False'}) for i in range(nNumerical): if not numericalChoices[i] == None: command.update({numericalCommands[i] : str(numericalChoices[i])}) for i in range(nString): if not stringChoices[i] == None: command.update({stringCommands[i] : cleanStringParameter(stringChoices[i])}) for i in range(nList): if listChoices[i]: command.update({listCommands[i] : ' '.join(map(str,listChoices[i]))}) return command def combineCommandLineOptionsDictIntoShellCommand(commandOptions): """ Write out the compas input parameters into a shell string. Ensure the Compas executable is first, and not repeated. Options are non-ordered. """ shellCommand = commandOptions['compas_executable'] del commandOptions['compas_executable'] for key, val in commandOptions.items(): shellCommand += ' ' + key + ' ' + val return shellCommand def cleanStringParameter(str_param): """ clean up string parameters to avoid confusing Boost """ if str_param is not None: # strip any quotes from the ends of the string str_param = str_param.strip("'\"") # escape any unescaped spaces or quotes within the string escapes = [" ", "'", "\""] for escape in escapes: str_param = re.sub(r"(?<!\\){}".format(escape), r"\{}".format(escape), str_param) return str_param if __name__ == "__main__": #-- Get the program options programOptions = pythonProgramOptions() commandOptions = programOptions.generateCommandLineOptionsDict() #-- Convert options into a shell string shellCommand = combineCommandLineOptionsDictIntoShellCommand(commandOptions) #-- Run exectute COMPAS shell string print(shellCommand) call(shellCommand,shell=True)
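# A minimal sketch (illustration only, not part of the original submit script):
# options can be overridden on a pythonProgramOptions instance before the
# dictionary is flattened into a shell command, e.g. to drive several batches
# from one driver script. The attribute names used here exist above; the chosen
# values are arbitrary examples and this helper function itself is hypothetical.
def exampleOverrideAndBuildCommand():
    opts = pythonProgramOptions()
    opts.number_of_systems = 100        # larger batch than the default 10
    opts.metallicity = 0.001            # low-metallicity population
    commandDict = opts.generateCommandLineOptionsDict()
    return combineCommandLineOptionsDictIntoShellCommand(commandDict)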
44.583744
218
0.613088
[ "MIT" ]
IsobelMarguarethe/COMPAS
utils/example_plots/methods_paper_plots/fig_5_HR_diagram/pythonSubmit.py
36,202
Python
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from rest_framework import HTTP_HEADER_ENCODING, authentication

from .exceptions import AuthenticationFailed, InvalidToken, TokenError
from .settings import api_settings

AUTH_HEADER_TYPES = api_settings.AUTH_HEADER_TYPES

if not isinstance(api_settings.AUTH_HEADER_TYPES, (list, tuple)):
    AUTH_HEADER_TYPES = (AUTH_HEADER_TYPES,)

AUTH_HEADER_TYPE_BYTES = set(
    h.encode(HTTP_HEADER_ENCODING)
    for h in AUTH_HEADER_TYPES
)


class JWTAuthentication(authentication.BaseAuthentication):
    """
    An authentication plugin that authenticates requests through a JSON web
    token provided in a request header.
    """
    www_authenticate_realm = 'api'
    media_type = 'application/json'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_model = get_user_model()

    def authenticate(self, request):
        header = self.get_header(request)
        if header is None:
            return None

        raw_token = self.get_raw_token(header)
        if raw_token is None:
            return None

        validated_token = self.get_validated_token(raw_token)

        return self.get_user(validated_token), validated_token

    def authenticate_header(self, request):
        return '{0} realm="{1}"'.format(
            AUTH_HEADER_TYPES[0],
            self.www_authenticate_realm,
        )

    def get_header(self, request):
        """
        Extracts the header containing the JSON web token from the given
        request.
        """
        header = request.META.get(api_settings.AUTH_HEADER_NAME)

        if isinstance(header, str):
            # Work around django test client oddness
            header = header.encode(HTTP_HEADER_ENCODING)

        return header

    def get_raw_token(self, header):
        """
        Extracts an unvalidated JSON web token from the given "Authorization"
        header value.
        """
        parts = header.split()

        if len(parts) == 0:
            # Empty AUTHORIZATION header sent
            return None

        if parts[0] not in AUTH_HEADER_TYPE_BYTES:
            # Assume the header does not contain a JSON web token
            return None

        if len(parts) != 2:
            raise AuthenticationFailed(
                _('Authorization header must contain two space-delimited values'),
                code='bad_authorization_header',
            )

        return parts[1]

    def get_validated_token(self, raw_token):
        """
        Validates an encoded JSON web token and returns a validated token
        wrapper object.
        """
        messages = []
        for AuthToken in api_settings.AUTH_TOKEN_CLASSES:
            try:
                return AuthToken(raw_token)
            except TokenError as e:
                messages.append({
                    'token_class': AuthToken.__name__,
                    'token_type': AuthToken.token_type,
                    'message': e.args[0],
                })

        raise InvalidToken({
            'detail': _('Given token not valid for any token type'),
            'messages': messages,
        })

    def get_user(self, validated_token):
        """
        Attempts to find and return a user using the given validated token.
        """
        try:
            user_id = validated_token[api_settings.USER_ID_CLAIM]
        except KeyError:
            raise InvalidToken(_('Token contained no recognizable user identification'))

        try:
            user = self.user_model.objects.get(**{api_settings.USER_ID_FIELD: user_id})
        except self.user_model.DoesNotExist:
            raise AuthenticationFailed(_('User not found'), code='user_not_found')

        if not user.is_active:
            raise AuthenticationFailed(_('User is inactive'), code='user_inactive')

        return user


class JWTTokenUserAuthentication(JWTAuthentication):
    def get_user(self, validated_token):
        """
        Returns a stateless user object which is backed by the given validated
        token.
        """
        if api_settings.USER_ID_CLAIM not in validated_token:
            # The TokenUser class assumes tokens will have a recognizable user
            # identifier claim.
            raise InvalidToken(_('Token contained no recognizable user identification'))

        return api_settings.TOKEN_USER_CLASS(validated_token)


def default_user_authentication_rule(user):
    # Prior to Django 1.10, inactive users could be authenticated with the
    # default `ModelBackend`. As of Django 1.10, the `ModelBackend`
    # prevents inactive users from authenticating. App designers can still
    # allow inactive users to authenticate by opting for the new
    # `AllowAllUsersModelBackend`. However, we explicitly prevent inactive
    # users from authenticating to enforce a reasonable policy and provide
    # sensible backwards compatibility with older Django versions.
    return True if user is not None and user.is_active else False
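# A minimal configuration sketch (this belongs in a Django project's
# settings.py, not in this module; shown here only as commented-out code):
# JWTAuthentication is enabled through the REST framework settings, e.g.
#
#   REST_FRAMEWORK = {
#       'DEFAULT_AUTHENTICATION_CLASSES': (
#           'rest_framework_simplejwt.authentication.JWTAuthentication',
#       ),
#   }
#
# With that in place, views authenticated this way receive request.user and
# request.auth (the validated token) from authenticate() above.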
34.168919
88
0.654143
[ "MIT" ]
BrianMarquez3/Python-Django
webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py
5,057
Python
import random import numpy as np import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F from .attention import Attention from .baseRNN import BaseRNN if torch.cuda.is_available(): import torch.cuda as device else: import torch as device class DecoderRNN(BaseRNN): r""" Provides functionality for decoding in a seq2seq framework, with an option for attention. Args: vocab_size (int): size of the vocabulary max_len (int): a maximum allowed length for the sequence to be processed hidden_size (int): the number of features in the hidden state `h` sos_id (int): index of the start of sentence symbol eos_id (int): index of the end of sentence symbol n_layers (int, optional): number of recurrent layers (default: 1) rnn_cell (str, optional): type of RNN cell (default: gru) bidirectional (bool, optional): if the encoder is bidirectional (default False) input_dropout_p (float, optional): dropout probability for the input sequence (default: 0) dropout_p (float, optional): dropout probability for the output sequence (default: 0) use_attention(bool, optional): flag indication whether to use attention mechanism or not (default: false) Attributes: KEY_ATTN_SCORE (str): key used to indicate attention weights in `ret_dict` KEY_LENGTH (str): key used to indicate a list representing lengths of output sequences in `ret_dict` KEY_SEQUENCE (str): key used to indicate a list of sequences in `ret_dict` Inputs: inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio - **inputs** (batch, seq_len, input_size): list of sequences, whose length is the batch size and within which each sequence is a list of token IDs. It is used for teacher forcing when provided. (default `None`) - **encoder_hidden** (num_layers * num_directions, batch_size, hidden_size): tensor containing the features in the hidden state `h` of encoder. Used as the initial hidden state of the decoder. (default `None`) - **encoder_outputs** (batch, seq_len, hidden_size): tensor with containing the outputs of the encoder. Used for attention mechanism (default is `None`). - **function** (torch.nn.Module): A function used to generate symbols from RNN hidden state (default is `torch.nn.functional.log_softmax`). - **teacher_forcing_ratio** (float): The probability that teacher forcing will be used. A random number is drawn uniformly from 0-1 for every decoding token, and if the sample is smaller than the given value, teacher forcing would be used (default is 0). Outputs: decoder_outputs, decoder_hidden, ret_dict - **decoder_outputs** (seq_len, batch, vocab_size): list of tensors with size (batch_size, vocab_size) containing the outputs of the decoding function. - **decoder_hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the last hidden state of the decoder. - **ret_dict**: dictionary containing additional information as follows {*KEY_LENGTH* : list of integers representing lengths of output sequences, *KEY_SEQUENCE* : list of sequences, where each sequence is a list of predicted token IDs }. 
""" KEY_ATTN_SCORE = 'attention_score' KEY_LENGTH = 'length' KEY_SEQUENCE = 'sequence' def __init__(self, vocab_size, max_len, hidden_size, sos_id, eos_id, n_layers=1, rnn_cell='gru', bidirectional=False, input_dropout_p=0, dropout_p=0, use_attention=False): super(DecoderRNN, self).__init__(vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell) self.bidirectional_encoder = bidirectional self.rnn = self.rnn_cell(hidden_size, hidden_size, n_layers, batch_first=True, dropout=dropout_p) self.output_size = vocab_size self.max_length = max_len self.use_attention = use_attention self.eos_id = eos_id self.sos_id = sos_id self.init_input = None self.embedding = nn.Embedding(self.output_size, self.hidden_size) if use_attention: self.attention = Attention(self.hidden_size) self.out = nn.Linear(self.hidden_size, self.output_size) def forward_step(self, input_var, hidden, encoder_outputs, function): batch_size = input_var.size(0) output_size = input_var.size(1) embedded = self.embedding(input_var) embedded = self.input_dropout(embedded) output, hidden = self.rnn(embedded, hidden) attn = None if self.use_attention: output, attn = self.attention(output, encoder_outputs) predicted_softmax = function(self.out(output.view(-1, self.hidden_size))).view(batch_size, output_size, -1) return predicted_softmax, hidden, attn def forward(self, inputs=None, encoder_hidden=None, encoder_outputs=None, function=F.log_softmax, teacher_forcing_ratio=0): ret_dict = dict() if self.use_attention: ret_dict[DecoderRNN.KEY_ATTN_SCORE] = list() inputs, batch_size, max_length = self._validate_args(inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio) decoder_hidden = self._init_state(encoder_hidden) use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False decoder_outputs = [] sequence_symbols = [] lengths = np.array([max_length] * batch_size) def decode(step, step_output, step_attn): decoder_outputs.append(step_output) if self.use_attention: ret_dict[DecoderRNN.KEY_ATTN_SCORE].append(step_attn) symbols = decoder_outputs[-1].topk(1)[1] sequence_symbols.append(symbols) eos_batches = symbols.data.eq(self.eos_id) if eos_batches.dim() > 0: eos_batches = eos_batches.cpu().view(-1).numpy() update_idx = ((lengths > step) & eos_batches) != 0 lengths[update_idx] = len(sequence_symbols) return symbols # Manual unrolling is used to support random teacher forcing. # If teacher_forcing_ratio is True or False instead of a probability, the unrolling can be done in graph if use_teacher_forcing: decoder_input = inputs[:, :-1] decoder_output, decoder_hidden, attn = self.forward_step(decoder_input, decoder_hidden, encoder_outputs, function=function) for di in range(decoder_output.size(1)): step_output = decoder_output[:, di, :] if attn is not None: step_attn = attn[:, di, :] else: step_attn = None decode(di, step_output, step_attn) else: decoder_input = inputs[:, 0].unsqueeze(1) for di in range(max_length): decoder_output, decoder_hidden, step_attn = self.forward_step(decoder_input, decoder_hidden, encoder_outputs, function=function) step_output = decoder_output.squeeze(1) symbols = decode(di, step_output, step_attn) decoder_input = symbols ret_dict[DecoderRNN.KEY_SEQUENCE] = sequence_symbols ret_dict[DecoderRNN.KEY_LENGTH] = lengths.tolist() return decoder_outputs, decoder_hidden, ret_dict def _init_state(self, encoder_hidden): """ Initialize the encoder hidden state. 
""" if encoder_hidden is None: return None if isinstance(encoder_hidden, tuple): encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden]) else: encoder_hidden = self._cat_directions(encoder_hidden) return encoder_hidden def _cat_directions(self, h): """ If the encoder is bidirectional, do the following transformation. (#directions * #layers, #batch, hidden_size) -> (#layers, #batch, #directions * hidden_size) """ if self.bidirectional_encoder: h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2) return h def _validate_args(self, inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio): if self.use_attention: if encoder_outputs is None: raise ValueError("Argument encoder_outputs cannot be None when attention is used.") # inference batch size if inputs is None and encoder_hidden is None: batch_size = 1 else: if inputs is not None: batch_size = inputs.size(0) else: if self.rnn_cell is nn.LSTM: batch_size = encoder_hidden[0].size(1) elif self.rnn_cell is nn.GRU: batch_size = encoder_hidden.size(1) # set default input and max decoding length if inputs is None: if teacher_forcing_ratio > 0: raise ValueError("Teacher forcing has to be disabled (set 0) when no inputs is provided.") inputs = Variable(torch.LongTensor([self.sos_id] * batch_size), volatile=True).view(batch_size, 1) if torch.cuda.is_available(): inputs = inputs.cuda() max_length = self.max_length else: max_length = inputs.size(1) - 1 # minus the start of sequence symbol return inputs, batch_size, max_length
46.224299
125
0.641225
[ "Apache-2.0" ]
junyongk/pytorch-seq2seq
seq2seq/models/DecoderRNN.py
9,892
Python
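# A minimal shape-check sketch for the DecoderRNN module above (toy sizes,
# illustration only; assumes the seq2seq package shown above is importable).
# It exercises the decoder without a real encoder by passing a zero initial
# hidden state and full teacher forcing.
if __name__ == "__main__":
    import torch
    from seq2seq.models.DecoderRNN import DecoderRNN

    decoder = DecoderRNN(vocab_size=10, max_len=6, hidden_size=8,
                         sos_id=0, eos_id=1, use_attention=False)
    inputs = torch.LongTensor([[0, 4, 5, 1]])   # (batch=1, seq_len=4)
    hidden = torch.zeros(1, 1, 8)               # (n_layers, batch, hidden_size)
    outputs, hidden, ret = decoder(inputs=inputs, encoder_hidden=hidden,
                                   teacher_forcing_ratio=1.0)
    # one log-softmax tensor of shape (batch, vocab_size) per decoding step
    print(len(outputs), outputs[0].shape)       # expected: 3 torch.Size([1, 10])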
import torch
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint

from mmdet.apis import single_gpu_mergetiles_visualize
from mmdet.core import wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Visualize result with tile-cropped images')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=1,
        # workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cuda')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    model = MMDataParallel(model, device_ids=[0])
    single_gpu_mergetiles_visualize(model, data_loader, 0.8)


if __name__ == "__main__":
    main()
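# A usage sketch (hypothetical paths): the script is driven from the command
# line with a config file and a checkpoint, for example
#
#   python dota_result_visualize.py configs/r3det/my_config.py work_dirs/latest.pth
#
# It then visualizes merged-tile detections with the fixed score threshold of
# 0.8 passed to single_gpu_mergetiles_visualize above.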
30.234375
93
0.716796
[ "Apache-2.0" ]
magnificent1208/r3det-on-mmdetection
rtools/dota_result_visualize.py
1,935
Python
#!/usr/bin/env python # encoding: utf-8 """ Mobi.py Created by Elliot Kroo on 2009-12-25. Copyright (c) 2009 Elliot Kroo. All rights reserved. """ import sys import os import unittest from struct import * from pprint import pprint import mobi.utils from mobi.lz77 import uncompress_lz77 class Mobi: def parse(self) -> object: """ reads in the file, then parses record tables""" self.contents = self.f.read() self.header = self.parseHeader() self.records = self.parseRecordInfoList() self.readRecord0() def readRecord(self, recordnum, disable_compression=False): if self.config: if self.config['palmdoc']['Compression'] == 1 or disable_compression: return self.contents[self.records[recordnum]['record Data Offset']:self.records[recordnum+1]['record Data Offset']]; # elif self.config['palmdoc']['Compression'] == 2: # result = uncompress_lz77(self.contents[self.records[recordnum]['record Data Offset']:self.records[recordnum+1]['record Data Offset']-self.config['mobi']['extra bytes']]) # return result def readImageRecord(self, imgnum): if self.config: recordnum = self.config['mobi']['First Image index'] + imgnum; return self.readRecord(recordnum, disable_compression=True); def author(self): "Returns the author of the book" return self.config['exth']['records'][100] def title(self): "Returns the title of the book" return self.config['mobi']['Full Name'] ########### Private API ########################### def __init__(self, filename: object) -> object: try: if isinstance(filename, str): self.f = open(filename, "rb"); else: self.f = filename; except IOError as e: sys.stderr.write("Could not open %s! " % filename); raise e; self.offset = 0; def __iter__(self): if not self.config: return for record in range(1, self.config['mobi']['First Non-book index'] - 1): yield self.readRecord(record) def parseRecordInfoList(self): records = {}; # read in all records in info list for recordID in range(self.header['number of records']): headerfmt = '>II' headerlen = calcsize(headerfmt) fields = [ "record Data Offset", "UniqueID", ] # create tuple with info results = zip(fields, unpack(headerfmt, self.contents[self.offset:self.offset+headerlen])) # increment offset into file self.offset += headerlen # convert tuple to dictionary resultsDict = utils.toDict(results); # futz around with the unique ID record, as the uniqueID's top 8 bytes are # really the "record attributes": resultsDict['record Attributes'] = (resultsDict['UniqueID'] & 0xFF000000) >> 24; resultsDict['UniqueID'] = resultsDict['UniqueID'] & 0x00FFFFFF; # store into the records dict records[resultsDict['UniqueID']] = resultsDict; return records; def parseHeader(self): headerfmt = '>32shhIIIIII4s4sIIH' headerlen = calcsize(headerfmt) fields = [ "name", "attributes", "version", "created", "modified", "backup", "modnum", "appInfoId", "sortInfoID", "type", "creator", "uniqueIDseed", "nextRecordListID", "number of records" ] # unpack header, zip up into list of tuples results = zip(fields, unpack(headerfmt, self.contents[self.offset:self.offset+headerlen])) # increment offset into file self.offset += headerlen # convert tuple array to dictionary resultsDict = utils.toDict(results); return resultsDict def readRecord0(self): palmdocHeader = self.parsePalmDOCHeader(); MobiHeader = self.parseMobiHeader(); exthHeader = None if MobiHeader['Has EXTH Header']: exthHeader = self.parseEXTHHeader(); self.config = { 'palmdoc': palmdocHeader, 'mobi' : MobiHeader, 'exth' : exthHeader } def parseEXTHHeader(self): headerfmt = '>III' headerlen = calcsize(headerfmt) fields = [ 
'identifier', 'header length', 'record Count' ] # unpack header, zip up into list of tuples results = zip(fields, unpack(headerfmt, self.contents[self.offset:self.offset+headerlen])) # convert tuple array to dictionary resultsDict = utils.toDict(results); self.offset += headerlen; resultsDict['records'] = {}; for record in range(resultsDict['record Count']): recordType, recordLen = unpack(">II", self.contents[self.offset:self.offset+8]); recordData = self.contents[self.offset+8:self.offset+recordLen]; resultsDict['records'][recordType] = recordData; self.offset += recordLen; return resultsDict; def parseMobiHeader(self): headerfmt = '> IIII II 40s III IIIII IIII I 36s IIII 8s HHIIIII' headerlen = calcsize(headerfmt) fields = [ "identifier", "header length", "Mobi type", "text Encoding", "Unique-ID", "Generator version", "-Reserved", "First Non-book index", "Full Name Offset", "Full Name Length", "Language", "Input Language", "Output Language", "Format version", "First Image index", "First Huff Record", "Huff Record Count", "First DATP Record", "DATP Record Count", "EXTH flags", "-36 unknown bytes, if Mobi is long enough", "DRM Offset", "DRM Count", "DRM Size", "DRM Flags", "-Usually Zeros, unknown 8 bytes", "-Unknown", "Last Image Record", "-Unknown", "FCIS record", "-Unknown", "FLIS record", "Unknown" ] # unpack header, zip up into list of tuples results = zip(fields, unpack(headerfmt, self.contents[self.offset:self.offset+headerlen])) # convert tuple array to dictionary resultsDict = utils.toDict(results); resultsDict['Start Offset'] = self.offset; resultsDict['Full Name'] = (self.contents[ self.records[0]['record Data Offset'] + resultsDict['Full Name Offset'] : self.records[0]['record Data Offset'] + resultsDict['Full Name Offset'] + resultsDict['Full Name Length']]) resultsDict['Has DRM'] = resultsDict['DRM Offset'] != 0xFFFFFFFF; resultsDict['Has EXTH Header'] = (resultsDict['EXTH flags'] & 0x40) != 0; self.offset += resultsDict['header length']; def onebits(x, width=16): return len(list(filter(lambda x: x == "1", (str((x>>i)&1) for i in range(width-1, -1, -1))))); resultsDict['extra bytes'] = 2*onebits(unpack(">H", self.contents[self.offset-2:self.offset])[0] & 0xFFFE) return resultsDict; def parsePalmDOCHeader(self): headerfmt = '>HHIHHHH' headerlen = calcsize(headerfmt) fields = [ "Compression", "Unused", "text length", "record count", "record size", "Encryption Type", "Unknown" ] offset = self.records[0]['record Data Offset']; # create tuple with info results = zip(fields, unpack(headerfmt, self.contents[offset:offset+headerlen])) # convert tuple array to dictionary resultsDict = utils.toDict(results); self.offset = offset+headerlen; return resultsDict class MobiTests(unittest.TestCase): def setUp(self): self.mobitest = Mobi("../test/病者生存.mobi"); def testParse(self): self.mobitest.parse(); pprint (self.mobitest.config) def testRead(self): self.mobitest.parse(); content = "" for i in range(1,5): content += self.mobitest.readRecord(i); def testImage(self): self.mobitest.parse(); pprint (self.mobitest.records); for record in range(4): f = open("imagerecord%d.jpg" % record, 'w') f.write(self.mobitest.readImageRecord(record)); f.close(); def testAuthorTitle(self): self.mobitest.parse() self.assertEqual(self.mobitest.author(), 'Charles Darwin') self.assertEqual(self.mobitest.title(), 'The Origin of Species by means '+ 'of Natural Selection, 6th Edition') if __name__ == '__main__': unittest.main()
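# A minimal usage sketch (hypothetical filename, shown as commented-out code
# because a real call needs an actual .mobi file on disk); it uses only the
# public methods defined above:
#
#   book = Mobi("example.mobi")    # or pass an already-open binary file object
#   book.parse()
#   print(book.title(), book.author())
#   first_text_record = book.readRecord(1)
#   cover_bytes = book.readImageRecord(0)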
28.247387
179
0.634883
[ "Apache-2.0" ]
cloudylan/dbooklib
dbookbee/mobi/__init__.py
8,115
Python
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class SecurityRulesOperations: """SecurityRulesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2018_07_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _delete_initial( self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-07-01" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'), 'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore async def begin_delete( self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs ) -> AsyncLROPoller[None]: """Deletes the specified network security rule. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_security_group_name: The name of the network security group. :type network_security_group_name: str :param security_rule_name: The name of the security rule. :type security_rule_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, network_security_group_name=network_security_group_name, security_rule_name=security_rule_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore async def get( self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs ) -> "models.SecurityRule": """Get the specified network security rule. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_security_group_name: The name of the network security group. :type network_security_group_name: str :param security_rule_name: The name of the security rule. 
:type security_rule_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: SecurityRule, or the result of cls(response) :rtype: ~azure.mgmt.network.v2018_07_01.models.SecurityRule :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRule"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-07-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'), 'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('SecurityRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, security_rule_parameters: "models.SecurityRule", **kwargs ) -> "models.SecurityRule": cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRule"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-07-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'), 'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] 
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(security_rule_parameters, 'SecurityRule') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('SecurityRule', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('SecurityRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, security_rule_parameters: "models.SecurityRule", **kwargs ) -> AsyncLROPoller["models.SecurityRule"]: """Creates or updates a security rule in the specified network security group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_security_group_name: The name of the network security group. :type network_security_group_name: str :param security_rule_name: The name of the security rule. :type security_rule_name: str :param security_rule_parameters: Parameters supplied to the create or update network security rule operation. :type security_rule_parameters: ~azure.mgmt.network.v2018_07_01.models.SecurityRule :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either SecurityRule or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.SecurityRule] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRule"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, network_security_group_name=network_security_group_name, security_rule_name=security_rule_name, security_rule_parameters=security_rule_parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('SecurityRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore def list( self, resource_group_name: str, network_security_group_name: str, **kwargs ) -> AsyncIterable["models.SecurityRuleListResult"]: """Gets all security rules in a network security group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_security_group_name: The name of the network security group. 
:type network_security_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SecurityRuleListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.SecurityRuleListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRuleListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-07-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('SecurityRuleListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'} # type: ignore
50.288462
244
0.678967
[ "MIT" ]
Co0olboi/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_security_rules_operations.py
20,920
Python
from .activity import Activity
from .dataset import Dataset
from .utils import from_file
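# A usage sketch (hypothetical argument; from_file's signature is defined in
# .utils, which is not shown here):
#
#   import pymudata
#   dataset = pymudata.from_file("recording.json")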
18
30
0.822222
[ "MIT" ]
b3by/pymudata
pymudata/__init__.py
90
Python