code: string (length 59 to 3.37k)
docstring: string (length 8 to 15.5k)
def _val_from_env(self, env, attr): val = os.environ[env] if attr in ('rules', 'exclude_rules'): return self._rules_from_env(val) elif attr == 'priority': return dict(self._priority_from_env(val)) elif attr in ('wait_command', 'history_limit', 'wait_slow_command', 'num_close_matches'): return int(val) elif attr in ('require_confirmation', 'no_colors', 'debug', 'alter_history', 'instant_mode'): return val.lower() == 'true' elif attr == 'slow_commands': return val.split(':') else: return val
Transforms environment variable strings into Python values.
def parse(self, tokenized): table, trees = _parse(tokenized, self.grammar) if all(r.lhs != self.start for r in table[(0, len(tokenized) - 1)]): raise ParseError('Parsing failed.') parse = trees[(0, len(tokenized) - 1)][self.start] return self._to_tree(revert_cnf(parse))
Parses input, which is a list of tokens.
def set_bios_configuration(irmc_info, settings): bios_config_data = { 'Server': { 'SystemConfig': { 'BiosConfig': {} } } } versions = elcm_profile_get_versions(irmc_info) server_version = versions['Server'].get('@Version') bios_version = \ versions['Server']['SystemConfig']['BiosConfig'].get('@Version') if server_version: bios_config_data['Server']['@Version'] = server_version if bios_version: bios_config_data['Server']['SystemConfig']['BiosConfig']['@Version'] = \ bios_version configs = {} for setting_param in settings: setting_name = setting_param.get("name") setting_value = setting_param.get("value") if isinstance(setting_value, six.string_types): if setting_value.lower() == "true": setting_value = True elif setting_value.lower() == "false": setting_value = False try: type_config, config = BIOS_CONFIGURATION_DICTIONARY[ setting_name].split("_") if type_config in configs.keys(): configs[type_config][config] = setting_value else: configs.update({type_config: {config: setting_value}}) except KeyError: raise BiosConfigNotFound("Invalid BIOS setting: %s" % setting_param) bios_config_data['Server']['SystemConfig']['BiosConfig'].update(configs) restore_bios_config(irmc_info, bios_config_data)
Set BIOS configurations on the server. :param irmc_info: node info :param settings: Dictionary containing the BIOS configuration. :raise: BiosConfigNotFound, if the settings contain an invalid BIOS configuration option.
def addFailure(self, test, err): super().addFailure(test, err) self.test_info(test) self._call_test_results('addFailure', test, err)
Registers a test as a failure. :param test: the test to register :param err: the error the test raised
def create(self, archive, interval=None, **import_args): archive = archive.lower() if hasattr(archive, "lower") else archive if self.is_sync_table(archive, interval, **import_args): manager = SyncTableJobManager(self.client) else: manager = FileImportJobManager(self.client) import_job = manager.create(archive) if interval is None \ else manager.create(archive, interval) import_job.run(**import_args) if import_job.get_id() is None: raise CartoException(_("Import API returned corrupt job details \ when creating dataset")) import_job.refresh() count = 0 while import_job.state in ("enqueued", "queued", "pending", "uploading", "unpacking", "importing", "guessing") \ or (isinstance(manager, SyncTableJobManager) and import_job.state == "created"): if count >= MAX_NUMBER_OF_RETRIES: raise CartoException(_("Maximum number of retries exceeded \ when polling the import API for \ dataset creation")) time.sleep(INTERVAL_BETWEEN_RETRIES_S) import_job.refresh() count += 1 if import_job.state == "failure": raise CartoException(_("Dataset creation was not successful \ because of failed import (error: {error}") .format(error=json.dumps( import_job.get_error_text))) if (import_job.state != "complete" and import_job.state != "created" and import_job.state != "success") \ or import_job.success is False: raise CartoException(_("Dataset creation was not successful \ because of unknown import error")) if hasattr(import_job, "visualization_id") \ and import_job.visualization_id is not None: visualization_id = import_job.visualization_id else: table = TableManager(self.client).get(import_job.table_id) visualization_id = table.table_visualization.get_id() \ if table is not None else None try: return self.get(visualization_id) if visualization_id is not None \ else None except AttributeError: raise CartoException(_("Dataset creation was not successful \ because of unknown error"))
Creating a table means uploading a file or setting up a sync table :param archive: URL to the file (both remote URLs or local paths are supported) or StringIO object :param interval: Interval in seconds. If not None, CARTO will try to set up a sync table against the (remote) URL :param import_args: Arguments to be sent to the import job when run :type archive: str :type interval: int :type import_args: kwargs :return: New dataset object :rtype: Dataset :raise: CartoException
def create_optparser(progname=None): from . import report parser = ArgumentParser(prog=progname) parser.add_argument("dist", nargs=2, help="distributions to compare") add_general_optgroup(parser) add_distdiff_optgroup(parser) add_jardiff_optgroup(parser) add_classdiff_optgroup(parser) report.add_general_report_optgroup(parser) report.add_json_report_optgroup(parser) report.add_html_report_optgroup(parser) return parser
an ArgumentParser instance filled with options and groups appropriate for use with the distdiff command
def _build_grid_search_commands(script: str, params: typing.Iterable[str]) -> typing.Iterable[typing.List[str]]: param_space = OrderedDict() for arg in params: assert '=' in arg name = arg[:arg.index('=')] options = arg[arg.index('=') + 1:] options = ast.literal_eval(options) assert isinstance(options, list), options param_space[name] = options param_names = param_space.keys() commands = [] for values in itertools.product(*[param_space[name] for name in param_names]): command = str(script).split() for name, value in zip(param_names, values): command.append(str(name) + '="' + str(value) + '"') commands.append(command) return commands
Build all grid search parameter configurations. :param script: String of command prefix, e.g. ``cxflow train -v -o log``. :param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'`` or ``'text_param=["hello", "cio"]'``.
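For illustration, here is a self-contained sketch of the expansion described above, reusing the docstring's own example values; it re-implements the loop inline rather than importing the function, so the variable names are assumptions:

import ast
import itertools
from collections import OrderedDict

script = 'cxflow train -v -o log'
params = ['numerical_param=[1, 2]', 'text_param=["hello", "cio"]']

param_space = OrderedDict()
for arg in params:
    name, options = arg.split('=', 1)
    param_space[name] = ast.literal_eval(options)   # e.g. [1, 2]

commands = []
for values in itertools.product(*param_space.values()):
    command = script.split()
    command += ['{}="{}"'.format(name, value) for name, value in zip(param_space, values)]
    commands.append(command)

# commands[0] ->
# ['cxflow', 'train', '-v', '-o', 'log', 'numerical_param="1"', 'text_param="hello"']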
def revive(self, timeout=None): if timeout is not None: self.timeout = timeout self.revive_event.set()
Revive the timeout. :param timeout: If not `None`, specifies a new timeout value to use.
def detect_terminal(_environ=os.environ): if _environ.get('TMUX'): return 'tmux' elif subdict_by_key_prefix(_environ, 'BYOBU'): return 'byobu' elif _environ.get('TERM', '').startswith('screen'): return _environ['TERM'] elif _environ.get('COLORTERM'): return _environ['COLORTERM'] else: return _environ.get('TERM')
Detect the "terminal" you are using. First, this function checks if you are in tmux, byobu, or screen. If not, it uses $COLORTERM [#]_ if defined and falls back to $TERM. .. [#] So, if you are in Gnome Terminal you get "gnome-terminal" instead of "xterm-color".
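A hedged doctest-style illustration of the fallback order with fake environments; it assumes `subdict_by_key_prefix` returns an empty (falsy) mapping when no BYOBU* keys are present:

>>> detect_terminal({'TERM': 'screen-256color'})
'screen-256color'
>>> detect_terminal({'TERM': 'xterm-color', 'COLORTERM': 'gnome-terminal'})
'gnome-terminal'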
def exit_code(self, code): if code is not None and code in [0, 1, 3]: self._exit_code = code else: self.log.warning(u'Invalid exit code')
Set the App exit code. For TC Exchange Apps there are 3 supported exit codes. * 0 indicates a normal exit * 1 indicates a failure during execution * 3 indicates a partial failure Args: code (integer): The exit code value for the app.
def create(cls, arg): obj = cls() obj._connection = arg.connection obj._heading = arg.heading.make_subquery_heading() obj._arg = arg return obj
construct a subquery from arg
def product_data_request(self): msg = StandardSend(self._address, COMMAND_PRODUCT_DATA_REQUEST_0X03_0X00) self._send_msg(msg)
Request product data from a device. Not supported by all devices. Required after 01-Feb-2007.
def _getRegisteredExecutable(exeName): registered = None if sys.platform.startswith('win'): if os.path.splitext(exeName)[1].lower() != '.exe': exeName += '.exe' import _winreg try: key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\ exeName value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key) registered = (value, "from HKLM\\"+key) except _winreg.error: pass if registered and not os.path.exists(registered[0]): registered = None return registered
Windows allows application paths to be registered in the registry.
def validate_sum(parameter_container, validation_message, **kwargs): parameters = parameter_container.get_parameters(False) values = [] for parameter in parameters: if parameter.selected_option_type() in [SINGLE_DYNAMIC, STATIC]: values.append(parameter.value) sum_threshold = kwargs.get('max', 1) if None in values: clean_value = [x for x in values if x is not None] values.remove(None) if sum(clean_value) > sum_threshold: return { 'valid': False, 'message': validation_message } else: if sum(values) > sum_threshold: return { 'valid': False, 'message': validation_message } return { 'valid': True, 'message': '' }
Validate the sum of parameter values. :param parameter_container: The container that uses this validator. :type parameter_container: ParameterContainer :param validation_message: The message if there is a validation error. :type validation_message: str :param kwargs: Keyword arguments. :type kwargs: dict :returns: Dictionary of valid and message. :rtype: dict Note: The code is not the best I wrote, since there are two alternatives. 1. If there is no None, the sum must be equal to 1 2. If there is a None, the sum must be less than 1
def init_celery(project_name): os.environ.setdefault('DJANGO_SETTINGS_MODULE', '%s.settings' % project_name) app = Celery(project_name) app.config_from_object('django.conf:settings') app.autodiscover_tasks(settings.INSTALLED_APPS, related_name='tasks') return app
init celery app without the need of redundant code
def price(self, minimum: float = 10.00, maximum: float = 1000.00) -> str: price = self.random.uniform(minimum, maximum, precision=2) return '{0} {1}'.format(price, self.currency_symbol())
Generate a random price. :param minimum: Minimum value of price. :param maximum: Maximum value of price. :return: Price.
def quick_response(self, status_code): translator = Translator(environ=self.environ) if status_code == 404: self.status(404) self.message(translator.trans('http_messages.404')) elif status_code == 401: self.status(401) self.message(translator.trans('http_messages.401')) elif status_code == 400: self.status(400) self.message(translator.trans('http_messages.400')) elif status_code == 200: self.status(200) self.message(translator.trans('http_messages.200'))
Quickly construct response using a status code
def write_long_at(self, n, pos, pack_into=Struct('>I').pack_into): if 0 <= n <= 0xFFFFFFFF: pack_into(self._output_buffer, pos, n) else: raise ValueError('Long %d out of range 0..0xFFFFFFFF' % n) return self
Write an unsigned 32bit value at a specific position in the buffer. Used for writing tables and frames.
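For reference, the default `pack_into` used above behaves like this minimal sketch (the buffer size and offset here are made up):

import struct

buf = bytearray(8)
struct.Struct('>I').pack_into(buf, 4, 0xDEADBEEF)   # write 4 big-endian bytes at offset 4
# buf == bytearray(b'\x00\x00\x00\x00\xde\xad\xbe\xef')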
def mw(self): counter = collections.Counter(self.seq.lower()) mw_a = counter['a'] * 313.2 mw_t = counter['t'] * 304.2 mw_g = counter['g'] * 289.2 mw_c = counter['c'] * 329.2 mw_u = counter['u'] * 306.2 if self.material == 'dna': return mw_a + mw_t + mw_g + mw_c + 79.0 else: return mw_a + mw_u + mw_g + mw_c + 159.0
Calculate the molecular weight. :returns: The molecular weight of the current sequence in amu. :rtype: float
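A worked example, taking the per-base weights hard-coded in the method above as given, for a hypothetical DNA sequence 'atgc':

>>> round(313.2 + 304.2 + 289.2 + 329.2 + 79.0, 1)   # a + t + g + c + the DNA-specific constant
1314.8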
def autodiscover(): import os import sys import copy from django.utils.module_loading import module_has_submodule from modeltranslation.translator import translator from modeltranslation.settings import TRANSLATION_FILES, DEBUG from importlib import import_module from django.apps import apps mods = [(app_config.name, app_config.module) for app_config in apps.get_app_configs()] for (app, mod) in mods: module = '%s.translation' % app before_import_registry = copy.copy(translator._registry) try: import_module(module) except: translator._registry = before_import_registry if module_has_submodule(mod, 'translation'): raise for module in TRANSLATION_FILES: import_module(module) if DEBUG: try: if sys.argv[1] in ('runserver', 'runserver_plus'): models = translator.get_registered_models() names = ', '.join(m.__name__ for m in models) print('modeltranslation: Registered %d models for translation' ' (%s) [pid: %d].' % (len(models), names, os.getpid())) except IndexError: pass
Auto-discover INSTALLED_APPS translation.py modules and fail silently when not present. This forces an import on them to register. Also import explicit modules.
def TransposeTable(table): transposed = [] rows = len(table) cols = max(len(row) for row in table) for x in range(cols): transposed.append([]) for y in range(rows): if x < len(table[y]): transposed[x].append(table[y][x]) else: transposed[x].append(None) return transposed
Transpose a list of lists, using None to extend all input lists to the same length. For example: >>> TransposeTable( [ [11, 12, 13], [21, 22], [31, 32, 33, 34]]) [ [11, 21, 31], [12, 22, 32], [13, None, 33], [None, None, 34]]
async def render( text: TransText, request: Optional['Request'], multi_line=False) -> Union[Text, List[Text]]: if isinstance(text, str): out = [text] elif isinstance(text, StringToTranslate): out = await text.render_list(request) else: raise TypeError('Provided text cannot be rendered') if multi_line: return out else: return ' '.join(out)
Render either a plain string or a StringToTranslate into an actual string for the specified request.
def send_webhook(config, payload): try: response = requests.post( config['webhook_url'], data=json.dumps(payload, cls=ModelJSONEncoder), headers={config['api_key_header_name']: config['api_key']}, ) except Exception as e: logger.warning('Unable to send webhook: ({0}) {1}'.format( e.__class__.__name__, e, )) else: logger.debug('Webhook response: ({0}) {1}'.format( response.status_code, response.text, ))
Sends an HTTP request to the configured server. All exceptions are suppressed, but each failure emits a warning message in the log.
def cipher(self): cipher = Cipher(*self.mode().aes_args(), **self.mode().aes_kwargs()) return WAES.WAESCipher(cipher)
Generate AES-cipher :return: Crypto.Cipher.AES.AESCipher
def GetSubFields(self, fd, field_names): if isinstance(fd, rdf_protodict.RDFValueArray): for value in fd: for res in self._GetSubField(value, field_names): yield res else: for res in self._GetSubField(fd, field_names): yield res
Gets all the subfields indicated by field_names. This resolves specifications like "Users.special_folders.app_data" where for each entry in the Users protobuf the corresponding app_data folder entry should be returned. Args: fd: The base RDFValue or Array. field_names: A list of strings indicating which subfields to get. Yields: All the subfields matching the field_names specification.
def _createCrossSection(self, linkResult, replaceParamFile): header = linkResult['header'] link = StreamLink(linkNumber=int(header['link']), type=header['xSecType'], numElements=header['nodes'], dx=vrp(header['dx'], replaceParamFile), erode=header['erode'], subsurface=header['subsurface']) link.channelInputFile = self xSection = linkResult['xSection'] if 'TRAPEZOID' in link.type or 'TRAP' in link.type: trapezoidCS = TrapezoidalCS(mannings_n=vrp(xSection['mannings_n'], replaceParamFile), bottomWidth=vrp(xSection['bottom_width'], replaceParamFile), bankfullDepth=vrp(xSection['bankfull_depth'], replaceParamFile), sideSlope=vrp(xSection['side_slope'], replaceParamFile), mRiver=vrp(xSection['m_river'], replaceParamFile), kRiver=vrp(xSection['k_river'], replaceParamFile), erode=xSection['erode'], subsurface=xSection['subsurface'], maxErosion=vrp(xSection['max_erosion'], replaceParamFile)) trapezoidCS.streamLink = link elif 'BREAKPOINT' in link.type: breakpointCS = BreakpointCS(mannings_n=vrp(xSection['mannings_n'], replaceParamFile), numPairs=xSection['npairs'], numInterp=vrp(xSection['num_interp'], replaceParamFile), mRiver=vrp(xSection['m_river'], replaceParamFile), kRiver=vrp(xSection['k_river'], replaceParamFile), erode=xSection['erode'], subsurface=xSection['subsurface'], maxErosion=vrp(xSection['max_erosion'], replaceParamFile)) breakpointCS.streamLink = link for b in xSection['breakpoints']: breakpoint = Breakpoint(x=b['x'], y=b['y']) breakpoint.crossSection = breakpointCS for n in linkResult['nodes']: node = StreamNode(nodeNumber=int(n['node']), x=n['x'], y=n['y'], elevation=n['elev']) node.streamLink = link return link
Create GSSHAPY cross section objects for a stream link.
async def officers(self, root): officers = sorted( root.find('OFFICERS'), key=lambda elem: int(elem.find('ORDER').text) ) return [Officer(elem) for elem in officers]
Regional Officers. Does not include the Founder or the Delegate, unless they have additional titles as Officers. In the correct order. Returns ------- an :class:`ApiQuery` of a list of :class:`Officer`
def _conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False): logger.debug("Conditional: InducingPoints -- Mok") Kmm = Kuu(feat, kern, jitter=settings.numerics.jitter_level) Kmn = Kuf(feat, kern, Xnew) Knn = kern.K(Xnew, full_output_cov=full_output_cov) if full_cov \ else kern.Kdiag(Xnew, full_output_cov=full_output_cov) M, L, N, K = [tf.shape(Kmn)[i] for i in range(Kmn.shape.ndims)] Kmm = tf.reshape(Kmm, (M * L, M * L)) if full_cov == full_output_cov: Kmn = tf.reshape(Kmn, (M * L, N * K)) Knn = tf.reshape(Knn, (N * K, N * K)) if full_cov else tf.reshape(Knn, (N * K,)) fmean, fvar = base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white) fmean = tf.reshape(fmean, (N, K)) fvar = tf.reshape(fvar, (N, K, N, K) if full_cov else (N, K)) else: Kmn = tf.reshape(Kmn, (M * L, N, K)) fmean, fvar = fully_correlated_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, full_output_cov=full_output_cov, q_sqrt=q_sqrt, white=white) return fmean, fvar
Multi-output GP with fully correlated inducing variables. The inducing variables are shaped in the same way as evaluations of K, to allow a default inducing point scheme for multi-output kernels. The covariance matrices used to calculate the conditional have the following shape: - Kuu: M x L x M x L - Kuf: M x L x N x P - Kff: N x P x N x P, N x P x P, N x P Further reference ----------------- - See `gpflow.conditionals._conditional` for a detailed explanation of conditional in the single-output case. - See the multioutput notebook for more information about the multioutput framework. Parameters ---------- :param f: variational mean, ML x 1 :param q_sqrt: standard-deviations or cholesky, ML x 1 or 1 x ML x ML
def start(self): if self._started: raise RuntimeError("thread already started") def run(): try: self.run(*self._args, **self._kwargs) except SystemExit: pass finally: self._deactivate() self._glet = scheduler.greenlet(run) self._ident = id(self._glet) scheduler.schedule(self._glet) self._activate()
schedule to start the greenlet that runs this thread's function :raises: `RuntimeError` if the thread has already been started
def astype(self, out_dtype): out_dtype = np.dtype(out_dtype) if out_dtype == self.out_dtype: return self real_dtype = getattr(self, 'real_out_dtype', None) if real_dtype is None: return self._astype(out_dtype) else: if out_dtype == real_dtype: if self.__real_space is None: self.__real_space = self._astype(out_dtype) return self.__real_space elif out_dtype == self.complex_out_dtype: if self.__complex_space is None: self.__complex_space = self._astype(out_dtype) return self.__complex_space else: return self._astype(out_dtype)
Return a copy of this space with new ``out_dtype``. Parameters ---------- out_dtype : Output data type of the returned space. Can be given in any way `numpy.dtype` understands, e.g. as string (``'complex64'``) or built-in type (``complex``). ``None`` is interpreted as ``'float64'``. Returns ------- newspace : `FunctionSpace` The version of this space with given data type
def update(key, value): store = load() store[key] = value dump(store) return True
Update a key with a value in the minion datastore CLI Example: .. code-block:: bash salt '*' data.update <key> <value>
def dfs_edges(G, start, depth_limit=1, get_only=True, get_path=False): depth_limit = depth_limit - 1 output_nodes = array('L') output_depth = array('I') output_weights = array('f') apath = [] if G.node.get(start) is None: print('Start node not found') return output_nodes, output_weights, output_depth, apath visited = set() visited.add(start) stack = [(start, G.edges_iter(start, data=True), 1.0)] visited.add(start) while stack: if len(output_nodes) > 80100100: print("To many nodes for: {}".format(start)) del output_nodes del output_weights del output_depth output_nodes = array('L') output_depth = array('I') output_weights = array('f') gc.collect() break parent, children, weight = stack[-1] try: parent_, child, child_keys = next(children) if child not in visited: weight = child_keys.get('weight', 1.0) * weight visited.add(child) if len(stack) >= depth_limit or weight <= 0.00001: visited.remove(child) else: stack.append((child, G.edges_iter(child, data=True), weight)) if get_only and child > 100000000000: continue output_nodes.append(child) output_weights.append(weight) output_depth.append(len(stack)) if get_path: apath.append([step[0] for step in stack]) except StopIteration: stack.pop() visited.remove(parent) return output_nodes, output_weights, output_depth, apath
Depth-limited depth-first search over the edges of ``G``, starting from ``start``.
def get_params(self): return odict([(key,param.value) for key,param in self.params.items()])
Get an odict of the parameter names and values
def CheckFile(self, filename): result = True artifact_reader = reader.YamlArtifactsReader() try: for artifact_definition in artifact_reader.ReadFile(filename): try: self._artifact_registry.RegisterDefinition(artifact_definition) except KeyError: logging.warning( 'Duplicate artifact definition: {0:s} in file: {1:s}'.format( artifact_definition.name, filename)) result = False artifact_definition_supports_macos = ( definitions.SUPPORTED_OS_DARWIN in ( artifact_definition.supported_os)) artifact_definition_supports_windows = ( definitions.SUPPORTED_OS_WINDOWS in ( artifact_definition.supported_os)) for source in artifact_definition.sources: if source.type_indicator in ( definitions.TYPE_INDICATOR_FILE, definitions.TYPE_INDICATOR_PATH): if (definitions.SUPPORTED_OS_DARWIN in source.supported_os or ( artifact_definition_supports_macos and not source.supported_os)): if not self._CheckMacOSPaths( filename, artifact_definition, source, source.paths): result = False elif (artifact_definition_supports_windows or definitions.SUPPORTED_OS_WINDOWS in source.supported_os): for path in source.paths: if not self._CheckWindowsPath( filename, artifact_definition, source, path): result = False elif source.type_indicator == ( definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY): if (filename != self.LEGACY_PATH and self._HasDuplicateRegistryKeyPaths( filename, artifact_definition, source)): result = False for key_path in source.keys: if not self._CheckWindowsRegistryKeyPath( filename, artifact_definition, key_path): result = False elif source.type_indicator == ( definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE): for key_value_pair in source.key_value_pairs: if not self._CheckWindowsRegistryKeyPath( filename, artifact_definition, key_value_pair['key']): result = False except errors.FormatError as exception: logging.warning( 'Unable to validate file: {0:s} with error: {1!s}'.format( filename, exception)) result = False return result
Validates the artifacts definition in a specific file. Args: filename (str): name of the artifacts definition file. Returns: bool: True if the file contains valid artifacts definitions.
def create_asset_delivery_policy(access_token, ams_account, key_delivery_url): path = '/AssetDeliveryPolicies' endpoint = ''.join([ams_rest_endpoint, path]) body = '{ \ "Name":"AssetDeliveryPolicy", \ "AssetDeliveryProtocol":"4", \ "AssetDeliveryPolicyType":"3", \ "AssetDeliveryConfiguration":"[{ \ \\"Key\\":\\"2\\", \ \\"Value\\":\\"' + key_delivery_url + '\\"}]" \ }' return do_ams_post(endpoint, path, body, access_token)
Create Media Service Asset Delivery Policy. Args: access_token (str): A valid Azure authentication token. ams_account (str): Media Service Account. key_delivery_url (str): Key delivery URL. Returns: HTTP response. JSON body.
def search(self, account_name): accounts = self.list() for a in accounts: if a['name'] == account_name: return a['id']
Get a list of all the Accounts for the current user and return the ID of the one with the specified name.
def check_stripe_api_version(app_configs=None, **kwargs): from . import settings as djstripe_settings messages = [] default_version = djstripe_settings.DEFAULT_STRIPE_API_VERSION version = djstripe_settings.get_stripe_api_version() if not validate_stripe_api_version(version): msg = "Invalid Stripe API version: {}".format(version) hint = "STRIPE_API_VERSION should be formatted as: YYYY-MM-DD" messages.append(checks.Critical(msg, hint=hint, id="djstripe.C004")) if version != default_version: msg = ( "The Stripe API version has a non-default value of '{}'. " "Non-default versions are not explicitly supported, and may " "cause compatibility issues.".format(version) ) hint = "Use the dj-stripe default for Stripe API version: {}".format(default_version) messages.append(checks.Warning(msg, hint=hint, id="djstripe.W001")) return messages
Check that the user has configured the Stripe API version correctly.
def intersection(self, *others): r result = self.__copy__() _elements = result._elements _total = result._total for other in map(self._as_mapping, others): for element, multiplicity in list(_elements.items()): new_multiplicity = other.get(element, 0) if new_multiplicity < multiplicity: if new_multiplicity > 0: _elements[element] = new_multiplicity _total -= multiplicity - new_multiplicity else: del _elements[element] _total -= multiplicity result._total = _total return result
r"""Return a new multiset with elements common to the multiset and all others. >>> ms = Multiset('aab') >>> sorted(ms.intersection('abc')) ['a', 'b'] You can also use the ``&`` operator for the same effect. However, the operator version will only accept a set as other operator, not any iterable, to avoid errors. >>> ms = Multiset('aab') >>> sorted(ms & Multiset('aaac')) ['a', 'a'] For a variant of the operation which modifies the multiset in place see :meth:`intersection_update`. Args: others: The other sets intersect with the multiset. Can also be any :class:`~typing.Iterable`\[~T] or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T]. Returns: The multiset resulting from the intersection of the sets.
def _get_event_source_obj(awsclient, evt_source): event_source_map = { 'dynamodb': event_source.dynamodb_stream.DynamoDBStreamEventSource, 'kinesis': event_source.kinesis.KinesisEventSource, 's3': event_source.s3.S3EventSource, 'sns': event_source.sns.SNSEventSource, 'events': event_source.cloudwatch.CloudWatchEventSource, 'cloudfront': event_source.cloudfront.CloudFrontEventSource, 'cloudwatch_logs': event_source.cloudwatch_logs.CloudWatchLogsEventSource, } evt_type = _get_event_type(evt_source) event_source_func = event_source_map.get(evt_type, None) if not event_source_func: raise ValueError('Unknown event source: {0}'.format( evt_source['arn'])) return event_source_func(awsclient, evt_source)
Given awsclient, event_source dictionary item create an event_source object of the appropriate event type to schedule this event, and return the object.
def sum_distances(self, indices, distance_matrix): combs_tup = np.array(tuple(combinations(indices, 2))) combs = np.array([[i[0] for i in combs_tup], [i[1] for i in combs_tup]]) dist = np.sqrt( np.sum(np.square(distance_matrix[combs[0], combs[1]]), axis=0)) return dist
Calculate combinatorial distance between a select group of trajectories, indicated by indices Arguments --------- indices : tuple distance_matrix : numpy.ndarray (M,M) Returns ------- numpy.ndarray Notes ----- This function can perhaps be quickened by calculating the sum of the distances. The calculated distances, as they are right now, are only used in a relative way. Purely summing distances would lead to the same result, at a perhaps quicker rate.
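A self-contained sketch with a toy 3 x 3 distance matrix, with values chosen so the result is easy to check by hand:

import numpy as np
from itertools import combinations

dm = np.array([[0., 3., 4.],
               [3., 0., 0.],
               [4., 0., 0.]])
pairs = np.array(list(combinations((0, 1, 2), 2)))          # [[0, 1], [0, 2], [1, 2]]
dist = np.sqrt(np.sum(np.square(dm[pairs[:, 0], pairs[:, 1]])))
# dist == 5.0, i.e. sqrt(3**2 + 4**2 + 0**2)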
def fetch(self, addon_id, data={}, **kwargs): return super(Addon, self).fetch(addon_id, data, **kwargs)
Fetch addon for the given Id Args: addon_id : Id for which addon object has to be retrieved Returns: addon dict for the given addon Id
def fs_put(self, rpath, data): try: self.begin() self.file_put_contents(rpath, data) manifest = self.read_local_manifest() manifest['files'][rpath] = self.get_single_file_info(rpath) self.write_local_manifest(manifest) self.commit() except: self.rollback(); raise
Add a file to the FS
def mask(in_file, mask_file, new_name): import nibabel as nb import os in_nii = nb.load(in_file) mask_nii = nb.load(mask_file) data = in_nii.get_data() data[mask_nii.get_data() == 0] = 0 new_nii = nb.Nifti1Image(data, in_nii.affine, in_nii.header) new_nii.to_filename(new_name) return os.path.abspath(new_name)
Apply a binary mask to an image. Parameters ---------- in_file : str Path to a NIfTI file to mask mask_file : str Path to a binary mask new_name : str Path/filename for the masked output image. Returns ------- str Absolute path of the masked output image. Notes ----- in_file and mask_file must be in the same image space and have the same dimensions.
def assign_value(self, comp_def, value, src_ref): super().assign_value(comp_def, value, src_ref) comp_def.properties['incrthreshold'] = value
Set both alias and actual value
def _overrideMasterSettings(self): cod = self._getGuiSettings() self._appName = APP_NAME self._appHelpString = tealHelpString self._useSimpleAutoClose = self._do_usac self._showExtraHelpButton = False self._saveAndCloseOnExec = cfgGetBool(cod, 'saveAndCloseOnExec', True) self._showHelpInBrowser = cfgGetBool(cod, 'showHelpInBrowser', False) self._writeProtectOnSaveAs = cfgGetBool(cod, 'writeProtectOnSaveAsOpt', True) self._flagNonDefaultVals = cfgGetBool(cod, 'flagNonDefaultVals', None) self._optFile = APP_NAME.lower()+".optionDB" ltblu = " drktl = " self._frmeColor = cod.get('frameColor', drktl) self._taskColor = cod.get('taskBoxColor', ltblu) self._bboxColor = cod.get('buttonBoxColor', ltblu) self._entsColor = cod.get('entriesColor', ltblu) self._flagColor = cod.get('flaggedColor', 'brown') if self._canExecute and self._taskParsObj: self._canExecute = self._taskParsObj.canExecute() self._showExecuteButton = self._canExecute hhh = self.getHelpString(self.pkgName+'.'+self.taskName) if hhh: hhh = hhh.lower() if hhh.find('<html') >= 0 or hhh.find('</html>') > 0: self._knowTaskHelpIsHtml = True elif hhh.startswith('http:') or hhh.startswith('https:'): self._knowTaskHelpIsHtml = True elif hhh.startswith('file:') and \ (hhh.endswith('.htm') or hhh.endswith('.html')): self._knowTaskHelpIsHtml = True
Override so that we can run in a different mode.
def write(self, content): with io.open(self.target, 'w', encoding='utf-8') as fp: fp.write(content) if not content.endswith(u'\n'): fp.write(u'\n')
Save content on disk
def update(self, global_size=None, lower_extent=None, upper_extent=None, description=None): if global_size is not None: self._global_size = global_size if lower_extent is not None: self._lower_extent = lower_extent if upper_extent is not None: self._upper_extent = upper_extent if description is not None: self._description = description self.validate()
Update the dimension properties Parameters ---------- global_size : int Global dimension size (Default value = None) lower_extent : int Lower dimension extent (Default value = None) upper_extent : int Upper dimension extent (Default value = None) description : str Dimension description (Default value = None)
def element_wise(self, other, op): if not isscalar(other) and not self.shape == other.shape: raise ValueError("shapes %s and %s must be equal" % (self.shape, other.shape)) if not isscalar(other) and isinstance(other, Data) and not self.mode == other.mode: raise NotImplementedError if isscalar(other): return self.map(lambda x: op(x, other)) if self.mode == 'local' and isinstance(other, ndarray): return self._constructor(op(self.values, other)).__finalize__(self) if self.mode == 'local' and isinstance(other, Data): return self._constructor(op(self.values, other.values)).__finalize__(self) if self.mode == 'spark' and isinstance(other, Data): def func(record): (k1, x), (k2, y) = record return k1, op(x, y) rdd = self.tordd().zip(other.tordd()).map(func) barray = BoltArraySpark(rdd, shape=self.shape, dtype=self.dtype, split=self.values.split) return self._constructor(barray).__finalize__(self)
Apply an elementwise operation to data. Both self and other data must have the same mode. If self is in local mode, other can also be a numpy array. Self and other must have the same shape, or other must be a scalar. Parameters ---------- other : Data or numpy array Data to apply elementwise operation to op : function Binary operator to use for elementwise operations, e.g. add, subtract
def doLayout(self, width): self.width = width font_sizes = [0] + [frag.get("fontSize", 0) for frag in self] self.fontSize = max(font_sizes) self.height = self.lineHeight = max(frag * self.LINEHEIGHT for frag in font_sizes) y = (self.lineHeight - self.fontSize) for frag in self: frag["y"] = y return self.height
Align words in previous line.
def run(command, options, args): if command == "backend": subprocess.call(("sqlite3", db_path)) if command == "add": dp = pdt.Calendar() due = mktime(dp.parse(options.due)[0]) if options.due else None print "added tasks..." [Task(desc, due).add() for desc in args] return filters = args if len(args) else None rows = Query(filters, options).find() tasks = [Task(r["desc"], r["due"]) for r in rows] if command == "list": for t in tasks: print "\t *", t if command == "done": print "done with..." finished_tasks = [] for t in tasks: finished = t.done() if finished: finished_tasks.append(t) if not finished_tasks: return print "" print "finished tasks:" for t in finished_tasks: print "\t X", t if command == "remove": print "remove..." removed_tasks = [] for t in tasks: removed = t.remove() if removed: removed_tasks.append(t) if not removed_tasks: return print "" print "removed tasks:" for t in removed_tasks: print "\t RM", t
Run the requested command. args is either a list of descriptions or a list of strings to filter by
def check_complete(self): logger.debug('Running check_complete for task {0}'.format(self.name)) if self.remote_not_complete() or self.local_not_complete(): self._start_check_timer() return return_code = self.completed_task() if self.terminate_sent: self.stderr += '\nDAGOBAH SENT SIGTERM TO THIS PROCESS\n' if self.kill_sent: self.stderr += '\nDAGOBAH SENT SIGKILL TO THIS PROCESS\n' if self.remote_failure: return_code = -1 self.stderr += '\nAn error occurred with the remote machine.\n' self.stdout_file = None self.stderr_file = None self._task_complete(success=True if return_code == 0 else False, return_code=return_code, stdout=self.stdout, stderr=self.stderr, start_time=self.started_at, complete_time=datetime.utcnow())
Runs completion flow for this task if it's finished.
def get_icon_by_extension(fname, scale_factor): application_icons = {} application_icons.update(BIN_FILES) application_icons.update(DOCUMENT_FILES) if osp.isdir(fname): return icon('DirOpenIcon', scale_factor) else: basename = osp.basename(fname) __, extension = osp.splitext(basename.lower()) mime_type, __ = mime.guess_type(basename) icon_by_extension = icon('FileIcon', scale_factor) if extension in OFFICE_FILES: icon_by_extension = icon(OFFICE_FILES[extension], scale_factor) if extension in LANGUAGE_ICONS: icon_by_extension = icon(LANGUAGE_ICONS[extension], scale_factor) else: if extension == '.ipynb': if is_dark_interface(): icon_by_extension = QIcon( get_image_path('notebook_dark.svg')) else: icon_by_extension = QIcon( get_image_path('notebook_light.svg')) elif mime_type is not None: try: file_type, bin_name = mime_type.split('/') except ValueError: file_type = 'text' if file_type == 'text': icon_by_extension = icon('TextFileIcon', scale_factor) elif file_type == 'audio': icon_by_extension = icon('AudioFileIcon', scale_factor) elif file_type == 'video': icon_by_extension = icon('VideoFileIcon', scale_factor) elif file_type == 'image': icon_by_extension = icon('ImageFileIcon', scale_factor) elif file_type == 'application': if bin_name in application_icons: icon_by_extension = icon( application_icons[bin_name], scale_factor) return icon_by_extension
Return the icon depending on the file extension
def pre_run_cell(self, cellno, code): self.cellid = cellno import ast if findloop(ast.parse(code)): from acorn.logging.decoration import set_streamlining set_streamlining(True) from time import time self.pre = { "m": "loop", "a": None, "s": time(), "r": None, "c": code, }
Executes before the user-entered code in `ipython` is run. This intercepts loops and other problematic code that would produce lots of database entries and streamlines it to produce only a single entry. Args: cellno (int): the cell number that is about to be executed. code (str): python source code that is about to be executed.
def eval(self, command): for cmd, response in self.response_list: if not cmd.match(command): continue if response is None: return None elif hasattr(response, '__call__'): return response(command) else: return response if self.strict: raise Exception('Undefined command: ' + repr(command)) return None
Evaluate the given string against all registered commands and return the defined response. :type command: str :param command: The command that is evaluated. :rtype: str or None :return: The response, if one was defined.
def list_service_providers(self, retrieve_all=True, **_params): return self.list('service_providers', self.service_providers_path, retrieve_all, **_params)
Fetches service providers.
def configure(): log_levels = { 5: logging.NOTSET, 4: logging.DEBUG, 3: logging.INFO, 2: logging.WARNING, 1: logging.ERROR, 0: logging.CRITICAL } logging.captureWarnings(True) root_logger = logging.getLogger() if settings.CFG["debug"]: details_format = logging.Formatter( '%(name)s (%(filename)s:%(lineno)s) [%(levelname)s] %(message)s') details_hdl = logging.StreamHandler() details_hdl.setFormatter(details_format) root_logger.addHandler(details_hdl) else: brief_format = logging.Formatter('%(message)s') console_hdl = logging.StreamHandler() console_hdl.setFormatter(brief_format) root_logger.addHandler(console_hdl) root_logger.setLevel(log_levels[int(settings.CFG["verbosity"])]) configure_plumbum_log() configure_migrate_log() configure_parse_log()
Load logging configuration from our own defaults.
def OnCardRightClick(self, event): item = event.GetItem() if item: itemdata = self.readertreepanel.cardtreectrl.GetItemPyData(item) if isinstance(itemdata, smartcard.Card.Card): self.selectedcard = itemdata if not hasattr(self, "connectID"): self.connectID = wx.NewId() self.disconnectID = wx.NewId() self.Bind(wx.EVT_MENU, self.OnConnect, id=self.connectID) self.Bind( wx.EVT_MENU, self.OnDisconnect, id=self.disconnectID) menu = wx.Menu() if not hasattr(self.selectedcard, 'connection'): menu.Append(self.connectID, "Connect") else: menu.Append(self.disconnectID, "Disconnect") self.PopupMenu(menu) menu.Destroy()
Called when user right-clicks a node in the card tree control.
def get_objective(self, sampler): def objective(params): circuit = self.get_circuit(params) circuit.make_cache() return self.get_energy(circuit, sampler) return objective
Get an objective function to be optimized.
def extract(self, item, list_article_candidate): list_description = [] for article_candidate in list_article_candidate: if article_candidate.description != None: list_description.append((article_candidate.description, article_candidate.extractor)) if len(list_description) == 0: return None list_newspaper = [x for x in list_description if x[1] == "newspaper"] if len(list_newspaper) == 0: return list_description[0][0] else: return list_newspaper[0][0]
Compares the extracted descriptions. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely description
def conj_phrase(list_, cond='or'): if len(list_) == 0: return '' elif len(list_) == 1: return list_[0] elif len(list_) == 2: return ' '.join((list_[0], cond, list_[1])) else: condstr = ''.join((', ' + cond, ' ')) return ', '.join((', '.join(list_[:-2]), condstr.join(list_[-2:])))
Joins a list of words using English conjunction rules Args: list_ (list): of strings cond (str): a conjunction (or, and, but) Returns: str: the joined conjunction phrase References: http://en.wikipedia.org/wiki/Conjunction_(grammar) Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> list_ = ['a', 'b', 'c'] >>> result = conj_phrase(list_, 'or') >>> print(result) a, b, or c Example1: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> list_ = ['a', 'b'] >>> result = conj_phrase(list_, 'and') >>> print(result) a and b
def write_to_file(self, filename): if filename.endswith('.cml'): from molmod.io import dump_cml dump_cml(filename, [self]) elif filename.endswith('.xyz'): from molmod.io import XYZWriter symbols = [] for n in self.numbers: atom = periodic[n] if atom is None: symbols.append("X") else: symbols.append(atom.symbol) xyz_writer = XYZWriter(filename, symbols) xyz_writer.dump(self.title, self.coordinates) del xyz_writer else: raise ValueError("Could not determine file format for %s." % filename)
Write the molecular geometry to a file. The file format is inferred from the extension. Currently supported formats are: ``*.xyz``, ``*.cml`` Argument: | ``filename`` -- a filename
def merge(d, *dicts): for d_update in dicts: if not isinstance(d, dict): raise TypeError("{0} is not a dict".format(d)) dict_merge_pair(d, d_update) return d
Recursively merges dictionaries
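`dict_merge_pair` is not shown above, so here is a minimal self-contained sketch of the recursive behaviour the docstring describes; the function name is hypothetical:

def merge_sketch(d, *dicts):
    # Merge each update into d, recursing into nested dicts instead of overwriting them.
    for update in dicts:
        for key, value in update.items():
            if isinstance(d.get(key), dict) and isinstance(value, dict):
                merge_sketch(d[key], value)
            else:
                d[key] = value
    return d

# merge_sketch({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})  ->  {'a': {'x': 1, 'y': 2}, 'b': 3}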
def system_info(host, username, password, protocol=None, port=None): service_instance = salt.utils.vmware.get_service_instance(host=host, username=username, password=password, protocol=protocol, port=port) ret = salt.utils.vmware.get_inventory(service_instance).about.__dict__ if 'apiType' in ret: if ret['apiType'] == 'HostAgent': ret = dictupdate.update(ret, salt.utils.vmware.get_hardware_grains(service_instance)) return ret
Return system information about a VMware environment. host The location of the host. username The username used to login to the host, such as ``root``. password The password used to login to the host. protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. CLI Example: .. code-block:: bash salt '*' vsphere.system_info 1.2.3.4 root bad-password
def cspace_convert(arr, start, end): converter = cspace_converter(start, end) return converter(arr)
Converts the colors in ``arr`` from colorspace ``start`` to colorspace ``end``. :param arr: An array-like of colors. :param start, end: Any supported colorspace specifiers. See :ref:`supported-colorspaces` for details.
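A hedged usage sketch; the specifier strings 'sRGB1' and 'JCh' are assumptions about the supported colorspace names referenced in the docstring:

rgb = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
jch = cspace_convert(rgb, "sRGB1", "JCh")   # converts both colors in a single call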
def get_course_completions(cls, user, course_key): user_course_completions = cls.user_course_completion_queryset(user, course_key) return cls.completion_by_block_key(user_course_completions)
Returns a dictionary mapping BlockKeys to completion values for all BlockCompletion records for the given user and course_key. Return value: dict[BlockKey] = float
def setup_config(epab_version: str): logger = logging.getLogger('EPAB') logger.debug('setting up config') elib_config.ELIBConfig.setup( app_name='EPAB', app_version=epab_version, config_file_path='pyproject.toml', config_sep_str='__', root_path=['tool', 'epab'] ) elib_config.write_example_config('pyproject.toml.example') if not pathlib.Path('pyproject.toml').exists(): raise FileNotFoundError('pyproject.toml') elib_config.validate_config()
Set up the elib_config package :param epab_version: installed version of EPAB as a string
def serialize(self, now=None): created = self.created if self.created is not None else now el = etree.Element(utils.lxmlns("mets") + self.subsection, ID=self.id_string) if created: el.set("CREATED", created) status = self.get_status() if status: el.set("STATUS", status) if self.contents: el.append(self.contents.serialize()) return el
Serialize this SubSection and all children to lxml Element and return it. :param str now: Default value for CREATED if none set :return: dmdSec/techMD/rightsMD/sourceMD/digiprovMD Element with all children
def update_billing_info(self, billing_info): url = urljoin(self._url, '/billing_info') response = billing_info.http_request(url, 'PUT', billing_info, {'Content-Type': 'application/xml; charset=utf-8'}) if response.status == 200: pass elif response.status == 201: billing_info._url = response.getheader('Location') else: billing_info.raise_http_error(response) response_xml = response.read() logging.getLogger('recurly.http.response').debug(response_xml) billing_info.update_from_element(ElementTree.fromstring(response_xml))
Change this account's billing information to the given `BillingInfo`.
def intersection(source, mask): output_layer_name = intersection_steps['output_layer_name'] output_layer_name = output_layer_name % ( source.keywords['layer_purpose']) parameters = {'INPUT': source, 'OVERLAY': mask, 'OUTPUT': 'memory:'} initialize_processing() feedback = create_processing_feedback() context = create_processing_context(feedback=feedback) result = processing.run('native:intersection', parameters, context=context) if result is None: raise ProcessingInstallationError intersect = result['OUTPUT'] intersect.setName(output_layer_name) intersect.keywords = dict(source.keywords) intersect.keywords['title'] = output_layer_name intersect.keywords['layer_purpose'] = \ layer_purpose_exposure_summary['key'] intersect.keywords['inasafe_fields'] = \ dict(source.keywords['inasafe_fields']) intersect.keywords['inasafe_fields'].update( mask.keywords['inasafe_fields']) intersect.keywords['hazard_keywords'] = \ dict(mask.keywords['hazard_keywords']) intersect.keywords['exposure_keywords'] = dict(source.keywords) intersect.keywords['aggregation_keywords'] = dict( mask.keywords['aggregation_keywords']) check_layer(intersect) return intersect
Intersect two layers. Issue https://github.com/inasafe/inasafe/issues/3186 :param source: The vector layer to clip. :type source: QgsVectorLayer :param mask: The vector layer to use for clipping. :type mask: QgsVectorLayer :return: The clip vector layer. :rtype: QgsVectorLayer .. versionadded:: 4.0
def validate_custom_interpreters_list(self): custom_list = self.get_option('custom_interpreters_list') valid_custom_list = [] for value in custom_list: if (osp.isfile(value) and programs.is_python_interpreter(value) and value != get_python_executable()): valid_custom_list.append(value) self.set_option('custom_interpreters_list', valid_custom_list)
Check that the used custom interpreters are still valid.
def is_on_cooldown(self, ctx): if not self._buckets.valid: return False bucket = self._buckets.get_bucket(ctx.message) return bucket.get_tokens() == 0
Checks whether the command is currently on cooldown. Parameters ----------- ctx: :class:`.Context` The invocation context to use when checking the command's cooldown status. Returns -------- :class:`bool` A boolean indicating if the command is on cooldown.
def _request_eip(interface, vm_): params = {'Action': 'AllocateAddress'} params['Domain'] = interface.setdefault('domain', 'vpc') eips = aws.query(params, return_root=True, location=get_location(vm_), provider=get_provider(), opts=__opts__, sigver='4') for eip in eips: if 'allocationId' in eip: return eip['allocationId'] return None
Request and return Elastic IP
def _convert_unsigned(data, fmt): num = len(data) return struct.unpack( "{}{}".format(num, fmt.upper()).encode("utf-8"), struct.pack("{}{}".format(num, fmt).encode("utf-8"), *data) )
Convert data from signed to unsigned in bulk.
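A self-contained illustration of the signed-to-unsigned round trip performed above, using plain Python 3 format strings (the original also encodes them to bytes for Python 2 compatibility):

import struct

data, fmt = [-1, 127, -128], 'b'
packed = struct.pack('{}{}'.format(len(data), fmt), *data)                 # pack as signed bytes
unsigned = struct.unpack('{}{}'.format(len(data), fmt.upper()), packed)    # reinterpret as unsigned
# unsigned == (255, 127, 128)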
def version(self): this_path = os.path.dirname(os.path.realpath(__file__)) version_file = os.path.join(this_path, 'VERSION') return open(version_file).read().strip()
Return the version number of the Lending Club Investor tool Returns ------- string The version number string
def log_url (self, url_data): self.writeln() if self.has_part('url'): self.write_url(url_data) if url_data.name and self.has_part('name'): self.write_name(url_data) if url_data.parent_url and self.has_part('parenturl'): self.write_parent(url_data) if url_data.base_ref and self.has_part('base'): self.write_base(url_data) if url_data.url and self.has_part('realurl'): self.write_real(url_data) if url_data.checktime and self.has_part('checktime'): self.write_checktime(url_data) if url_data.dltime >= 0 and self.has_part('dltime'): self.write_dltime(url_data) if url_data.size >= 0 and self.has_part('dlsize'): self.write_size(url_data) if url_data.info and self.has_part('info'): self.write_info(url_data) if url_data.modified and self.has_part('modified'): self.write_modified(url_data) if url_data.warnings and self.has_part('warning'): self.write_warning(url_data) if self.has_part('result'): self.write_result(url_data) self.flush()
Write url checking info.
def _put_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None): model = type(instance) if space is None and model not in (Space, Event): raise Exception( 'In general, `API._put_json` should always ' 'be called with a `space` argument.' ) if not extra_params: extra_params = {} if not id_field: id_field = 'number' url = '{0}/{1}/{2}/{3}.json?{4}'.format( settings.API_ROOT_PATH, settings.API_VERSION, rel_path or model.rel_path, instance[id_field], urllib.urlencode(extra_params), ) response = requests.put( url=url, data=json.dumps(instance.data), headers={ 'X-Api-Key': self.key, 'X-Api-Secret': self.secret, 'Content-type': "application/json", }, ) if response.status_code == 204: return instance else: raise Exception( 'Code {0} returned from `{1}`. Response text: "{2}".'.format( response.status_code, url, response.text ) )
Base-level method for updating existing data in the API via HTTP PUT
def splits(cls, exts, fields, root='.data', train='train', validation='val', test='test2016', **kwargs): if 'path' not in kwargs: expected_folder = os.path.join(root, cls.name) path = expected_folder if os.path.exists(expected_folder) else None else: path = kwargs['path'] del kwargs['path'] return super(Multi30k, cls).splits( exts, fields, path, root, train, validation, test, **kwargs)
Create dataset objects for splits of the Multi30k dataset. Arguments: exts: A tuple containing the extension to path for each language. fields: A tuple containing the fields that will be used for data in each language. root: Root dataset storage directory. Default is '.data'. train: The prefix of the train data. Default: 'train'. validation: The prefix of the validation data. Default: 'val'. test: The prefix of the test data. Default: 'test'. Remaining keyword arguments: Passed to the splits method of Dataset.
def to_geojson(self, filename): with open(filename, 'w') as fd: json.dump(self.to_record(WGS84_CRS), fd)
Save vector as geojson.
def add_link(dataset, source, target, count=1): try: node = dataset[source] values, links = node if isinstance(links, list): try: idx = links.index(target) values[idx] += count except ValueError: links.append(target) values.append(count) elif links == target: node[0] += count else: node[0] = [values, count] node[1] = [links, target] except KeyError: dataset[source] = [count, target]
Add a link. Parameters ---------- dataset : `dict` of ([`int`, `str`] or [`list` of `int`, `list` of `str`]) Dataset. source : `iterable` of `str` Link source. target : `str` Link target. count : `int`, optional Link count (default: 1).
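How the dataset dict evolves through the branches above, traced with toy keys (the comments show each intermediate state):

dataset = {}
add_link(dataset, 'a', 'b')       # {'a': [1, 'b']}                 first link created
add_link(dataset, 'a', 'b', 2)    # {'a': [3, 'b']}                 same target: count bumped
add_link(dataset, 'a', 'c')       # {'a': [[3, 1], ['b', 'c']]}     new target: promoted to lists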
def makeMet(segID, N, CA, C, O, geo): CA_CB_length=geo.CA_CB_length C_CA_CB_angle=geo.C_CA_CB_angle N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle CB_CG_length=geo.CB_CG_length CA_CB_CG_angle=geo.CA_CB_CG_angle N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle CG_SD_length=geo.CG_SD_length CB_CG_SD_angle=geo.CB_CG_SD_angle CA_CB_CG_SD_diangle=geo.CA_CB_CG_SD_diangle SD_CE_length=geo.SD_CE_length CG_SD_CE_angle=geo.CG_SD_CE_angle CB_CG_SD_CE_diangle=geo.CB_CG_SD_CE_diangle carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle) CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C") carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle) CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C") sulfur_d= calculateCoordinates(CA, CB, CG, CG_SD_length, CB_CG_SD_angle, CA_CB_CG_SD_diangle) SD= Atom("SD", sulfur_d, 0.0, 1.0, " ", " SD", 0, "S") carbon_e= calculateCoordinates(CB, CG, SD, SD_CE_length, CG_SD_CE_angle, CB_CG_SD_CE_diangle) CE= Atom("CE", carbon_e, 0.0, 1.0, " ", " CE", 0, "C") res= Residue((' ', segID, ' '), "MET", ' ') res.add(N) res.add(CA) res.add(C) res.add(O) res.add(CB) res.add(CG) res.add(SD) res.add(CE) return res
Creates a Methionine residue
def parse_eep(self, rorg_func=None, rorg_type=None, direction=None, command=None): if rorg_func is not None and rorg_type is not None: self.select_eep(rorg_func, rorg_type, direction, command) provides, values = self.eep.get_values(self._profile, self._bit_data, self._bit_status) self.parsed.update(values) return list(provides)
Parse EEP based on FUNC and TYPE
def result(self): if self.cancelled or (self._fn is not None): raise NotExecutedYet() if self._fn_exc is not None: six.reraise(*self._fn_exc) else: return self._fn_res
The result from the executed task. Raises NotExecutedYet if not yet executed.
def configure_duo_behavior(self, mount_point, push_info=None, user_agent=None, username_format='%s'): params = { 'username_format': username_format, } if push_info is not None: params['push_info'] = push_info if user_agent is not None: params['user_agent'] = user_agent api_path = '/v1/auth/{mount_point}/duo/config'.format( mount_point=mount_point, ) return self._adapter.post( url=api_path, json=params, )
Configure Duo second factor behavior. This endpoint allows you to configure how the original auth method username maps to the Duo username by providing a template format string. Supported methods: POST: /auth/{mount_point}/duo/config. Produces: 204 (empty body) :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :param push_info: A string of URL-encoded key/value pairs that provides additional context about the authentication attempt in the Duo Mobile app :type push_info: str | unicode :param user_agent: User agent to connect to Duo (default "") :type user_agent: str | unicode :param username_format: Format string given auth method username as argument to create Duo username (default '%s') :type username_format: str | unicode :return: The response of the configure_duo_behavior request. :rtype: requests.Response
def get_app_perms(model_or_app_label): from django.contrib.auth.models import Permission if isinstance(model_or_app_label, string_types): app_label = model_or_app_label else: app_label = model_or_app_label._meta.app_label qs = Permission.objects.filter(content_type__app_label=app_label) perms = ('%s.%s' % (app_label, p.codename) for p in qs.iterator()) return set(perms)
Get permission-string list of the specified django application. Parameters ---------- model_or_app_label : model class or string A model class or app_label string to specify the particular django application. Returns ------- set A set of perms of the specified django application. Examples -------- >>> perms1 = get_app_perms('auth') >>> perms2 = get_app_perms(Permission) >>> perms1 == perms2 True
def extract(self, m): self._clear() self.m = m if self.option != []: self._url_filter() self._email_filter() if 'tex' in self.option: self._tex_filter() if 'telephone' in self.option: self._telephone_filter() if 'QQ' in self.option: self._QQ_filter() if 'emoji' in self.option: self._emoji_filter() if 'wechat' in self.option: self._wechat_filter() self._filter() if 'blur' in self.option: self._blur = get_number(self.m, self._limit) return self._get_result()
extract info specified in option
def get_blob(self, index): blob = self._current_blob self.r.retrieve_timeslice(index) timeslice_info = Table.from_template({ 'frame_index': self.r.frame_index, 'slice_id': index, 'timestamp': self.r.utc_seconds, 'nanoseconds': self.r.utc_nanoseconds, 'n_frames': self.r.n_frames, }, 'TimesliceInfo') hits = self._extract_hits() hits.group_id = index blob['TimesliceInfo'] = timeslice_info blob[self._hits_blob_key] = hits return blob
Index is slice ID
def denoise_v1_m15(): hparams = xmoe2_v1() hparams.decoder_layers = [ "att" if l == "local_att" else l for l in hparams.decoder_layers] hparams.decoder_type = "denoising" hparams.noising_spec_train = {"type": "mask", "prob": 0.15} return hparams
Denoising experiment.
def log_to_api(self):
    if self.entries:
        try:
            headers = {'Content-Type': 'application/json'}
            self.session.post('/v2/logs/app', headers=headers, json=self.entries)
        except Exception:
            pass
Best effort API logger. Send logs to the ThreatConnect API and do nothing if the attempt fails.
def import_private_key(self, pem_text, password=None):
    if isinstance(pem_text, str) is True:
        pem_text = pem_text.encode()
    if password is not None and isinstance(password, str) is True:
        password = password.encode()
    self.__set_private_key(
        serialization.load_pem_private_key(pem_text, password=password, backend=default_backend())
    )
Import a private key from data in PEM-format

:param pem_text: text with private key
:param password: If it is not None, the key will be decrypted with the given password
:return: None
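A short usage sketch, assuming an instance (`keystore`) of the surrounding class and an illustrative file name:

# Hypothetical usage: load an encrypted PEM key from disk (names are illustrative).
with open('private_key.pem', 'r') as fh:
    pem_text = fh.read()
keystore.import_private_key(pem_text, password='s3cret')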
def run(self, args):
    args = vars(args)
    positionals = []
    keywords = {}
    for action in self.argparser._actions:
        if not hasattr(action, 'label'):
            continue
        if action.label == 'positional':
            positionals.append(args[action.dest])
        elif action.label == 'varargs':
            positionals.extend(args[action.dest])
        elif action.label == 'keyword':
            keywords[action.dest] = args[action.dest]
        elif action.label == 'varkwargs':
            kwpairs = iter(args[action.dest] or [])
            for key in kwpairs:
                try:
                    key, value = key.split('=', 1)
                except ValueError:
                    value = next(kwpairs)
                key = key.strip('-')
                keywords[key] = value
    return self.func(*positionals, **keywords)
Convert the unordered args into function arguments.
def rebuild_indexes(self, chunk_size=1000, aggressive_clear=False, index_class=None):
    assert self.indexable, "Field not indexable"
    assert self.attached_to_model, \
        '`rebuild_indexes` can only be called on a field attached to the model'
    for index in self._indexes:
        if index_class and not isinstance(index, index_class):
            continue
        index.rebuild(chunk_size=chunk_size, aggressive_clear=aggressive_clear)
Rebuild all indexes tied to this field

Parameters
----------
chunk_size: int
    Default to 1000, it's the number of instances to load at once.
aggressive_clear: bool
    Will be passed to the `aggressive` argument of the `clear_indexes` method.
    If `False`, all values will be normally deindexed. If `True`, the work will
    be done at low level, scanning for keys that may match the ones used by the
    indexes
index_class: type
    Allow to build only index(es) for this index class instead of all indexes.

Raises
------
AssertionError
    If called from an instance field. It must be called from the model field
    Also raised if the field is not indexable

Examples
--------
>>> MyModel.get_field('myfield').rebuild_indexes()
>>> MyModel.get_field('myfield').clear_indexes(index_class=MyIndex)
def InitUser():
    result = AppUser.query(AppUser.user == users.get_current_user()).fetch()
    if result:
        app_user = result[0]
    else:
        app_user = AppUser(user=users.get_current_user(),
                           email=users.get_current_user().email())
        app_user.put()
    return app_user
Initialize application user.

Retrieve existing user credentials from datastore or add new user.

Returns:
    AppUser instance of the application user.
def element_creator(namespace=None):
    ELEMENT_MAKER = _objectify.ElementMaker(namespace=namespace, annotate=False)

    def create_elem(tag, attr=None, text=None):
        if not attr:
            attr = {}
        if text:
            element = getattr(ELEMENT_MAKER, tag)(text, **attr)
        else:
            element = getattr(ELEMENT_MAKER, tag)(**attr)
        return element

    return create_elem
Create a simple namespace-aware objectify element creator.

Args:
    namespace (str): Namespace to work in

Returns:
    function: Namespace-aware element creator
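A small usage sketch, assuming `lxml.objectify` is imported as `_objectify` (as the function above expects); the namespace URI and element names are illustrative:

create_elem = element_creator(namespace='http://example.com/ns')
node = create_elem('item', attr={'id': '42'}, text='hello')
print(node.tag)  # '{http://example.com/ns}item'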
def process_net_command_json(self, py_db, json_contents):
    DEBUG = False

    try:
        request = self.from_json(json_contents, update_ids_from_dap=True)
    except KeyError as e:
        # Could not resolve ids from the DAP request: reply with an error response.
        request = self.from_json(json_contents, update_ids_from_dap=False)
        error_msg = str(e)
        if error_msg.startswith("'") and error_msg.endswith("'"):
            error_msg = error_msg[1:-1]

        def on_request(py_db, request):
            error_response = {
                'type': 'response',
                'request_seq': request.seq,
                'success': False,
                'command': request.command,
                'message': error_msg,
            }
            return NetCommand(CMD_RETURN, 0, error_response, is_json=True)

    else:
        if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS and DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
            pydev_log.info('Process %s: %s\n' % (
                request.__class__.__name__,
                json.dumps(request.to_dict(), indent=4, sort_keys=True),))

        # Dispatch to the matching on_<command>_request handler.
        assert request.type == 'request'
        method_name = 'on_%s_request' % (request.command.lower(),)
        on_request = getattr(self, method_name, None)
        if on_request is None:
            print('Unhandled: %s not available in _PyDevJsonCommandProcessor.\n' % (method_name,))
            return

        if DEBUG:
            print('Handled in pydevd: %s (in _PyDevJsonCommandProcessor).\n' % (method_name,))

    py_db._main_lock.acquire()
    try:
        cmd = on_request(py_db, request)
        if cmd is not None:
            py_db.writer.add_command(cmd)
    finally:
        py_db._main_lock.release()
Processes a debug adapter protocol json command.
def run_server(dbpath=os.path.expanduser(config.dbserver.file), dbhostport=None, loglevel='WARN'):
    if dbhostport:  # expected to be a string of the form "dbhost:port"
        dbhost, port = dbhostport.split(':')
        addr = (dbhost, int(port))
    else:
        addr = (config.dbserver.listen, DBSERVER_PORT)

    # create the db directory if needed
    dirname = os.path.dirname(dbpath)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    # create and upgrade the db if needed
    db('PRAGMA foreign_keys = ON')
    actions.upgrade_db(db)
    db.close()  # avoid sharing the connection with forked processes

    # reset any computation left in the 'executing' state
    actions.reset_is_running(db)

    # configure logging and start the server
    logging.basicConfig(level=getattr(logging, loglevel))
    DbServer(db, addr).start()
Run the DbServer on the given database file and port. If not given, use the settings in openquake.cfg.
def update(packages, env=None, user=None):
    packages = ' '.join(packages.split(','))
    cmd = _create_conda_cmd('update', args=[packages, '--yes', '-q'], env=env, user=user)
    return _execcmd(cmd, user=user)
Update conda packages in a conda env.

Parameters
----------
packages : str
    Comma-delimited list of packages.
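An illustrative call of the helper above; the environment and user names are placeholders:

update('numpy,pandas', env='analysis', user='deploy')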
def lowres_tensor(shape, underlying_shape, offset=None, sd=None):
    sd = sd or 0.01
    init_val = sd * np.random.randn(*underlying_shape).astype("float32")
    underlying_t = tf.Variable(init_val)
    t = resize_bilinear_nd(underlying_t, shape)
    if offset is not None:
        if not isinstance(offset, list):
            offset = len(shape) * [offset]
        for n in range(len(offset)):
            if offset[n] is True:
                offset[n] = shape[n] / underlying_shape[n] / 2
            if offset[n] is False:
                offset[n] = 0
            offset[n] = int(offset[n])
        padding = [(pad, 0) for pad in offset]
        t = tf.pad(t, padding, "SYMMETRIC")
        begin = len(shape) * [0]
        t = tf.slice(t, begin, shape)
    return t
Produces a tensor parameterized by an interpolated lower resolution tensor.

This is like what is done in a laplacian pyramid, but a bit more general. It
can be a powerful way to describe images.

Args:
    shape: desired shape of resulting tensor
    underlying_shape: shape of the tensor being resized into final tensor
    offset: Describes how to offset the interpolated vector (like phase in a
        Fourier transform). If None, apply no offset. If a scalar, apply the same
        offset to each dimension; if a list use each entry for each dimension.
        If an int, offset by that much. If False, do not offset. If True, offset
        by half the ratio between shape and underlying shape (analogous to 90
        degrees).
    sd: Standard deviation of initial tensor variable.

Returns:
    A tensor parameterized by a lower resolution tensorflow variable.
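A sketch of how the function above might be called (TF1-style graph code, matching the snippet); the shapes and offset pattern are illustrative:

# Parameterize a 64x64 RGB image by a 16x16 underlying variable, phase-offsetting
# only the two spatial dimensions by half the upscaling ratio (offset=True).
t = lowres_tensor(
    shape=[1, 64, 64, 3],
    underlying_shape=[1, 16, 16, 3],
    offset=[False, True, True, False],
    sd=0.01,
)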
def _add_updated_at_column(self, values):
    if not self._model.uses_timestamps():
        return values
    column = self._model.get_updated_at_column()
    if "updated_at" not in values:
        values.update({column: self._model.fresh_timestamp_string()})
    return values
Add the "updated_at" column to a dictionary of values.

:param values: The values to update
:type values: dict

:return: The new dictionary of values
:rtype: dict
def get_client_secret(self):
    self._client_secret = predix.config.get_env_value(
        predix.app.Manifest, 'client_secret')
    return self._client_secret
Return the client secret that should correspond with the client id.
def _should_set(self, key, mode):
    if mode is None or mode not in ["nx", "xx"]:
        return True
    if mode == "nx":
        if key in self.redis:
            return False
    elif key not in self.redis:
        return False
    return True
Determine if it is okay to set a key.

If the mode is None, returns True. Otherwise, returns True or False based on
the value of ``key`` and the ``mode`` (nx | xx).
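For reference, a sketch of the SET NX/XX semantics this helper mirrors, using redis-py against a real server (host and port are illustrative):

import redis

r = redis.Redis(host='localhost', port=6379)
r.delete('k')
r.set('k', '1', nx=True)        # True: key absent, NX allows the write
r.set('k', '2', nx=True)        # None: key exists, NX refuses
r.set('k', '3', xx=True)        # True: key exists, XX allows the write
r.set('missing', '4', xx=True)  # None: key absent, XX refuses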