Dataset columns:
    Unnamed: 0 (int64): row index, values 0 – 389k
    code (string): lengths 26 – 79.6k
    docstring (string): lengths 1 – 46.9k
383,300
def nmap_scan(): hs = HostSearch() config = Config() nmap_types = [, , , , ] options = {:, :, : config.get(, ), : , : } hs_parser = hs.argparser argparser = argparse.ArgumentParser(parents=[hs_parser], conflict_handler=, \ description="Scans hosts from the database using nmap, any arguments that are not in the help are passed to nmap") argparser.add_argument(, metavar=, \ help=, \ type=str, choices=nmap_types, default=, const=, nargs=) arguments, extra_nmap_args = argparser.parse_known_args() tags = nmap_types[nmap_types.index(arguments.type):] tags = ["!nmap_" + tag for tag in tags] hosts = hs.get_hosts(tags=tags) hosts = [host for host in hosts] nmap_args = [] nmap_args.extend(extra_nmap_args) nmap_args.extend(options[arguments.type].split()) print_notification("Running nmap with args: {} on {} hosts(s)".format(nmap_args, len(hosts))) if len(hosts): result = nmap(nmap_args, [str(h.address) for h in hosts]) for host in hosts: host.add_tag("nmap_{}".format(arguments.type)) host.save() print_notification("Nmap done, importing results") stats = import_nmap(result, "nmap_{}".format(arguments.type), check_function=all_hosts, import_services=True) stats[] = len(hosts) stats[] = arguments.type Logger().log(, "Performed nmap {} scan on {} hosts".format(arguments.type, len(hosts)), stats) else: print_notification("No hosts found")
Scans the given hosts with nmap.
383,301
def ensure_mapping_format(variables):
    if isinstance(variables, list):
        variables_dict = {}
        for map_dict in variables:
            variables_dict.update(map_dict)
        return variables_dict
    elif isinstance(variables, dict):
        return variables
    else:
        raise exceptions.ParamsError("variables format error!")
ensure variables are in mapping format. Args: variables (list/dict): original variables Returns: dict: ensured variables in dict format Examples: >>> variables = [ {"a": 1}, {"b": 2} ] >>> print(ensure_mapping_format(variables)) { "a": 1, "b": 2 }
383,302
def paint(self, painter, option, index): body_rect = QtCore.QRectF(option.rect) check_rect = QtCore.QRectF(body_rect) check_rect.setWidth(check_rect.height()) check_rect.adjust(6, 6, -6, -6) check_color = colors["idle"] if index.data(model.IsProcessing) is True: check_color = colors["active"] elif index.data(model.HasFailed) is True: check_color = colors["warning"] elif index.data(model.HasSucceeded) is True: check_color = colors["ok"] elif index.data(model.HasProcessed) is True: check_color = colors["ok"] metrics = painter.fontMetrics() label_rect = QtCore.QRectF(option.rect.adjusted( check_rect.width() + 12, 2, 0, -2)) assert label_rect.width() > 0 label = index.data(model.Label) label = metrics.elidedText(label, QtCore.Qt.ElideRight, label_rect.width() - 20) font_color = colors["idle"] if not index.data(model.IsChecked): font_color = colors["inactive"] painter.restore()
Paint checkbox and text _ |_| My label
383,303
def parse(self, url): parsed_url = urlparse.urlparse(url) try: default_config = self.CONFIG[parsed_url.scheme] except KeyError: raise ValueError( .format( self.__class__.__name__, url)) handler = self.get_handler_for_scheme(parsed_url.scheme) config = copy.deepcopy(default_config) return handler(parsed_url, config)
Return a configuration dict from a URL
383,304
def call_multiple_modules(module_gen): for args_seq in module_gen: module_name_or_path = args_seq[0] with replace_sys_args(args_seq): if re.match(VALID_PACKAGE_RE, module_name_or_path): runpy.run_module(module_name_or_path, run_name=) else: runpy.run_path(module_name_or_path, run_name=)
Call each module. module_gen should be an iterator.
383,305
def _setEncoderParams(self):
    self.rangeInternal = float(self.maxval - self.minval)
    self.resolution = float(self.rangeInternal) / (self.n - self.w)
    self.radius = self.w * self.resolution
    self.range = self.rangeInternal + self.resolution
    self.nInternal = self.n - 2 * self.padding
    self._bucketValues = None
Set the radius, resolution and range. These values are updated when minval and/or maxval change.
383,306
def convert_money(amount, currency_from, currency_to):
    new_amount = base_convert_money(amount, currency_from, currency_to)
    return moneyed.Money(new_amount, currency_to)
Convert 'amount' from 'currency_from' to 'currency_to' and return a Money instance of the converted amount.
383,307
def _find_usage_cloudtrail(self): trail_list = self.conn.describe_trails()[] trail_count = len(trail_list) if trail_list else 0 for trail in trail_list: data_resource_count = 0 if self.conn._client_config.region_name == trail[]: response = self.conn.get_event_selectors( TrailName=trail[] ) event_selectors = response[] for event_selector in event_selectors: data_resource_count += len( event_selector.get(, []) ) self.limits[]._add_current_usage( len(event_selectors), aws_type=, resource_id=trail[] ) self.limits[]._add_current_usage( data_resource_count, aws_type=, resource_id=trail[] ) else: logger.debug( % trail[] ) self.limits[]._add_current_usage( trail_count, aws_type=self.aws_type )
Calculate current usage for CloudTrail related metrics
383,308
def has_access(user, required_roles, match_all=True):
    if ROLE_ADMIN in user.roles:
        return True
    if isinstance(required_roles, str):
        if required_roles in user.roles:
            return True
        return False
    if match_all:
        for role in required_roles:
            if role not in user.roles:
                return False
        return True
    else:
        for role in required_roles:
            if role in user.roles:
                return True
        return False
Check if the user meets the role requirements. If mode is set to AND, all the provided roles must apply Args: user (:obj:`User`): User object required_roles (`list` of `str`): List of roles that the user must have applied match_all (`bool`): If true, all the required_roles must be applied to the user, else any one match will return `True` Returns: `bool`
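For illustration, a self-contained sketch of the same role-matching logic; the user object and the ROLE_ADMIN value are stand-ins invented for the demo, not the library's own definitions.

from dataclasses import dataclass, field
from typing import List, Union

ROLE_ADMIN = "admin"  # assumed sentinel; the real constant may differ

@dataclass
class DemoUser:  # stand-in for the real User model
    roles: List[str] = field(default_factory=list)

def has_access_sketch(user, required_roles: Union[str, List[str]], match_all: bool = True) -> bool:
    if ROLE_ADMIN in user.roles:            # admins always pass
        return True
    if isinstance(required_roles, str):     # single role: plain membership test
        return required_roles in user.roles
    matcher = all if match_all else any     # AND vs. OR semantics
    return matcher(role in user.roles for role in required_roles)

print(has_access_sketch(DemoUser(["auditor"]), ["auditor", "editor"], match_all=False))  # True
print(has_access_sketch(DemoUser(["auditor"]), ["auditor", "editor"], match_all=True))   # False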
383,309
def _load_item(self, key): key_u = key.upper() default = current_app.default_config.get(key_u) if isinstance(default, datetime.timedelta): current_app.config[key_u] = datetime.timedelta(self.getint(, key)) elif isinstance(default, bool): current_app.config[key_u] = self.getboolean(, key) elif isinstance(default, float): current_app.config[key_u] = self.getfloat(, key) elif isinstance(default, int): current_app.config[key_u] = self.getint(, key) else: current_app.config[key_u] = str(self.get(, key))
Load the specified item from the [flask] section. Type is determined by the type of the equivalent value in app.default_config or string if unknown.
383,310
def plot_burstness(corpus, B, **kwargs): try: import matplotlib.pyplot as plt import matplotlib.patches as mpatches except ImportError: raise RuntimeError() color = kwargs.get(, ) years = sorted(corpus.indices[].keys()) width = years[1] - years[0] height = 1.0 fig = plt.figure(figsize=(10,len(B)/4.)) f = 1 axes = {} for key, value in B.iteritems(): x,y = value ax = fig.add_subplot(len(B),1,f) f+=1 ax.set_yticks([]) ax.set_xbound(min(years), max(years) + 1) if not f == len(B)+1: ax.set_xticklabels([]) rect = mpatches.Rectangle((min(years), 0), sorted(x)[0]-min(years), height, fill=True, linewidth=0.0) rect.set_facecolor() rect.set_alpha(0.3) ax.add_patch(rect) for d in xrange(min(x), max(x)): try: i = x.index(d) except ValueError: continue xy = (d, 0.) state = y[i] rect = mpatches.Rectangle(xy, width, height, fill=True, linewidth=0.0) rect.set_facecolor(color) rect.set_alpha(state) ax.add_patch(rect) ax.set_ylabel(key, rotation=0, horizontalalignment=, verticalalignment=) plt.subplots_adjust(left=0.5) fig.tight_layout(h_pad=0.25) plt.show()
Generate a figure depicting burstness profiles for ``feature``. Parameters ---------- B Returns ------- fig : :class:`matplotlib.figure.Figure` Examples -------- .. code-block:: python >>> from tethne.analyze.corpus import burstness >>> fig = plot_burstness(corpus, 'citations', topn=2, perslice=True) >>> fig.savefig('~/burstness.png') Years prior to the first occurrence of each feature are grayed out. Periods in which the feature was bursty are depicted by colored blocks, the opacity of which indicates burstness intensity. .. figure:: _static/images/burstness.png :width: 600 :align: center
383,311
def get_provisioned_table_write_units(table_name): try: desc = DYNAMODB_CONNECTION.describe_table(table_name) except JSONResponseError: raise write_units = int( desc[u][u][u]) logger.debug(.format( table_name, write_units)) return write_units
Returns the number of provisioned write units for the table :type table_name: str :param table_name: Name of the DynamoDB table :returns: int -- Number of write units
383,312
def replace(self, *args, **kwargs):
    replacements = {
        "lower": self.lower,
        "upper": self.upper,
        "lower_inc": self.lower_inc,
        "upper_inc": self.upper_inc,
    }
    replacements.update(
        dict(zip(("lower", "upper", "lower_inc", "upper_inc"), args)))
    replacements.update(kwargs)
    return self.__class__(**replacements)
replace(lower=None, upper=None, lower_inc=None, upper_inc=None) Returns a new instance of self with the given arguments replaced. It takes the exact same arguments as the constructor. >>> intrange(1, 5).replace(upper=10) intrange([1,10)) >>> intrange(1, 10).replace(lower_inc=False) intrange([2,10)) >>> intrange(1, 10).replace(5) intrange([5,10)) Note that range objects are immutable and are never modified in place.
383,313
def __check_table_rules(configuration): for table_name in configuration[]: table = configuration[][table_name] valid_units = [, ] if table[] not in valid_units: print() sys.exit(1) if table[] not in valid_units: print() sys.exit(1) if table[] not in valid_units: print( ) sys.exit(1) if table[] not in valid_units: print( ) sys.exit(1) if ( in table and table[] and table[] not in valid_units): print( ) sys.exit(1) if ( in table and table[] and table[] not in valid_units): print( ) sys.exit(1) if ( in table and table[] and table[] not in valid_units): print( ) sys.exit(1) if ( in table and table[] and table[] not in valid_units): print( ) sys.exit(1) if ( in table and table[] and table[] not in valid_units): print( ) sys.exit(1) if ( in table and table[] and table[] not in valid_units): print( ) sys.exit(1) if table[] < 1: print( ) sys.exit(1) valid_sns_message_types = [ , , , ] if table[]: for sns_type in table[]: if sns_type not in valid_sns_message_types: print(.format( sns_type)) table[].remove(sns_type) options = [ , , , , , , , , , , , , , , , ] non_default = [ , ] for option in options: if (option in non_default and option in table and table[option] and table[option] < 1): print(.format( option, table_name)) sys.exit(1) if (option in table and option not in non_default and table[option] < 1): print(.format( option, table_name)) sys.exit(1) if (int(table[]) > int(table[])): print( .format( table[], table[], table_name)) sys.exit(1) elif (int(table[]) > int(table[])): print( .format( table[], table[], table_name)) sys.exit(1)
Do some basic checks on the configuration
383,314
def markdown(text, html=False, valid_tags=GFM_TAGS):
    if text is None:
        return None
    if html:
        return Markup(sanitize_html(markdown_convert_html(gfm(text)),
                                    valid_tags=valid_tags))
    else:
        return Markup(markdown_convert_text(gfm(text)))
Return Markdown rendered text using GitHub Flavoured Markdown, with HTML escaped and syntax-highlighting enabled.
383,315
def distance_matrix(a, b, periodic):
    a = a
    b = b[:, np.newaxis]
    return periodic_distance(a, b, periodic)
Calculate a distance matrix between coordinate sets a and b.
383,316
def add_dataset(self, name=None, label=None, x_column_label=None, y_column_label=None, index=None, control=False): if name is None and label is None and index is None: raise ValueError("Attempting to add a dataset without" + "supplying index or file information.") if index is None: trans_dict = DataImportContainer() if name is not None: trans_dict.file_name = name if label is not None: trans_dict.label = label if x_column_label is not None: trans_dict.x_column_label = x_column_label if y_column_label is not None: trans_dict.y_column_label = y_column_label if control: self.control = trans_dict else: if not in self.__dict__: self.file = [] self.file.append(trans_dict) else: if control: self.control = DataImportContainer() self.control.index = index else: if not in self.__dict__: self.indices = [] self.indices.append(index) return
Add a dataset to a specific plot.

This method adds a dataset to a plot. Its functional use is imperative to the plot generation. It handles adding new files as well as indexing to files that are added to other plots.

All Args default to None. However, these are not the defaults in the code. See DataImportContainer attributes for defaults in code.

Args:
    name (str, optional): Name (path) for file. Required if reading from a file (at least one). Required if file_name is not in "general". Must be ".txt" or ".hdf5". Can include path from working directory.
    label (str, optional): Column label in the dataset corresponding to desired SNR value. Required if reading from a file (at least one).
    x_column_label/y_column_label (str, optional): Column label from input file identifying x/y values. This can override setting in "general". Default is `x`/`y`.
    index (int, optional): Index of plot with preloaded data. Required if not loading a file.
    control (bool, optional): If True, this dataset is set to the control. This is needed for Ratio plots. It sets the baseline. Default is False.

Raises:
    ValueError: If no options are passed. This means no file indication nor index.
383,317
def check(text): err = "misc.waxed" msg = u"The modifier following must be an adj.: is correct" waxes = ["wax", "waxes", "waxed", "waxing"] modifiers = [("ebullient", "ebulliently"), ("ecstatic", "ecstatically"), ("eloquent", "eloquently"), ("enthusiastic", "enthusiastically"), ("euphoric", "euphorically"), ("indignant", "indignantly"), ("lyrical", "lyrically"), ("melancholic", "melancholically"), ("metaphorical", "metaphorically"), ("nostalgic", "nostalgically"), ("patriotic", "patriotically"), ("philosophical", "philosophically"), ("poetic", "poetically"), ("rhapsodic", "rhapsodically"), ("romantic", "romantically"), ("sentimental", "sentimentally") ] def pairs(word): return [[word + + pair[0], [word + + pair[1]]] for pair in modifiers] preferred = [] for word in waxes: preferred += pairs(word) return preferred_forms_check(text, preferred, err, msg)
Suggest the preferred forms.
383,318
def flush(self):
    if self._writable:
        with self._seek_lock:
            self._flush_raw_or_buffered()
            self._write_buffer = bytearray(self._buffer_size)
            self._buffer_seek = 0
Flush the write buffers of the stream if applicable.
383,319
def convert_weights_to_numpy(weights_dict):
    return dict([(k.replace("arg:", "").replace("aux:", ""), v.asnumpy())
                 for k, v in weights_dict.items()])
Convert weights to numpy
383,320
def get_component_product(self, other):
    return Point(self.x * other.x, self.y * other.y)
Returns the component product of this vector and the given other vector.
383,321
def _handle_lrr(self, data):
    msg = LRRMessage(data)
    if not self._ignore_lrr_states:
        self._lrr_system.update(msg)
    self.on_lrr_message(message=msg)
    return msg
Handle Long Range Radio messages. :param data: LRR message to parse :type data: string :returns: :py:class:`~alarmdecoder.messages.LRRMessage`
383,322
def set(self, key, val, bucket):
    if bucket not in self._cache:
        self._cache[bucket] = {}
    self._cache[bucket][key] = val
Set a cached item by key. WARN: Regardless of whether the item is already in the cache, it will be updated with the new value.
383,323
def _send_request(self, xml_request): if self._scheme == : return self._send_http_request(xml_request) else: return self._send_socket_request(xml_request)
Send the prepared XML request block to the CPS using the correct protocol.

Args:
    xml_request -- A fully formed xml request string for the CPS.

Returns:
    The raw xml response string.

Raises:
    ConnectionError -- Can't establish a connection with the server.
383,324
def child_task(self, q, l, gq, gl):
    mp_util.child_close_fds()
    from ..lib import wx_processguard
    from ..lib.wx_loader import wx
    from MAVProxy.modules.mavproxy_misseditor import missionEditorFrame
    self.app = wx.App(False)
    self.app.frame = missionEditorFrame.MissionEditorFrame(parent=None, id=wx.ID_ANY)
    self.app.frame.set_event_queue(q)
    self.app.frame.set_event_queue_lock(l)
    self.app.frame.set_gui_event_queue(gq)
    self.app.frame.set_gui_event_queue_lock(gl)
    self.app.frame.Show()
    self.app.MainLoop()
child process - this holds GUI elements
383,325
def return_markers(self): ent_file = self._filename.with_suffix() if not ent_file.exists(): ent_file = self._filename.with_suffix() try: ent_notes = _read_ent(ent_file) except (FileNotFoundError, PermissionError): markers = [] else: allnote = [] for n in ent_notes: try: n[].keys() allnote.append(n[]) except AttributeError: lg.debug( .format(n[])) s_freq = self._hdr[][] pcname = note_time = [] note_name = [] note_note = [] for n in allnote: if n[] == : continue if not n[]: continue if not in n[].keys(): continue user1 = n[][] == user2 = False user3 = n[][] == pcname user4 = n[][] == user5 = n[][] == if user1 or user2 or user3 or user4 or user5: continue if len(n[][]) == 0: note_name.append() else: note_name.append(n[][].split()[0]) note_time.append(n[] / s_freq) note_note.append(n[]) markers = [] for time, name, note in zip(note_time, note_name, note_note): m = {: note + + name + , : time, : time, : None, } markers.append(m) return markers
Reads the notes of the Ktlx recordings.
383,326
def get_student_email(cmd_args, endpoint=): log.info("Attempting to get student email") if cmd_args.local: return None access_token = authenticate(cmd_args, endpoint=endpoint, force=False) if not access_token: return None try: return get_info(cmd_args, access_token)[] except IOError as e: return None
Attempts to get the student's email. Returns the email, or None.
383,327
def to_unicode(string):
    if isinstance(string, six.binary_type):
        return string.decode()
    if isinstance(string, six.text_type):
        return string
    if six.PY2:
        return unicode(string)
    return str(string)
Ensure a passed string is unicode
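A minimal usage sketch using plain bytes/str instead of the six aliases; UTF-8 is an assumption for the decode codec, which is not spelled out above.

def to_unicode_sketch(value):
    if isinstance(value, bytes):
        return value.decode("utf-8")   # codec assumed; bytes become text
    if isinstance(value, str):
        return value                   # text passes through untouched
    return str(value)                  # anything else is coerced

print(to_unicode_sketch(b"caf\xc3\xa9"))  # café
print(to_unicode_sketch(3.14))            # '3.14'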
383,328
def compute_avg_adj_deg(G):
    return np.sum(np.dot(G.A, G.A), axis=1) / (np.sum(G.A, axis=1) + 1.)
Compute the average adjacency degree for each node.

The average adjacency degree is the average of the degrees of a node and its neighbors.

Parameters
----------
G : Graph
    Graph on which the statistic is extracted
383,329
def get_asset_lookup_session_for_repository(self, repository_id=None, proxy=None):
    return AssetLookupSession(
        self._provider_manager.get_asset_lookup_session_for_repository(repository_id, proxy),
        self._config_map)
Gets the ``OsidSession`` associated with the asset lookup service for the given repository. arg: repository_id (osid.id.Id): the ``Id`` of the repository arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetLookupSession) - an ``AssetLookupSession`` raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` or ``proxy`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_asset_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_lookup()`` and ``supports_visible_federation()`` are ``true``.*
383,330
def batch_annotate_files(
        self,
        requests,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
):
    if "batch_annotate_files" not in self._inner_api_calls:
        self._inner_api_calls[
            "batch_annotate_files"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.batch_annotate_files,
            default_retry=self._method_configs["BatchAnnotateFiles"].retry,
            default_timeout=self._method_configs["BatchAnnotateFiles"].timeout,
            client_info=self._client_info,
        )
    request = image_annotator_pb2.BatchAnnotateFilesRequest(requests=requests)
    return self._inner_api_calls["batch_annotate_files"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Service that performs image detection and annotation for a batch of files. Now only "application/pdf", "image/tiff" and "image/gif" are supported. This service will extract at most the first 10 frames (gif) or pages (pdf or tiff) from each file provided and perform detection and annotation for each image extracted. Example: >>> from google.cloud import vision_v1p4beta1 >>> >>> client = vision_v1p4beta1.ImageAnnotatorClient() >>> >>> # TODO: Initialize `requests`: >>> requests = [] >>> >>> response = client.batch_annotate_files(requests) Args: requests (list[Union[dict, ~google.cloud.vision_v1p4beta1.types.AnnotateFileRequest]]): The list of file annotation requests. Right now we support only one AnnotateFileRequest in BatchAnnotateFilesRequest. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.vision_v1p4beta1.types.AnnotateFileRequest` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.vision_v1p4beta1.types.BatchAnnotateFilesResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
383,331
def load(source, **kwargs) -> JsonObj: if isinstance(source, str): if in source: req = Request(source) req.add_header("Accept", "application/json, text/json;q=0.9") with urlopen(req) as response: jsons = response.read() else: with open(source) as f: jsons = f.read() elif hasattr(source, "read"): jsons = source.read() else: raise TypeError("Unexpected type {} for source {}".format(type(source), source)) return loads(jsons, **kwargs)
Deserialize a JSON source. :param source: a URI, File name or a .read()-supporting file-like object containing a JSON document :param kwargs: arguments. see: json.load for details :return: JsonObj representing fp
383,332
def earthquake_contour_preprocessor(impact_function): contour_path = create_smooth_contour(impact_function.hazard) if os.path.exists(contour_path): from safe.gis.tools import load_layer return load_layer(contour_path, tr(), )[0]
Preprocessor to create contour from an earthquake :param impact_function: Impact function to run. :type impact_function: ImpactFunction :return: The contour layer. :rtype: QgsMapLayer
383,333
def get_range_info(array, component): r = array.GetRange(component) comp_range = {} comp_range[] = r[0] comp_range[] = r[1] comp_range[] = array.GetComponentName(component) return comp_range
Get the data range of the array's component
383,334
def extern_store_bytes(self, context_handle, bytes_ptr, bytes_len):
    c = self._ffi.from_handle(context_handle)
    return c.to_value(binary_type(self._ffi.buffer(bytes_ptr, bytes_len)))
Given a context and raw bytes, return a new Handle to represent the content.
383,335
def ComputeRoot(hashes):
    if not len(hashes):
        raise Exception()
    if len(hashes) == 1:
        return hashes[0]
    tree = MerkleTree(hashes)
    return tree.Root.Hash
Compute the root hash. Args: hashes (list): the list of hashes to build the root from. Returns: bytes: the root hash.
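The MerkleTree internals are not shown in this snippet. As a rough, generic illustration of what building a root involves, here is a pairwise-hashing sketch; double SHA-256 and duplicating the last node on odd levels are assumptions and may not match the library's exact scheme.

import hashlib

def merkle_root_sketch(hashes):
    if not hashes:
        raise ValueError("hashes list can not be empty")
    level = list(hashes)
    while len(level) > 1:
        if len(level) % 2 == 1:
            level.append(level[-1])        # duplicate the last node on odd levels
        level = [
            hashlib.sha256(hashlib.sha256(level[i] + level[i + 1]).digest()).digest()
            for i in range(0, len(level), 2)
        ]
    return level[0]

leaves = [hashlib.sha256(bytes([i])).digest() for i in range(4)]
print(merkle_root_sketch(leaves).hex())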
383,336
def get_agent(msg): agent = msg[][] if isinstance(agent, list): agent = agent[0] return agent
Handy hack to handle legacy messages where 'agent' was a list.
383,337
def choice(self, board: Union[chess.Board, int], *,
           minimum_weight: int = 1,
           exclude_moves: Container[chess.Move] = (),
           random=random) -> Entry:
    chosen_entry = None
    for i, entry in enumerate(self.find_all(board, minimum_weight=minimum_weight,
                                            exclude_moves=exclude_moves)):
        if chosen_entry is None or random.randint(0, i) == i:
            chosen_entry = entry
    if chosen_entry is None:
        raise IndexError()
    return chosen_entry
Uniformly selects a random entry for the given position. :raises: :exc:`IndexError` if no entries are found.
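The random.randint(0, i) == i test in the loop is the classic reservoir-sampling trick: after a single pass, every entry seen so far has had an equal chance of being the one kept. A self-contained demonstration of the same idea:

import random
from collections import Counter

def reservoir_choice(iterable, rng=random):
    chosen = None
    for i, item in enumerate(iterable):
        if rng.randint(0, i) == i:   # keep the i-th item with probability 1/(i+1)
            chosen = item
    if chosen is None:
        raise IndexError("no entries found")
    return chosen

counts = Counter(reservoir_choice("abcd") for _ in range(10000))
print(counts)  # roughly 2500 each for 'a', 'b', 'c', 'd'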
383,338
def mkdir(path, mode=0o755, delete=False):
    logger.info("mkdir: %s" % path)
    if os.path.isdir(path):
        if not delete:
            return True
        if not remove(path):
            return False
    try:
        os.makedirs(path, mode)
        return True
    except Exception:
        logger.exception("Failed to mkdir: %s" % path)
        return False
Make a directory. Create a leaf directory and all intermediate ones. Works like ``mkdir``, except that any intermediate path segment (not just the rightmost) will be created if it does not exist. This is recursive. Args: path (str): Directory to create mode (int): Directory mode delete (bool): Delete directory/file if exists Returns: bool: True if succeeded else False
383,339
def _path_pair(self, s): if s.startswith(b): parts = s[1:].split(b, 1) else: parts = s.split(b, 1) if len(parts) != 2: self.abort(errors.BadFormat, , , s) elif parts[1].startswith(b) and parts[1].endswith(b): parts[1] = parts[1][1:-1] elif parts[1].startswith(b) or parts[1].endswith(b): self.abort(errors.BadFormat, , , s) return [_unquote_c_string(s) for s in parts]
Parse two paths separated by a space.
383,340
def releases(self):
    available_releases = {}
    available_revisions = getattr(self, self.release_scheme)
    for identifier, revision in available_revisions.items():
        match = self.compiled_filter.match(identifier)
        if match:
            captures = match.groups()
            if captures:
                identifier = captures[0]
            available_releases[identifier] = Release(
                revision=revision,
                identifier=identifier,
            )
    return available_releases
r""" A dictionary that maps release identifiers to :class:`Release` objects. Here's an example based on a mirror of the git project's repository which shows the last ten releases based on tags, where each release identifier captures a tag without its 'v' prefix: >>> from pprint import pprint >>> from vcs_repo_mgr.backends.git import GitRepo >>> repository = GitRepo(remote='https://github.com/git/git.git', ... release_scheme='tags', ... release_filter=r'^v(\d+(?:\.\d+)*)$') >>> pprint(repository.ordered_releases[-10:]) [Release(revision=Revision(..., tag='v2.2.2', ...), identifier='2.2.2'), Release(revision=Revision(..., tag='v2.3.0', ...), identifier='2.3.0'), Release(revision=Revision(..., tag='v2.3.1', ...), identifier='2.3.1'), Release(revision=Revision(..., tag='v2.3.2', ...), identifier='2.3.2'), Release(revision=Revision(..., tag='v2.3.3', ...), identifier='2.3.3'), Release(revision=Revision(..., tag='v2.3.4', ...), identifier='2.3.4'), Release(revision=Revision(..., tag='v2.3.5', ...), identifier='2.3.5'), Release(revision=Revision(..., tag='v2.3.6', ...), identifier='2.3.6'), Release(revision=Revision(..., tag='v2.3.7', ...), identifier='2.3.7'), Release(revision=Revision(..., tag='v2.4.0', ...), identifier='2.4.0')]
383,341
def ec(ns=None, cn=None, di=None, lo=None, iq=None, ico=None):
    return CONN.EnumerateClasses(ns,
                                 ClassName=cn,
                                 DeepInheritance=di,
                                 LocalOnly=lo,
                                 IncludeQualifiers=iq,
                                 IncludeClassOrigin=ico)
This function is a wrapper for :meth:`~pywbem.WBEMConnection.EnumerateClasses`. Enumerate the subclasses of a class, or the top-level classes in a namespace. Parameters: ns (:term:`string`): Name of the CIM namespace to be used (case independent). If `None`, defaults to the namespace of the `cn` parameter if specified as a `CIMClassName`, or to the default namespace of the connection. cn (:term:`string` or :class:`~pywbem.CIMClassName`): Name of the class whose subclasses are to be enumerated (case independent). `None` will enumerate the top-level classes. If specified as a `CIMClassName` object, its `host` attribute will be ignored. di (:class:`py:bool`): DeepInheritance flag: Include also indirect subclasses. `None` will cause the server default of `False` to be used. lo (:class:`py:bool`): LocalOnly flag: Exclude inherited properties. `None` will cause the server default of `True` to be used. iq (:class:`py:bool`): IncludeQualifiers flag: Include qualifiers. `None` will cause the server default of `True` to be used. ico (:class:`py:bool`): IncludeClassOrigin flag: Include class origin information for properties and methods in the retrieved class. `None` will cause the server default of `False` to be used. Returns: list of :class:`~pywbem.CIMClass`: The enumerated classes.
383,342
def matches(self, stream):
    if self.match_type != stream.stream_type:
        return False
    if self.match_id is not None:
        return self.match_id == stream.stream_id
    if self.match_spec == DataStreamSelector.MatchUserOnly:
        return not stream.system
    elif self.match_spec == DataStreamSelector.MatchSystemOnly:
        return stream.system
    elif self.match_spec == DataStreamSelector.MatchUserAndBreaks:
        return (not stream.system) or (stream.system and
                                       (stream.stream_id in DataStream.KnownBreakStreams))
    return True
Check if this selector matches the given stream Args: stream (DataStream): The stream to check Returns: bool: True if this selector matches the stream
383,343
def put_abs(self, r, c, ch):
    r = constrain(r, 1, self.rows)
    c = constrain(c, 1, self.cols)
    if isinstance(ch, bytes):
        ch = self._decode(ch)[0]
    else:
        ch = ch[0]
    self.w[r-1][c-1] = ch
Screen array starts at 1 index.
383,344
def _do_close(self): AMQP_LOGGER.debug(, self.channel_id) self.is_open = False channel_id, self.channel_id = self.channel_id, None connection, self.connection = self.connection, None if connection: connection.channels.pop(channel_id, None) connection._avail_channel_ids.append(channel_id) self.callbacks.clear() self.cancel_callbacks.clear() self.events.clear() self.no_ack_consumers.clear()
Tear down this object, after we've agreed to close with the server.
383,345
def bucket_to_dataframe(name, buckets, append_name=None): expanded_buckets = [] for item in buckets: if type(item) is dict: single_dict = item else: single_dict = item.to_dict() single_dict[name] = single_dict.pop() if append_name: persistance_dict = single_dict.copy() for key in persistance_dict.keys(): single_dict[append_name + + key] = single_dict.pop(key) expanded_buckets.append(single_dict) return pd.DataFrame(expanded_buckets)
A function that turns elasticsearch aggregation buckets into dataframes :param name: The name of the bucket (will be a column in the dataframe) :type name: str :param bucket: a bucket from elasticsearch results :type bucket: list[dict] :returns: pandas.DataFrame
383,346
def credit_card_number(self, card_type=None):
    card = self._credit_card_type(card_type)
    prefix = self.random_element(card.prefixes)
    number = self._generate_number(self.numerify(prefix), card.length)
    return number
Returns a valid credit card number.
383,347
def _search_ldap(self, ldap, con, username):
    if self.auth_ldap_append_domain:
        username = username + "@" + self.auth_ldap_append_domain
    if self.auth_ldap_search_filter:
        filter_str = "(&%s(%s=%s))" % (
            self.auth_ldap_search_filter,
            self.auth_ldap_uid_field,
            username,
        )
    else:
        filter_str = "(%s=%s)" % (self.auth_ldap_uid_field, username)
    user = con.search_s(
        self.auth_ldap_search,
        ldap.SCOPE_SUBTREE,
        filter_str,
        [
            self.auth_ldap_firstname_field,
            self.auth_ldap_lastname_field,
            self.auth_ldap_email_field,
        ],
    )
    if user:
        if not user[0][0]:
            return None
    return user
Searches LDAP for user, assumes ldap_search is set. :param ldap: The ldap module reference :param con: The ldap connection :param username: username to match with auth_ldap_uid_field :return: ldap object array
383,348
def multi_muscle_align(data, samples, ipyclient): LOGGER.info("starting alignments") lbview = ipyclient.load_balanced_view() start = time.time() printstr = " aligning clusters | {} | s6 |" elapsed = datetime.timedelta(seconds=int(time.time()-start)) progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer) path = os.path.join(data.tmpdir, data.name + ".chunk_*") clustbits = glob.glob(path) jobs = {} for idx in xrange(len(clustbits)): args = [data, samples, clustbits[idx]] jobs[idx] = lbview.apply(persistent_popen_align3, *args) allwait = len(jobs) elapsed = datetime.timedelta(seconds=int(time.time()-start)) progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer) while 1: finished = [i.ready() for i in jobs.values()] fwait = sum(finished) elapsed = datetime.timedelta(seconds=int(time.time()-start)) progressbar(allwait, fwait, printstr.format(elapsed), spacer=data._spacer) time.sleep(0.1) if all(finished): break keys = jobs.keys() for idx in keys: if not jobs[idx].successful(): LOGGER.error("error in persistent_popen_align %s", jobs[idx].exception()) raise IPyradWarningExit("error in step 6 {}".format(jobs[idx].exception())) del jobs[idx] print("")
Sends the cluster bits to nprocessors for muscle alignment. They return with indel.h5 handles to be concatenated into a joint h5.
383,349
def approveproposal(self, proposal_ids, account=None, approver=None, **kwargs): from .proposal import Proposal if not account: if "default_account" in self.config: account = self.config["default_account"] if not account: raise ValueError("You need to provide an account") account = Account(account, blockchain_instance=self) is_key = approver and approver[:3] == self.prefix if not approver and not is_key: approver = account elif approver and not is_key: approver = Account(approver, blockchain_instance=self) else: approver = PublicKey(approver) if not isinstance(proposal_ids, (list, set, tuple)): proposal_ids = {proposal_ids} op = [] for proposal_id in proposal_ids: proposal = Proposal(proposal_id, blockchain_instance=self) update_dict = { "fee": {"amount": 0, "asset_id": "1.3.0"}, "fee_paying_account": account["id"], "proposal": proposal["id"], "prefix": self.prefix, } if is_key: update_dict.update({"key_approvals_to_add": [str(approver)]}) else: update_dict.update({"active_approvals_to_add": [approver["id"]]}) op.append(operations.Proposal_update(**update_dict)) if is_key: self.txbuffer.appendSigner(approver, "active") return self.finalizeOp(op, account["name"], "active", **kwargs) return self.finalizeOp(op, approver, "active", **kwargs)
Approve Proposal

:param list proposal_ids: Ids of the proposals
:param str approver: The account or key to use for approval (defaults to ``account``)
:param str account: (optional) the account to allow access to (defaults to ``default_account``)
383,350
def get_combo(self, symbol):
    for parent, legs in self.instrument_combos.items():
        if symbol == parent or symbol in legs.keys():
            return {
                "parent": self.get_instrument(parent),
                "legs": legs,
            }
    return {
        "parent": None,
        "legs": {},
    }
get group by child symbol
383,351
def _JModule(spec, javaname):
    cls = _JImportFactory(spec, javaname)
    out = cls(spec.name)
    return out
(internal) Front end for creating a java module dynamically
383,352
def can_overlap(self, contig, strand=None):
    return (self.on_contig(contig) and
            (strand is None or self.on_strand(strand)))
Is this locus on the same contig and (optionally) on the same strand?
383,353
def populateViewTree(self, view): vuid = view.getUniqueId() text = view.__smallStr__() if view.getParent() is None: self.viewTree.insert(, Tkinter.END, vuid, text=text) else: self.viewTree.insert(view.getParent().getUniqueId(), Tkinter.END, vuid, text=text, tags=()) self.viewTree.set(vuid, , if view.isTarget() else ) self.viewTree.tag_bind(, , self.viewTreeItemClicked)
Populates the View tree.
383,354
def query_one(cls, *args, **kwargs):
    doc = cls._coll.find_one(*args, **kwargs)
    if doc:
        return cls.from_storage(doc)
Same as collection.find_one, but returns a Document rather than a dict.
383,355
def urljoin(base, path=None):
    if path is None:
        url = base
    else:
        if not base.endswith('/'):
            base += '/'
        url = urllib.parse.urljoin(base, str(path))
    return url
Join a base url with a relative path.
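A quick demonstration of why the wrapper appends a trailing slash before delegating to the standard library; the URL is made up for illustration.

from urllib.parse import urljoin

print(urljoin("https://example.com/api/v1", "users"))   # https://example.com/api/users  (last segment replaced)
print(urljoin("https://example.com/api/v1/", "users"))  # https://example.com/api/v1/users  (path extended)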
383,356
def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None): from .utils import connection_with_anon, connection_with_gs path = addextension(path, ext) scheme, bucket_name, keylist = self.getfiles( path, start=start, stop=stop, recursive=recursive) if not keylist: raise FileNotFoundError("No objects found for " % path) credentials = self.credentials self.nfiles = len(keylist) if spark and isinstance(self.engine, spark): def getsplit(kvIter): if scheme == or scheme == : conn = connection_with_anon(credentials) bucket = conn.get_bucket(bucket_name) elif scheme == : conn = boto.storage_uri(bucket_name, ) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) for kv in kvIter: idx, keyname = kv key = bucket.get_key(keyname) buf = key.get_contents_as_string() yield idx, buf, keyname npartitions = min(npartitions, self.nfiles) if npartitions else self.nfiles rdd = self.engine.parallelize(enumerate(keylist), npartitions) return rdd.mapPartitions(getsplit) else: if scheme == or scheme == : conn = connection_with_anon(credentials) bucket = conn.get_bucket(bucket_name) elif scheme == : conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) def getsplit(kv): idx, keyName = kv key = bucket.get_key(keyName) buf = key.get_contents_as_string() return idx, buf, keyName return [getsplit(kv) for kv in enumerate(keylist)]
Sets up Spark RDD across S3 or GS objects specified by dataPath. Returns RDD of <string bucket keyname, string buffer> k/v pairs.
383,357
def open_netcdf_writer(self, flatten=False, isolate=False, timeaxis=1):
    self._netcdf_writer = netcdftools.NetCDFInterface(
        flatten=bool(flatten),
        isolate=bool(isolate),
        timeaxis=int(timeaxis))
Prepare a new |NetCDFInterface| object for writing data.
383,358
def _add_text_ngrams(self, witness, minimum, maximum): text_id = self._get_text_id(witness) self._logger.info(.format( minimum, maximum, witness.get_filename())) skip_sizes = [] for size in range(minimum, maximum + 1): if self._has_ngrams(text_id, size): self._logger.info( .format(size)) skip_sizes.append(size) for size, ngrams in witness.get_ngrams(minimum, maximum, skip_sizes): self._add_text_size_ngrams(text_id, size, ngrams)
Adds n-gram data from `witness` to the data store. :param witness: witness to get n-grams from :type witness: `WitnessText` :param minimum: minimum n-gram size :type minimum: `int` :param maximum: maximum n-gram size :type maximum: `int`
383,359
def _get_motor_parameters(json_file): with open(json_file) as motor_fd: global_config = json.load(motor_fd) motors = global_config["motors"] motor_config = {} for motor in motors: motor_config[motor] = motors[motor] return motor_config
Returns a dictionary with joints as keys, and a description (dict) of each joint as value
383,360
def has_nrows( state, incorrect_msg="Your query returned a table with {{n_stu}} row{{ if n_stu > 1 else }} while it should return a table with {{n_sol}} row{{ if n_sol > 1 else }}.", ): has_result(state) n_stu = len(next(iter(state.student_result.values()))) n_sol = len(next(iter(state.solution_result.values()))) if n_stu != n_sol: _msg = state.build_message( incorrect_msg, fmt_kwargs={"n_stu": n_stu, "n_sol": n_sol} ) state.do_test(_msg) return state
Test whether the student and solution query results have equal numbers of rows. Args: incorrect_msg: If specified, this overrides the automatically generated feedback message in case the number of rows in the student and solution query don't match.
383,361
def _hijacked_run_baton_query( self, baton_binary: BatonBinary, program_arguments: List[str]=None, input_data: Any=None) -> List[Dict]: if baton_binary == BatonBinary.BATON_CHMOD: current_frame = inspect.currentframe() def frame_code_in_same_file(frame) -> bool: return frame_back.f_code.co_filename == current_frame.f_code.co_filename frame_back = current_frame.f_back assert frame_code_in_same_file(frame_back) while frame_back is not None and frame_code_in_same_file(frame_back): if id(frame_back) in self._hijack_frame_ids: return self._original_run_baton_query(baton_binary, [BATON_CHMOD_RECURSIVE_FLAG], input_data) frame_back = frame_back.f_back return self._original_run_baton_query(baton_binary, program_arguments, input_data)
Hijacked `run_baton_query` method with hijacking to add the `--recursive` flag to calls to `baton-chmod` that originate from code called from frames with the ids in `self._hijack_frame_ids`. :param baton_binary: see `BatonRunner.run_baton_query` :param program_arguments: see `BatonRunner.run_baton_query` :param input_data: see `BatonRunner.run_baton_query` :return: see `BatonRunner.run_baton_query`
383,362
def _axis(self, axis):
    return self.df.columns if axis == 0 else self.df.index
Return the corresponding labels taking into account the axis. The axis could be horizontal (0) or vertical (1).
383,363
def dump(obj, attributes = True, _refset = None): "Show full value of a data object" if _refset is None: _refset = set() if obj is None: return None elif isinstance(obj, DataObject): if id(obj) in _refset: attributes = False else: _refset.add(id(obj)) cls = type(obj) clsname = getattr(cls, , ) + + getattr(cls, , ) baseresult = {: clsname, : obj.getkey()} if not attributes: return baseresult else: baseresult.update((k,dump(v, attributes, _refset)) for k,v in vars(obj).items() if k[:1] != ) _refset.remove(id(obj)) return baseresult elif isinstance(obj, ReferenceObject): if obj._ref is not None: return dump(obj._ref, attributes, _refset) else: return {:obj.getkey()} elif isinstance(obj, WeakReferenceObject): return {:obj.getkey()} elif isinstance(obj, DataObjectSet): return dump(list(obj.dataset())) elif isinstance(obj, dict): return dict((k, dump(v, attributes, _refset)) for k,v in obj.items()) elif isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, set): return [dump(v, attributes, _refset) for v in obj] else: return obj
Show full value of a data object
383,364
def expr_str(expr, sc_expr_str_fn=standard_sc_expr_str):
    if expr.__class__ is not tuple:
        return sc_expr_str_fn(expr)
    if expr[0] is AND:
        return "{} && {}".format(_parenthesize(expr[1], OR, sc_expr_str_fn),
                                 _parenthesize(expr[2], OR, sc_expr_str_fn))
    if expr[0] is OR:
        return "{} || {}".format(_parenthesize(expr[1], AND, sc_expr_str_fn),
                                 _parenthesize(expr[2], AND, sc_expr_str_fn))
    if expr[0] is NOT:
        if expr[1].__class__ is tuple:
            return "!({})".format(expr_str(expr[1], sc_expr_str_fn))
        return "!" + sc_expr_str_fn(expr[1])
    return "{} {} {}".format(sc_expr_str_fn(expr[1]),
                             _REL_TO_STR[expr[0]],
                             sc_expr_str_fn(expr[2]))
Returns the string representation of the expression 'expr', as in a Kconfig file. Passing subexpressions of expressions to this function works as expected. sc_expr_str_fn (default: standard_sc_expr_str): This function is called for every symbol/choice (hence "sc") appearing in the expression, with the symbol/choice as the argument. It is expected to return a string to be used for the symbol/choice. This can be used e.g. to turn symbols/choices into links when generating documentation, or for printing the value of each symbol/choice after it. Note that quoted values are represented as constants symbols (Symbol.is_constant == True).
383,365
def pid_exists(pid):
    try:
        os.kill(pid, 0)
    except OSError as exc:
        return exc.errno == errno.EPERM
    else:
        return True
Determines if a system process identifier exists in the process table.
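Why this works: signal 0 performs the existence and permission checks without delivering anything, and EPERM means the process exists but belongs to another user. A minimal, POSIX-only demo:

import errno
import os

def pid_exists(pid):
    try:
        os.kill(pid, 0)                   # signal 0: check only, nothing delivered
    except OSError as exc:
        return exc.errno == errno.EPERM   # EPERM -> exists but not ours; ESRCH -> gone
    return True

print(pid_exists(os.getpid()))  # True: the current process certainly exists
print(pid_exists(2 ** 22 + 1))  # almost certainly False on a typical system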
383,366
def sequence_to_graph(G, seq, color=): for x in seq: if x.endswith("_1"): G.node(x, color=color, width="0.1", shape="circle", label="") else: G.node(x, color=color) for a, b in pairwise(seq): G.edge(a, b, color=color)
Automatically construct graph given a sequence of characters.
383,367
def is_left(point0, point1, point2):
    return ((point1[0] - point0[0]) * (point2[1] - point0[1])) - \
           ((point2[0] - point0[0]) * (point1[1] - point0[1]))
Tests if a point is Left|On|Right of an infinite line. Ported from the C++ version: on http://geomalgorithms.com/a03-_inclusion.html .. note:: This implementation only works in 2-dimensional space. :param point0: Point P0 :param point1: Point P1 :param point2: Point P2 :return: >0 for P2 left of the line through P0 and P1 =0 for P2 on the line <0 for P2 right of the line
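A quick numeric check of the sign convention; the expression is the z component of the 2D cross product of vectors P0P1 and P0P2 (the function is repeated here only to make the demo runnable).

def is_left(p0, p1, p2):
    return (p1[0] - p0[0]) * (p2[1] - p0[1]) - (p2[0] - p0[0]) * (p1[1] - p0[1])

# Line through (0, 0) and (1, 0), i.e. the x-axis pointing right:
print(is_left((0, 0), (1, 0), (0, 1)))   # 1  -> P2 is left of the line
print(is_left((0, 0), (1, 0), (2, 0)))   # 0  -> P2 is on the line
print(is_left((0, 0), (1, 0), (0, -1)))  # -1 -> P2 is right of the line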
383,368
def arg_tup_to_dict(argument_tuples): data = dict() for arg_name, arg_val in argument_tuples: if arg_val is not None: if arg_val is True: arg_val = elif arg_val is False: arg_val = data[arg_name] = arg_val return data
Given a set of argument tuples, set their value in a data dictionary if not blank
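A hedged sketch of the same behavior; the string literals substituted for True and False were lost in the snippet above, so "true"/"false" here are assumptions, not the library's confirmed values.

def arg_tup_to_dict_sketch(argument_tuples):
    data = {}
    for name, value in argument_tuples:
        if value is not None:        # blank (None) arguments are skipped
            if value is True:
                value = "true"       # assumed literal
            elif value is False:
                value = "false"      # assumed literal
            data[name] = value
    return data

print(arg_tup_to_dict_sketch([("limit", 10), ("verbose", True), ("filter", None)]))
# {'limit': 10, 'verbose': 'true'}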
383,369
def remove(src, rel, dst): smt = % rel queries = [] params = [] if src is not None: queries.append() params.append(src) if dst is not None: queries.append() params.append(dst) if not queries: return smt, params smt = % (smt, .join(queries)) return smt, params
Returns an SQL statement that removes edges from the SQL backing store. Either `src` or `dst` may be specified, even both. :param src: The source node. :param rel: The relation. :param dst: The destination node.
383,370
def ipoib_interfaces(): interfaces = [] for interface in network_interfaces(): try: driver = re.search(, subprocess.check_output([ , , interface]), re.M).group(1) if driver in IPOIB_DRIVERS: interfaces.append(interface) except Exception: log("Skipping interface %s" % interface, level=INFO) continue return interfaces
Return a list of IPOIB capable ethernet interfaces
383,371
def extract_cookies(self, response, request, referrer_host=None):
    new_response = HTTPResponseInfoWrapper(response)
    new_request = convert_http_request(request, referrer_host)
    self._cookie_jar.extract_cookies(new_response, new_request)
Wrapped ``extract_cookies``.

Args:
    response: An instance of :class:`.http.request.Response`.
    request: An instance of :class:`.http.request.Request`.
    referrer_host (str): A hostname or IP address of the referrer URL.
383,372
def print_tree( expr, attr=, padding=, exclude_type=None, depth=None, unicode=True, srepr_leaves=False, _last=False, _root=True, _level=0, _print=True): from qnet.printing import srepr lines = [] if unicode: draw = {: , : , : } sub_render = _shorten_render_unicode() else: draw = {: , : , : } sub_render = _shorten_render_ascii() to_str = lambda expr: render_head_repr( expr, sub_render=sub_render, key_sub_render=sub_render) if _root: lines.append(". " + to_str(expr)) else: if _last: lines.append(padding[:-1] + draw[] + to_str(expr)) else: lines.append(padding[:-1] + draw[] + to_str(expr)) padding = padding + try: children = getattr(expr, attr) except AttributeError: children = [] if exclude_type is not None: if isinstance(expr, exclude_type): children = [] if depth is not None: if depth <= _level: children = [] for count, child in enumerate(children): if hasattr(child, attr): if count == len(children)-1: lines += print_tree( child, attr, padding + , exclude_type=exclude_type, depth=depth, unicode=unicode, srepr_leaves=srepr_leaves, _last=True, _root=False, _level=_level+1) else: lines += print_tree( child, attr, padding + draw[], exclude_type=exclude_type, depth=depth, unicode=unicode, srepr_leaves=srepr_leaves, _last=False, _root=False, _level=_level+1) else: if count == len(children)-1: if srepr_leaves: lines.append(padding + draw[] + srepr(child)) else: lines.append(padding + draw[] + to_str(child)) else: if srepr_leaves: lines.append(padding + draw[] + srepr(child)) else: lines.append(padding + draw[] + to_str(child)) if _root: if _print: print("\n".join(lines)) else: return lines else: return lines
Print a tree representation of the structure of `expr` Args: expr (Expression): expression to render attr (str): The attribute from which to get the children of `expr` padding (str): Whitespace by which the entire tree is idented exclude_type (type): Type (or list of types) which should never be expanded recursively depth (int or None): Maximum depth of the tree to be printed unicode (bool): If True, use unicode line-drawing symbols for the tree, and print expressions in a unicode representation. If False, use an ASCII approximation. srepr_leaves (bool): Whether or not to render leaves with `srepr`, instead of `ascii`/`unicode` See also: :func:`tree` return the result as a string, instead of printing it
383,373
def deploy_directory(directory, auth=None): for file in os.listdir(directory): full_path = os.path.join(directory, file) if file.endswith(BELANNO_EXTENSION): name = file[:-len(BELANNO_EXTENSION)] log.info(, full_path) deploy_annotation(full_path, name, auth=auth) elif file.endswith(BELNS_EXTENSION): name = file[:-len(BELNS_EXTENSION)] log.info(, full_path) deploy_namespace(full_path, name, auth=auth) elif file.endswith(BEL_EXTENSION): name = file[:-len(BEL_EXTENSION)] log.info(, full_path) deploy_knowledge(full_path, name, auth=auth) else: log.debug(, full_path)
Deploy all files in a given directory. :param str directory: the path to a directory :param tuple[str] auth: A pair of (str username, str password) to give to the auth keyword of the constructor of :class:`artifactory.ArtifactoryPath`. Defaults to the result of :func:`get_arty_auth`.
383,374
def get_current_cmus(): result = subprocess.run(.split(), check=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) info = {} for line in result.stdout.decode().split(): line = line.split() if line[0] != : continue key = line[1] if key in [, , , ] and\ key not in info: info[key] = .join(line[2:]) if in info: info[] = info[] del info[] return Song(**info)
Get the current song from cmus.
383,375
def get_frame_locals(stepback=0):
    with Frame(stepback=stepback) as frame:
        locals_dict = frame.f_locals
    return locals_dict
Returns locals dictionary from a given frame. :param int stepback: :rtype: dict
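The Frame context manager is not shown here; the same effect can be sketched with the interpreter's own frame access, where the extra +1 skips the helper's frame.

import sys

def get_frame_locals_sketch(stepback=0):
    return sys._getframe(stepback + 1).f_locals  # +1 skips this helper's own frame

def caller():
    secret = 42
    return get_frame_locals_sketch()

print(caller())  # {'secret': 42}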
383,376
def getEdgeDirected(self, networkId, edgeId, verbose=None): response=api(url=self.___url++str(networkId)++str(edgeId)+, method="GET", verbose=verbose, parse_params=False) return response
Returns true if the edge specified by the `edgeId` and `networkId` parameters is directed. :param networkId: SUID of the network containing the edge :param edgeId: SUID of the edge :param verbose: print more :returns: 200: successful operation
383,377
def volumes_from(self, value):
    volumes_from = []
    if isinstance(value, list):
        for volume_from in value:
            if not isinstance(volume_from, six.string_types):
                raise TypeError("each bind must be a str. {0} was passed".format(volume_from))
            volumes_from.append(self._convert_volume_from(volume_from))
    elif isinstance(value, six.string_types):
        volumes_from.append(self._convert_volume_from(value))
    elif value is None:
        pass
    else:
        raise ValueError()
    self._volumes_from = volumes_from
Set the volumes_from list.

:param value: a str, a list of str, or None
:return: None
383,378
def overlay_gateway_map_vlan_vni_mapping_vid(self, **kwargs): config = ET.Element("config") overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(overlay_gateway, "name") name_key.text = kwargs.pop() map = ET.SubElement(overlay_gateway, "map") vlan_vni_mapping = ET.SubElement(map, "vlan-vni-mapping") vid = ET.SubElement(vlan_vni_mapping, "vid") vid.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
383,379
def render_fields(dictionary, *fields, **opts): *mtudescriptionInterface descriptionmtudescriptionInterface descriptionmtudescription results = [] for field in fields: res = render_field(dictionary, field, **opts) if res: results.append(res) if not in opts: opts[] = 0 if not in opts: opts[] = .format(ind=*opts[]) return opts[].join(results)
This function works similarly to :mod:`render_field <salt.modules.napalm_formula.render_field>` but for a list of fields from the same dictionary, rendering, indenting and distributing them on separate lines. dictionary The dictionary to traverse. fields A list of field names or paths in the dictionary. indent: ``0`` The indentation to use, prepended to the rendered field. separator: ``\\n`` The separator to use between fields. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description Jinja usage example: .. code-block:: jinja {%- set config={'mtu': 68, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }} The Jinja example above would generate the following configuration: .. code-block:: text mtu "68" description "Interface description"
383,380
def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None): return _FetchRemapping(message_type, , python_name=python_name, json_name=json_name, mappings=_JSON_FIELD_MAPPINGS)
Return the appropriate remapping for the given field, or None.
383,381
def invenio_query_factory(parser=None, walkers=None):
    parser = parser or Main
    walkers = walkers or [PypegConverter()]
    walkers.append(ElasticSearchDSL())

    def invenio_query(pattern):
        query = pypeg2.parse(pattern, parser, whitespace="")
        for walker in walkers:
            query = query.accept(walker)
        return query

    return invenio_query
Create a parser returning Elastic Search DSL query instance.
383,382
def default_values_of(func):
    signature = inspect.signature(func)
    return [k for k, v in signature.parameters.items()
            if v.default is not inspect.Parameter.empty
            or v.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD]
Return the defaults of the function `func`.
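Note that, despite the one-line summary, the list holds parameter names (those with a default, or of a non-positional kind), not the default values themselves. A self-contained check:

import inspect

def default_values_of(func):
    signature = inspect.signature(func)
    return [k for k, v in signature.parameters.items()
            if v.default is not inspect.Parameter.empty
            or v.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD]

def example(a, b=1, *args, c=2, **kwargs):
    return a

print(default_values_of(example))  # ['b', 'args', 'c', 'kwargs']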
383,383
def window(self, vec):
    slce = slice(None, None, None)
    self.__setitem__((slce, 0), self.__getitem__((slce, 0)) * vec)
    for m in xrange(1, self.mmax + 1):
        self.__setitem__((slce, -m), self.__getitem__((slce, -m)) * vec[m:])
        self.__setitem__((slce, m), self.__getitem__((slce, m)) * vec[m:])
Apply a window to the coefficients defined by *vec*. *vec* must have length *nmax* + 1. This is good way to filter the pattern by windowing in the coefficient domain. Example:: >>> vec = numpy.linspace(0, 1, c.nmax + 1) >>> c.window(vec) Args: vec (numpy.array): Vector of values to apply in the n direction of the data. Has length *nmax* + 1. Returns: Nothing, applies the window to the data in place.
383,384
def capture_vm_image(self, service_name, deployment_name, role_name, options): _validate_not_none(, service_name) _validate_not_none(, deployment_name) _validate_not_none(, role_name) _validate_not_none(, options) _validate_not_none(, options.os_state) _validate_not_none(, options.vm_image_name) _validate_not_none(, options.vm_image_label) return self._perform_post( self._get_capture_vm_image_path(service_name, deployment_name, role_name), _XmlSerializer.capture_vm_image_to_xml(options), as_async=True)
Creates a copy of the operating system virtual hard disk (VHD) and all of the data VHDs that are associated with the Virtual Machine, saves the VHD copies in the same storage location as the original VHDs, and registers the copies as a VM Image in the image repository that is associated with the specified subscription. service_name: The name of the service. deployment_name: The name of the deployment. role_name: The name of the role. options: An instance of CaptureRoleAsVMImage class. options.os_state: Required. Specifies the state of the operating system in the image. Possible values are: Generalized, Specialized A Virtual Machine that is fully configured and running contains a Specialized operating system. A Virtual Machine on which the Sysprep command has been run with the generalize option contains a Generalized operating system. If you capture an image from a generalized Virtual Machine, the machine is deleted after the image is captured. It is recommended that all Virtual Machines are shut down before capturing an image. options.vm_image_name: Required. Specifies the name of the VM Image. options.vm_image_label: Required. Specifies the label of the VM Image. options.description: Optional. Specifies the description of the VM Image. options.language: Optional. Specifies the language of the VM Image. options.image_family: Optional. Specifies a value that can be used to group VM Images. options.recommended_vm_size: Optional. Specifies the size to use for the Virtual Machine that is created from the VM Image.
383,385
def cancel(**kwargs):
    task_list = _query(**kwargs)
    for task in task_list:
        task.status = WorkQueue.CANCELED
        task.finished = datetime.datetime.utcnow()
        db.session.add(task)
    return len(task_list)
Cancels work items based on their criteria. Args: **kwargs: Same parameters as the query() method. Returns: The number of tasks that were canceled.
383,386
def build_mutation_pruner_plugin() -> LaserPlugin:
    from mythril.laser.ethereum.plugins.implementations.mutation_pruner import (
        MutationPruner,
    )
    return MutationPruner()
Creates an instance of the mutation pruner plugin
383,387
def read_random_state(self, group=None):
    group = self.sampler_group if group is None else group
    dataset_name = "/".join([group, "random_state"])
    arr = self[dataset_name][:]
    s = self[dataset_name].attrs["s"]
    pos = self[dataset_name].attrs["pos"]
    has_gauss = self[dataset_name].attrs["has_gauss"]
    cached_gauss = self[dataset_name].attrs["cached_gauss"]
    return s, arr, pos, has_gauss, cached_gauss
Reads the state of the random number generator from the file.

Parameters
----------
group : str
    Name of group to read random state from.

Returns
-------
tuple
    A tuple with 5 elements that can be passed to numpy.random.set_state.
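To show how the returned tuple is meant to be consumed, a minimal sketch; `fp` is assumed to be an open instance of the file class that defines read_random_state.

    import numpy

    # Read the saved generator state from the file handle (assumed open as `fp`).
    s, arr, pos, has_gauss, cached_gauss = fp.read_random_state()

    # Restore the generator so subsequent draws continue the saved stream.
    numpy.random.set_state((s, arr, pos, has_gauss, cached_gauss))
    draws = numpy.random.uniform(size=4)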
383,388
def compare_outputs(expected, output, **kwargs): SkipDim1 = kwargs.pop("SkipDim1", False) NoProb = kwargs.pop("NoProb", False) Dec4 = kwargs.pop("Dec4", False) Dec3 = kwargs.pop("Dec3", False) Dec2 = kwargs.pop("Dec2", False) Disc = kwargs.pop("Disc", False) Mism = kwargs.pop("Mism", False) Opp = kwargs.pop("Opp", False) if Opp and not NoProb: raise ValueError("Opp is only available if NoProb is True") if Dec4: kwargs["decimal"] = min(kwargs["decimal"], 4) if Dec3: kwargs["decimal"] = min(kwargs["decimal"], 3) if Dec2: kwargs["decimal"] = min(kwargs["decimal"], 2) if isinstance(expected, numpy.ndarray) and isinstance(output, numpy.ndarray): if SkipDim1: expected = expected.reshape(tuple([d for d in expected.shape if d > 1])) output = output.reshape(tuple([d for d in expected.shape if d > 1])) if NoProb: if len(output.shape) == 2 and output.shape[1] == 2 and len(expected.shape) == 1: output = output[:, 1] elif len(output.shape) == 1 and len(expected.shape) == 1: pass elif len(expected.shape) == 1 and len(output.shape) == 2 and \ expected.shape[0] == output.shape[0] and output.shape[1] == 1: output = output[:, 0] elif expected.shape != output.shape: raise NotImplementedError("No good shape: {0} != {1}".format(expected.shape, output.shape)) if Opp: output = -output if len(expected.shape) == 1 and len(output.shape) == 2 and output.shape[1] == 1: output = output.ravel() if len(expected.shape) == 2 and len(output.shape) == 1 and expected.shape[1] == 1: expected = expected.ravel() if not numpy.issubdtype(expected.dtype, numpy.number): try: assert_array_equal(expected, output) except Exception as e: if Disc: return ExpectedAssertionError(str(e)) else: return OnnxRuntimeAssertionError(str(e)) else: try: assert_array_almost_equal(expected, output, **kwargs) except Exception as e: expected_ = expected.ravel() output_ = output.ravel() if len(expected_) == len(output_): diff = numpy.abs(expected_ - output_).max() elif Mism: return ExpectedAssertionError("dimension mismatch={0}, {1}\n{2}".format(expected.shape, output.shape, e)) else: return OnnxRuntimeAssertionError("dimension mismatch={0}, {1}\n{2}".format(expected.shape, output.shape, e)) if Disc: return ExpectedAssertionError("max diff(expected, output)={0}\n{1}".format(diff, e)) else: return OnnxRuntimeAssertionError("max diff(expected, output)={0}\n{1}".format(diff, e)) else: return OnnxRuntimeAssertionError("Unexpected types {0} != {1}".format(type(expected), type(output))) return None
Compares expected values and output. Returns None if no error, an exception message otherwise.
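A small sketch of how this comparison helper is typically driven from a test; the arrays are made up, and `decimal` is forwarded to numpy's assert_array_almost_equal.

    import numpy

    expected = numpy.array([0.1, 0.2, 0.7])
    output = numpy.array([[0.1], [0.2], [0.7]])   # the trailing dimension of 1 is dropped

    # Returns None when the outputs agree, otherwise an assertion-error object.
    msg = compare_outputs(expected, output, SkipDim1=True, decimal=5)
    assert msg is None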
383,389
def load_file(self, file_path, share_name, directory_name, file_name, **kwargs): self.connection.create_file_from_path(share_name, directory_name, file_name, file_path, **kwargs)
Upload a file to Azure File Share. :param file_path: Path to the file to load. :type file_path: str :param share_name: Name of the share. :type share_name: str :param directory_name: Name of the directory. :type directory_name: str :param file_name: Name of the file. :type file_name: str :param kwargs: Optional keyword arguments that `FileService.create_file_from_path()` takes. :type kwargs: object
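A hedged usage sketch for the hook; the local path, share, directory, and file names are placeholders, and the hook is assumed to pick up its default configured connection.

    # Placeholder names; not taken from the source above.
    hook = AzureFileShareHook()   # assumes the default Azure File Share connection is configured
    hook.load_file('/tmp/report.csv', 'reports', 'daily', 'report.csv')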
383,390
def _dispatch(name, *args, **kwargs): def outer(self, *args, **kwargs): def f(x): x = self._shallow_copy(x, groupby=self._groupby) return getattr(x, name)(*args, **kwargs) return self._groupby.apply(f) outer.__name__ = name return outer
Dispatch to apply.
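To make the factory concrete, a hedged sketch of how it is typically consumed when building a resample-on-groupby mixin: each class attribute becomes a bound method that re-invokes the named method through the wrapped groupby's apply. The class and method names below are illustrative, not taken from the source.

    class _ResampleGroupByMixin:
        # Illustrative only: every attribute below turns into a method that
        # shallow-copies each group and forwards the call via groupby.apply().
        _downsample = _dispatch('_downsample')
        _upsample = _dispatch('_upsample')
        _groupby_and_aggregate = _dispatch('_groupby_and_aggregate')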
383,391
def addChild(self, child_id): self.log.debug("Try to add a child <Workitem %s> to current " "<Workitem %s>", child_id, self) self._addChildren([child_id]) self.log.info("Successfully add a child <Workitem %s> to current " "<Workitem %s>", child_id, self)
Add a child to current workitem :param child_id: the child workitem id/number (integer or equivalent string)
383,392
def institute(context, institute_id, sanger_recipient, coverage_cutoff, frequency_cutoff, display_name, remove_sanger):
    # The database adapter is stored on the click context object.
    adapter = context.obj['adapter']
    LOG.info("Running scout update institute")
    try:
        adapter.update_institute(
            internal_id=institute_id,
            sanger_recipient=sanger_recipient,
            coverage_cutoff=coverage_cutoff,
            frequency_cutoff=frequency_cutoff,
            display_name=display_name,
            remove_sanger=remove_sanger,
        )
    except Exception as err:
        LOG.warning(err)
        context.abort()
Update an institute
383,393
def removeDuplicates(inFileName, outFileName):
    f = open(inFileName)
    legend = f.readline()
    data = ''
    h = {}
    h[legend] = 0
    lines = f.readlines()
    for l in lines:
        # Only keep the first occurrence of each line (Python 2 has_key idiom).
        if not h.has_key(l):
            h[l] = 0
            data += l
    f.flush()
    f.close()
    f = open(outFileName, 'w')
    f.write(legend + data)
    f.flush()
    f.close()
Removes duplicated lines from the 'inFileName' CSV file; the results are written to 'outFileName'.
383,394
def to_comm(self, light_request=False): data = None if not light_request: tmp_fn = path_to_zip(self.dir_pointer) data = read_as_base64(tmp_fn) os.unlink(tmp_fn) return Archive( isbn=self.isbn, uuid=self.uuid, aleph_id=self.aleph_id, b64_data=data, dir_pointer=self.dir_pointer, )
Convert `self` to :class:`.Archive`. Returns: obj: :class:`.Archive` instance.
383,395
def get_roles(self, principal, object=None, no_group_roles=False): assert principal if hasattr(principal, "is_anonymous") and principal.is_anonymous: return [AnonymousRole] query = db.session.query(RoleAssignment.role) if isinstance(principal, Group): filter_principal = RoleAssignment.group == principal else: filter_principal = RoleAssignment.user == principal if not no_group_roles: groups = [g.id for g in principal.groups] if groups: filter_principal |= RoleAssignment.group_id.in_(groups) query = query.filter(filter_principal) if object is not None: assert isinstance(object, Entity) query = query.filter(RoleAssignment.object == object) roles = {i[0] for i in query.all()} if object is not None: for attr, role in (("creator", Creator), ("owner", Owner)): if getattr(object, attr) == principal: roles.add(role) return list(roles)
Get all the roles attached to given `principal`, on a given `object`. :param principal: a :class:`User` or :class:`Group` :param object: an :class:`Entity` :param no_group_roles: If `True`, return only direct roles, not roles acquired through group membership.
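A hedged usage sketch; `security_service`, `user`, `anonymous_user`, and `doc` are placeholder names for a service instance, users, and an Entity.

    # Placeholder names; not from the source above.
    roles = security_service.get_roles(user, object=doc)
    can_edit = Owner in roles or Creator in roles

    # Anonymous principals short-circuit to the anonymous role.
    anonymous_roles = security_service.get_roles(anonymous_user)   # [AnonymousRole]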
383,396
def default(self, obj):
    obj_type = type(obj)
    if obj_type not in self._ndb_type_encoding:
        if hasattr(obj, '__metaclass__'):
            obj_type = obj.__metaclass__
        else:
            # Fall back to an isinstance check against the known NDB types.
            for ndb_type in NDB_TYPES:
                if isinstance(obj, ndb_type):
                    obj_type = ndb_type
                    break
    fn = self._ndb_type_encoding.get(obj_type)
    if fn:
        return fn(obj)
    return json.JSONEncoder.default(self, obj)
Overriding the default JSONEncoder.default for NDB support.
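A hedged sketch of how a custom encoder like this is normally plugged into json.dumps; the encoder class name and the `entity` variable are placeholders, not names from the source.

    import json

    # Hypothetical names: NdbModelEncoder is the class that defines default() above,
    # and `entity` is any ndb model instance to serialize.
    payload = json.dumps(entity, cls=NdbModelEncoder)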
383,397
def convert_date(value, parameter):
    # '', '-', and None fall back to the parameter default.
    value = _check_default(value, parameter, ('', '-', None))
    if value is None or isinstance(value, datetime.date):
        return value
    for fmt in settings.DATE_INPUT_FORMATS:
        try:
            return datetime.datetime.strptime(value, fmt).date()
        except (ValueError, TypeError):
            continue
    raise ValueError("`{}` does not match a format in settings.DATE_INPUT_FORMATS".format(value))
Converts to datetime.date: '', '-', and None convert to the parameter default; the first matching format in settings.DATE_INPUT_FORMATS converts the value to a date.
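A hedged sketch of the converter in use; it assumes Django settings are configured, that '%Y-%m-%d' appears in settings.DATE_INPUT_FORMATS (the Django default), and that `parameter` is the converter's parameter descriptor.

    import datetime

    # Assumes '%Y-%m-%d' is in settings.DATE_INPUT_FORMATS and `parameter`
    # is the converter's parameter descriptor (its default backs empty values).
    assert convert_date('2021-03-15', parameter) == datetime.date(2021, 3, 15)
    d = convert_date('', parameter)   # '' falls back to the parameter default via _check_default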
383,398
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0): super(Digest, self).read(istream, kmip_version=kmip_version) tstream = BytearrayStream(istream.read(self.length)) self.hashing_algorithm.read(tstream, kmip_version=kmip_version) self.digest_value.read(tstream, kmip_version=kmip_version) self.key_format_type.read(tstream, kmip_version=kmip_version) self.is_oversized(tstream) self.validate()
Read the data encoding the Digest object and decode it into its constituent parts. Args: istream (Stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
383,399
def save_modules():
    saved = sys.modules.copy()
    with ExceptionSaver() as saved_exc:
        yield saved

    sys.modules.update(saved)
    # Drop modules imported inside the context; encodings modules are kept
    # (the 'encodings.' prefix is restored here from the upstream source).
    del_modules = (
        mod_name for mod_name in sys.modules
        if mod_name not in saved
        and not mod_name.startswith('encodings.')
    )
    _clear_modules(del_modules)

    saved_exc.resume()
Context in which imported modules are saved. Translates exceptions internal to the context into the equivalent exception outside the context.
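A hedged sketch of the context manager in use; it assumes the generator is wrapped with contextlib.contextmanager, as a generator-based context manager would need to be.

    import sys

    # Assumes save_modules is decorated with @contextlib.contextmanager.
    with save_modules():
        import xml.dom.minidom            # imported only inside the context
        assert 'xml.dom.minidom' in sys.modules

    # On exit the snapshot is restored and modules imported inside the block are cleared.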