Column schema:
Unnamed: 0 (int64, values 0 to 389k)
code (string, lengths 26 to 79.6k)
docstring (string, lengths 1 to 46.9k)
1,400
def forgot_password(): form_class = _security.forgot_password_form if request.is_json: form = form_class(MultiDict(request.get_json())) else: form = form_class() if form.validate_on_submit(): send_reset_password_instructions(form.user) if not request.is_json: do_flash(*get_message(, email=form.user.email)) if request.is_json: return _render_json(form, include_user=False) return _security.render_template(config_value(), forgot_password_form=form, **_ctx())
View function that handles a forgotten password request.
1,401
def get_method_documentation(method): from inspect import getargspec result = { : method.__name__, : .join([name.capitalize() for name in method.__name__.split()]), } arg_specs = getargspec(method) arguments = {} if not arg_specs.defaults: if len(arg_specs.args[1:]) > 0: arguments[] = list(arg_specs.args[1:]) else: if len(arg_specs.args[1:-(len(arg_specs.defaults))]): arguments[] = list(arg_specs.args[1:-(len(arg_specs.defaults))]) arguments[] = {} for i in range(len(arg_specs.defaults)): arguments[][arg_specs.args[-(len(arg_specs.defaults)) + i]] = arg_specs.defaults[i] if arguments != {}: result[] = arguments doc = method.__doc__.strip() if method.__doc__ else if in method.__doc__: doc = {: method.__doc__[0:doc.find()].strip()} params = re.findall(r":param ([^\s]*): (.*)\n", method.__doc__) if len(params) > 0: doc[] = {} for param in params: doc[][param[0]] = param[1].strip() regex = re.compile(r":returns:(.*)", re.MULTILINE | re.DOTALL) returns = regex.search(method.__doc__) if returns and returns.group(0): doc[] = returns.group(0).replace(, ).replace(, ).strip() if doc != : result[] = doc return result
This function uses "inspect" to retrieve information about a method. Also, if you place a comment on the method, the method can be documented with "reStructuredText". :param method: method to describe :returns: { 'name' : <string> - name of the method, 'friendly_name' : <string> - friendly name of the method, 'parameters' : { 'required' : [ 'param1', 'param2' ], 'optionnal' : { 'param3' : 'default_value3', 'param4' : 'default_value4', }, 'help' : { 'summary' : <string> - Summary - general description like in the comment, 'parameters' : { 'param1' : 'description', 'param2' : 'description', }, 'return' : <string> - Can be multiline, } }
1,402
def add_state_editor(self, state_m): state_identifier = self.get_state_identifier(state_m) if state_identifier in self.closed_tabs: state_editor_ctrl = self.closed_tabs[state_identifier][] state_editor_view = state_editor_ctrl.view handler_id = self.closed_tabs[state_identifier][] source_code_view_is_dirty = self.closed_tabs[state_identifier][] del self.closed_tabs[state_identifier] else: state_editor_view = StateEditorView() if isinstance(state_m, LibraryStateModel): state_editor_view[].set_current_page( state_editor_view[].page_num(state_editor_view.page_dict["Data Linkage"])) state_editor_ctrl = StateEditorController(state_m, state_editor_view) self.add_controller(state_identifier, state_editor_ctrl) if state_editor_ctrl.get_controller() and state_m.state.get_next_upper_library_root_state() is None: handler_id = state_editor_view.source_view.get_buffer().connect(, self.script_text_changed, state_m) self.view.get_top_widget().connect(, state_editor_view.source_view.on_draw) else: handler_id = None source_code_view_is_dirty = False (tab, inner_label, sticky_button) = create_tab_header(, self.on_tab_close_clicked, self.on_toggle_sticky_clicked, state_m) set_tab_label_texts(inner_label, state_m, source_code_view_is_dirty) state_editor_view.get_top_widget().title_label = inner_label state_editor_view.get_top_widget().sticky_button = sticky_button page_content = state_editor_view.get_top_widget() page_id = self.view.notebook.prepend_page(page_content, tab) page = self.view.notebook.get_nth_page(page_id) self.view.notebook.set_tab_reorderable(page, True) page.show_all() self.view.notebook.show() self.tabs[state_identifier] = {: page, : state_m, : state_editor_ctrl, : self.model.selected_state_machine_id, : False, : source_code_view_is_dirty, : handler_id} return page_id
Triggered whenever a state is selected. :param state_m: The selected state model.
1,403
def register_converter(operator_name, conversion_function, overwrite=False):
    if not overwrite and operator_name in _converter_pool:
        raise ValueError()
    _converter_pool[operator_name] = conversion_function
:param operator_name: A unique operator ID. It is usually a string but you can use a type as well :param conversion_function: A callable object :param overwrite: By default, we raise an exception if the caller of this function is trying to assign an existing key (i.e., operator_name) a new value (i.e., conversion_function). Set this flag to True to enable overwriting.
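A minimal usage sketch of this registry; the operator name and converter callable below are hypothetical, not part of the library:

def convert_custom_linear(scope, operator, container):  # hypothetical converter
    pass

register_converter("CustomLinear", convert_custom_linear)
register_converter("CustomLinear", convert_custom_linear, overwrite=True)  # re-registering requires the flag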
1,404
def get_all_apps(): LOG.info() url = .format(API_URL) response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT) assert response.ok, pipelines = response.json() LOG.debug(, pipelines) return pipelines
Get a list of all applications in Spinnaker. Returns: requests.models.Response: Response from Gate containing list of all apps.
1,405
def filterAcceptsRow(self, row, parentindex):
    if not super(ReftrackSortFilterModel, self).filterAcceptsRow(row, parentindex):
        return False
    if parentindex.isValid():
        m = parentindex.model()
    else:
        m = self.sourceModel()
    i = m.index(row, 18, parentindex)
    reftrack = i.data(REFTRACK_OBJECT_ROLE)
    if not reftrack:
        return True
    else:
        return self.filter_accept_reftrack(reftrack)
Return True, if the filter accepts the given row of the parent :param row: the row to filter :type row: :class:`int` :param parentindex: the parent index :type parentindex: :class:`QtCore.QModelIndex` :returns: True, if the filter accepts the row :rtype: :class:`bool` :raises: None
1,406
def getmlsthelper(referencefilepath, start, organism, update): from accessoryFunctions.accessoryFunctions import GenObject organismset = set() organism = organism if organism != else organismdictionary = {: , : , : , : , : , : , : } try: organismset.add(organismdictionary[organism]) except KeyError: organismset.add(organism) for scheme in organismset: organismpath = os.path.join(referencefilepath, , organism) try: lastfolder = sorted(glob(.format(organismpath)))[-1].rstrip() except IndexError: lastfolder = [] delta, foldersize, d1 = schemedate(lastfolder) newfolder = .format(organismpath, d1) if update: if delta.days > 7 or foldersize < 100: printtime(.format(organism), start) getmlstargs = GenObject() getmlstargs.species = scheme getmlstargs.repository_url = getmlstargs.force_scheme_name = False getmlstargs.path = newfolder make_path(getmlstargs.path) getmlst.main(getmlstargs) try: profilestart = open(glob(.format(newfolder))[0]).readline() except IndexError: profilestart = [] if not profilestart or profilestart[0] == : shutil.rmtree(newfolder) newfolder = lastfolder else: newfolder = lastfolder else: newfolder = lastfolder try: newfoldersize = sum(os.path.getsize(.format(newfolder, f)) for f in os.listdir(newfolder) if os.path.isfile(.format(newfolder, f))) except (OSError, TypeError): newfoldersize = 100 if newfoldersize < 100: shutil.rmtree(newfolder) try: newfolder = sorted(glob(.format(organismpath)))[-1].rstrip() except IndexError: newfolder = organismpath return newfolder
Prepares to run the getmlst.py script provided in SRST2
1,407
def neighsol(addr, src, iface, timeout=1, chainCC=0):
    nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr))
    d = inet_ntop(socket.AF_INET6, nsma)
    dm = in6_getnsmac(nsma)
    p = Ether(dst=dm) / IPv6(dst=d, src=src, hlim=255)
    p /= ICMPv6ND_NS(tgt=addr)
    p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))
    res = srp1(p, type=ETH_P_IPV6, iface=iface, timeout=1, verbose=0, chainCC=chainCC)
    return res
Sends and receives an ICMPv6 Neighbor Solicitation message. This function sends an ICMPv6 Neighbor Solicitation message to get the MAC address of the neighbor with the specified IPv6 address. The 'src' address is used as the source of the message, which is sent on iface. By default, the timeout waiting for an answer is 1 second. If no answer is gathered, None is returned. Otherwise, the answer (ethernet frame) is returned.
1,408
def load_yaml_config(self, conf):
    with open(conf) as fd:
        self.config = recursive_dict_update(self.config, yaml.load(fd, Loader=UnsafeLoader))
Load a YAML configuration file and recursively update the overall configuration.
1,409
def list_policies(self, filters=None):
    _, policy_list = self.handler.streamed_request("list-policies", "list-policy", filters)
    return policy_list
Retrieve installed trap, drop and bypass policies. :param filters: retrieve only matching policies (optional) :type filters: dict :return: list of installed trap, drop and bypass policies :rtype: list
1,410
def add_extension_attribute(self, ext_name, key, value):
    attributes = self.extension_attributes.pop(ext_name, {})
    attributes[key] = value
    self.extension_attributes[ext_name] = attributes
Banana banana
1,411
def watcher(self) -> Watcher:
    if not hasattr(self, "_watcher"):
        self._watcher = Watcher()
    return self._watcher
Gives an access to action's watcher. :return: Action's watcher instance.
1,412
def GetResources(filename, types=None, names=None, languages=None):
    hsrc = win32api.LoadLibraryEx(filename, 0, LOAD_LIBRARY_AS_DATAFILE)
    res = _GetResources(hsrc, types, names, languages)
    win32api.FreeLibrary(hsrc)
    return res
Get resources from dll/exe file. types = a list of resource types to search for (None = all) names = a list of resource names to search for (None = all) languages = a list of resource languages to search for (None = all) Return a dict of the form {type_: {name: {language: data}}} which might also be empty if no matching resources were found.
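An illustrative call, assuming pywin32 is installed; the DLL path is an example and 16 is the RT_VERSION resource type:

res = GetResources(r"C:\Windows\System32\kernel32.dll", types=[16])
for type_, names in res.items():
    for name, langs in names.items():
        for lang, data in langs.items():
            print(type_, name, lang, len(data))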
1,413
def gen_anytext(*args): bag = [] for term in args: if term is not None: if isinstance(term, list): for term2 in term: if term2 is not None: bag.append(term2) else: bag.append(term) return .join(bag)
Convenience function to create bag of words for anytext property
1,414
def write(self, values): filtered = {_id: value for _id, value in values.items() if _id in self._document_ids} if not filtered: return bulk = self.get_collection().initialize_ordered_bulk_op() for _id, value in filtered.items(): bulk.find({: _id}).upsert() \ .update_one({: {self._field: value}}) bulk.execute()
Write values to the targeted documents. Values need to be a dict of the form: {document_id: value}
1,415
def _parse_values(self): data = [] if self.has_tabs: def _parse_tab_text(tab): if tab.select_one(".visible_normal"): return tab.select_one(".visible_normal").text else: return tab.text sub_table_ids = [_parse_tab_text(x) for x in self.soup.select(".table_switch li")] sub_tables = self.soup.select(".dataTables_wrapper") assert len(sub_tables) == len(sub_table_ids) assert len(sub_tables) > 0 for measure, table in zip(sub_table_ids, sub_tables): if self.has_horizontal_scroll: _data = self._parse_horizontal_scroll_table(table) for region, col, value in _data: data.append({ "region_or_unit": region, "select_period": col, "measure": measure, }) else: if self.has_horizontal_scroll: raise NotImplementedError() if self.has_vertical_scroll: table = self.soup.select_one(" _data = self._parse_vertical_scroll_table(table) else: table = self.soup.select(".chart.table.scrolling")[-1] _data = self._parse_regular_table(table) for region, measure, value in _data: data.append({ "region_or_unit": region, "measure": measure, "value": value }) return data
Get values
1,416
def complete_modules(text):
    import MAVProxy.modules, pkgutil
    modlist = [x[1] for x in pkgutil.iter_modules(MAVProxy.modules.__path__)]
    ret = []
    loaded = set(complete_loadedmodules())
    for m in modlist:
        if not m.startswith("mavproxy_"):
            continue
        name = m[9:]
        if not name in loaded:
            ret.append(name)
    return ret
complete mavproxy module names
1,417
def create_sequence_sites(chain, seq_site_length):
    seq_ids = sorted(list(chain.keys()), key=int)
    slices = [itertools.islice(seq_ids, i, None) for i in range(seq_site_length)]
    seq_site_ids = list(zip(*slices))
    sequence_sites = []
    for seq_site_id in seq_site_ids:
        seq_site = plsimulator.SequenceSite(chain[seq_id] for seq_id in seq_site_id)
        if seq_site.is_sequential():
            sequence_sites.append(seq_site)
        else:
            continue
    return sequence_sites
Create sequence sites using sequence ids. :param dict chain: Chain object that contains chemical shift values and assignment information. :param int seq_site_length: Length of a single sequence site. :return: List of sequence sites. :rtype: :py:class:`list`
1,418
def from_dict(input_dict, data=None): import copy input_dict = copy.deepcopy(input_dict) model_class = input_dict.pop() input_dict["name"] = str(input_dict["name"]) import GPy model_class = eval(model_class) return model_class._build_from_input_dict(input_dict, data)
Instantiate an object of a derived class using the information in input_dict (built by the to_dict method of the derived class). More specifically, after reading the derived class from input_dict, it calls the method _build_from_input_dict of the derived class. Note: This method should not be overridden in the derived class. If needed, please override _build_from_input_dict instead. :param dict input_dict: Dictionary with all the information needed to instantiate the object.
1,419
def sha256_fingerprint_from_raw_ssh_pub_key(raw_key): digest = hashlib.sha256(raw_key).digest() h = base64.b64encode(digest).decode() h = h.rstrip().rstrip() return + h
Encode a raw SSH key (string of bytes, as from `str(paramiko.AgentKey)`) to a fingerprint in the SHA256 form: SHA256:j2WoSeOWhFy69BQ39fuafFAySp9qCZTSCEyT2vRKcL+s
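A self-contained sketch of the same fingerprint scheme (the standard OpenSSH SHA256 format), not the project's exact code since its string literals are elided above:

import base64
import hashlib

def sha256_fingerprint(raw_key: bytes) -> str:
    digest = hashlib.sha256(raw_key).digest()
    # OpenSSH drops the base64 "=" padding and prefixes the hash name
    return "SHA256:" + base64.b64encode(digest).decode().rstrip("=")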
1,420
def to_json(model, sort=False, **kwargs):
    obj = model_to_dict(model, sort=sort)
    obj[u"version"] = JSON_SPEC
    return json.dumps(obj, allow_nan=False, **kwargs)
Return the model as a JSON document. ``kwargs`` are passed on to ``json.dumps``. Parameters ---------- model : cobra.Model The cobra model to represent. sort : bool, optional Whether to sort the metabolites, reactions, and genes or maintain the order defined in the model. Returns ------- str String representation of the cobra model as a JSON document. See Also -------- save_json_model : Write directly to a file. json.dumps : Base function.
1,421
def update(self, index, id, doc_type="_doc", body=None, params=None):
    for param in (index, id):
        if param in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument.")
    return self.transport.perform_request(
        "POST", _make_path(index, doc_type, id, "_update"), params=params, body=body
    )
Update a document based on a script or partial data provided. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html>`_ :arg index: The name of the index :arg id: Document ID :arg body: The request definition using either `script` or partial `doc` :arg _source: True or false to return the _source field or not, or a list of fields to return :arg _source_exclude: A list of fields to exclude from the returned _source field :arg _source_include: A list of fields to extract and return from the _source field :arg fields: A comma-separated list of fields to return in the response :arg if_seq_no: :arg if_primary_term: :arg lang: The script language (default: painless) :arg parent: ID of the parent document. It is only used for routing and for the upsert request :arg refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes, valid choices are: 'true', 'false', 'wait_for' :arg retry_on_conflict: Specify how many times the operation should be retried when a conflict occurs (default: 0) :arg routing: Specific routing value :arg timeout: Explicit operation timeout :arg timestamp: Explicit timestamp for the document :arg ttl: Expiration time for the document :arg version: Explicit version number for concurrency control :arg version_type: Specific version type, valid choices are: 'internal', 'force' :arg wait_for_active_shards: Sets the number of shard copies that must be active before proceeding with the update operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)
1,422
def get_django_user(self, username, password=None):
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        user = User(username=username)
    if password is not None:
        user.set_password(password)
    user.save()
    return user
Get the Django user with the given username, or create one if it doesn't already exist. If `password` is given, then set the user's password to that (regardless of whether the user was created or not).
1,423
def increase_volume(percentage): if percentage > 100 or percentage < 0: raise ValueError() if system.get_name() == : pass elif system.get_name() == : volume_int = percentage / 10 old_volume = get() new_volume = old_volume + volume_int if new_volume > 10: new_volume = 10 set_volume(new_volume * 10) else: formatted = % percentage sp.Popen([, , , , formatted]).wait()
Increase the volume. Increase the volume by a given percentage. Args: percentage (int): The percentage (as an integer between 0 and 100) to increase the volume by. Raises: ValueError: if the percentage is >100 or <0.
1,424
def __init_configsvrs(self, params): self._configsvrs = [] for cfg in params: cfg = self._strip_auth(cfg) server_id = cfg.pop(, None) version = cfg.pop(, self._version) cfg.update({: True}) if self.enable_ipv6: common.enable_ipv6_single(cfg) self._configsvrs.append(Servers().create( , cfg, sslParams=self.sslParams, autostart=True, version=version, server_id=server_id))
create and start config servers
1,425
def _file_write(path, content): with salt.utils.files.fopen(path, ) as fp_: fp_.write(salt.utils.stringutils.to_str(content)) fp_.close()
Write content to a file
1,426
def pager_fatality_rates():
    theta = 13.249
    beta = 0.151
    mmi_range = list(range(2, 11))
    fatality_rate = {mmi: 0 if mmi < 4 else log_normal_cdf(
        mmi, median=theta, sigma=beta) for mmi in mmi_range}
    return fatality_rate
USGS Pager fatality estimation model. Fatality rate(MMI) = cum. standard normal dist(1/BETA * ln(MMI/THETA)). Reference: Jaiswal, K. S., Wald, D. J., and Hearne, M. (2009a). Estimating casualties for large worldwide earthquakes using an empirical approach. U.S. Geological Survey Open-File Report 2009-1136. v1.0: Theta: 14.05, Beta: 0.17, Zeta 2.15 Jaiswal, K, and Wald, D (2010) An Empirical Model for Global Earthquake Fatality Estimation Earthquake Spectra, Volume 26, No. 4, pages 1017–1037 v2.0: Theta: 13.249, Beta: 0.151, Zeta: 1.641) (http://pubs.usgs.gov/of/2009/1136/pdf/ PAGER%20Implementation%20of%20Empirical%20model.xls) :returns: Fatality rate calculated as: lognorm.cdf(mmi, shape=Beta, scale=Theta) :rtype: dict
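The rate can be cross-checked with SciPy's lognormal CDF, since lognorm.cdf(mmi, s=beta, scale=theta) equals the cumulative standard normal of (1/beta) * ln(mmi/theta); the snippet below is illustrative only:

from scipy.stats import lognorm

theta, beta = 13.249, 0.151
rate_mmi10 = lognorm.cdf(10, s=beta, scale=theta)  # fatality rate at MMI 10
print(round(rate_mmi10, 6))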
1,427
def _find_players(self, year): if not year: year = utils._find_year_for_season() url = self._create_url(year) page = self._pull_team_page(url) if not page: output = ("Cantable player_id = self._get_id(player) if self._slim: name = self._get_name(player) self._players[player_id] = name else: player_instance = Player(player_id) self._players.append(player_instance)
Find all player IDs for the requested team. For the requested team and year (if applicable), pull the roster table and parse the player ID for all players on the roster and create an instance of the Player class for the player. All player instances are added to the 'players' property to get all stats for all players on a team. Parameters ---------- year : string The 6-digit string representing the year to pull the team's roster from.
1,428
def tent_map(x, steps, mu=2):
    for _ in range(steps):
        x = mu * x if x < 0.5 else mu * (1 - x)
        yield x
Generates a time series of the tent map. Characteristics and Background: The name of the tent map is derived from the fact that the plot of x_i vs x_i+1 looks like a tent. For mu > 1 one application of the mapping function can be viewed as stretching the surface on which the value is located and then folding the area that is greater than one back towards the zero. This corresponds nicely to the definition of chaos as expansion in one dimension which is counteracted by a compression in another dimension. Calculating the Lyapunov exponent: The lyapunov exponent of the tent map can be easily calculated as due to this stretching behavior a small difference delta between two neighboring points will indeed grow exponentially by a factor of mu in each iteration. We thus can assume that: delta_n = delta_0 * mu^n We now only have to change the basis to e to obtain the exact formula that is used for the definition of the lyapunov exponent: delta_n = delta_0 * e^(ln(mu) * n) Therefore the lyapunov exponent of the tent map is: lambda = ln(mu) References: .. [tm_1] https://en.wikipedia.org/wiki/Tent_map Args: x (float): starting point steps (int): number of steps for which the generator should run Kwargs: mu (int): parameter mu that controls the behavior of the map Returns: generator object: the generator that creates the time series
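A short illustrative run of the generator:

series = list(tent_map(0.1, steps=5, mu=2))
# approximately [0.2, 0.4, 0.8, 0.4, 0.8]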
1,429
def nexec(statement, globals=None, locals=None, **kwargs): try: import __builtin__ as builtins except ImportError: import builtins from ast import parse from napi.transformers import NapiTransformer from ast import fix_missing_locations as fml try: node = parse(statement, , ) except ImportError: exec(statement) else: if globals is None: globals = builtins.globals() if locals is None: locals = {} trans = NapiTransformer(globals=globals, locals=locals, **kwargs) trans.visit(node) code = compile(fml(node), , ) return builtins.eval(code, globals, locals)
Execute *statement* using *globals* and *locals* dictionaries as *global* and *local* namespace. *statement* is transformed using :class:`.NapiTransformer`.
1,430
def _draw_frame(self, framedata): original = self.read_frame() if original is None: self.update_info(self.info_string(message=, frame=framedata)) return if self.original is not None: processed = self.process_frame(original.copy()) if self.cmap_original is not None: original = to_gray(original) elif not is_color_image(original): self.original.set_cmap() self.original.set_data(original) else: processed = self.process_frame(original) if self.cmap_processed is not None: processed = to_gray(processed) elif not is_color_image(processed): self.processed.set_cmap() if self.annotations: self.annotate(framedata) self.processed.set_data(processed) self.update_info(self.info_string(frame=framedata))
Reads, processes and draws the frames. If needed for color maps, conversions to gray scale are performed. In case the images are no color images and no custom color maps are defined, the colormap `gray` is applied. This function is called by TimedAnimation. Args: framedata: The frame data.
1,431
def match_any_learning_objective(self, match): match_key = param = if match: flag = else: flag = if match_key in self._my_osid_query._query_terms: self._my_osid_query._query_terms[match_key][param] = flag else: self._my_osid_query._query_terms[match_key] = {param: flag} self._my_osid_query._query_terms[match_key][] = [[], []]
Matches an item with any objective. arg: match (boolean): ``true`` to match items with any learning objective, ``false`` to match items with no learning objectives *compliance: mandatory -- This method must be implemented.*
1,432
def set_poll_func(self, func, func_err_handler=None): if not func_err_handler: func_err_handler = traceback.print_exception self._pa_poll_cb = c.PA_POLL_FUNC_T(ft.partial(self._pulse_poll_cb, func, func_err_handler)) c.pa.mainloop_set_poll_func(self._loop, self._pa_poll_cb, None)
Can be used to integrate pulse client into existing eventloop. Function will be passed a list of pollfd structs and timeout value (seconds, float), which it is responsible to use and modify (set poll flags) accordingly, returning int value >= 0 with number of fds that had any new events within timeout. func_err_handler defaults to traceback.print_exception(), and will be called on any exceptions from callback (to e.g. log these), returning poll error code (-1) to libpulse after that.
1,433
def eeg_microstates_relabel(method, results, microstates_labels, reverse_microstates=None): microstates = list(method[]) for index, microstate in enumerate(method[]): if microstate in list(reverse_microstates.keys()): microstates[index] = reverse_microstates[microstate] method["data"][index] = -1*method["data"][index] if microstate in list(microstates_labels.keys()): microstates[index] = microstates_labels[microstate] method[] = np.array(microstates) return(results, method)
Relabel the microstates.
1,434
def _(s: Influence) -> bool: return is_grounded(s.subj) and is_grounded(s.obj)
Check if an Influence statement is grounded
1,435
def channel(self):
    if not self._channel:
        self._channel_ref = weakref.ref(self.connection.get_channel())
    return self._channel
If no channel exists, a new one is requested.
1,436
def _reduce(self):
    for reduction, methname in self.reducers:
        token_num = len(reduction)
        if (len(self.tokens) >= token_num and
                self.tokens[-token_num:] == reduction):
            meth = getattr(self, methname)
            results = meth(*self.values[-token_num:])
            self.tokens[-token_num:] = [r[0] for r in results]
            self.values[-token_num:] = [r[1] for r in results]
            return self._reduce()
Perform a greedy reduction of token stream. If a reducer method matches, it will be executed, then the :meth:`reduce` method will be called recursively to search for any more possible reductions.
1,437
def check(ctx):
    check_command = f"twine check {ctx.directory!s}/dist/*"
    report.info(ctx, "package.check", "checking package")
    ctx.run(check_command)
Check built package is valid.
1,438
def get_collections(self, data): collections = self._filter_queryset(, data.collection_set.all()) from .collection import CollectionSerializer class CollectionWithoutDataSerializer(WithoutDataSerializerMixin, CollectionSerializer): return self._serialize_items(CollectionWithoutDataSerializer, , collections)
Return serialized list of collection objects on data that user has `view` permission on.
1,439
def get_network_by_id(self, network_id: int) -> Network: return self.session.query(Network).get(network_id)
Get a network from the database by its identifier.
1,440
def set_active_vectors(self, name, preference=): _, field = get_scalar(self, name, preference=preference, info=True) if field == POINT_DATA_FIELD: self.GetPointData().SetActiveVectors(name) elif field == CELL_DATA_FIELD: self.GetCellData().SetActiveVectors(name) else: raise RuntimeError(.format(field)) self._active_vectors_info = [field, name]
Finds the vectors by name and appropriately sets it as active
1,441
def run(self): try: build_ext.build_ext.run(self) build_dir = os.path.abspath(self.build_lib) root_dir = os.path.abspath(os.path.join(__file__, "..")) target_dir = build_dir if not self.inplace else root_dir src_file = os.path.join("advanced_descriptors", "__init__.py") src = os.path.join(root_dir, src_file) dst = os.path.join(target_dir, src_file) if src != dst: shutil.copyfile(src, dst) except ( distutils.errors.DistutilsPlatformError, FileNotFoundError, ): raise BuildFailed()
Run. :raises BuildFailed: extension build failed and need to skip cython part.
1,442
def iam(self): iam = { : self.format[].format(**self.data), : self.format[].format(**self.data), : self.format[].format(**self.data), : self.format[].format(**self.data), : self.format[].format(**self.data), : self.format[].format(**self.data), : self.format[].format(**self.data), } return iam
Generate iam details.
1,443
def update_loadbalancer(self, lbaas_loadbalancer, body=None): return self.put(self.lbaas_loadbalancer_path % (lbaas_loadbalancer), body=body)
Updates a load balancer.
1,444
def get_default_config_help(self): config_help = super(MemoryLxcCollector, self).get_default_config_help() config_help.update({ "sys_path": "Defaults to ", }) return config_help
Return help text for collector configuration.
1,445
def from_json(self, resource_root, data): if data is None: return None if self._atype == datetime.datetime: return datetime.datetime.strptime(data, self.DATE_FMT) elif self._atype == ApiConfig: if not data[]: return { } first = data[][0] return json_to_config(data, len(first) == 2) elif self._is_api_list: return ApiList.from_json_dict(data, resource_root, self._atype) elif isinstance(data, list): return [ self.from_json(resource_root, x) for x in data ] elif hasattr(self._atype, ): return self._atype.from_json_dict(data, resource_root) else: return data
Parses the given JSON value into an appropriate python object. This means: - a datetime.datetime if 'atype' is datetime.datetime - a converted config dictionary or config list if 'atype' is ApiConfig - if the attr is an API list, an ApiList with instances of 'atype' - an instance of 'atype' if it has a 'from_json_dict' method - a python list with decoded versions of the member objects if the input is a python list. - the raw value otherwise
1,446
def getKeywordsForText(self, retina_name, body, ): resourcePath = method = queryParams = {} headerParams = {: , : } postData = None queryParams[] = retina_name postData = body response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams) return response.json()
Get a list of keywords from the text Args: retina_name, str: The retina name (required) body, str: The text to be evaluated (required) Returns: Array[str]
1,447
def pack_image(filename, max_size, form_field=): try: if os.path.getsize(filename) > (max_size * 1024): raise IdeaScalyError( % max_size) except os.error as e: raise IdeaScalyError( % e.strerror) fp = open(filename, ) file_type = mimetypes.guess_type(filename) if file_type is None: raise IdeaScalyError() file_type = file_type[0] if file_type not in [, , ]: raise IdeaScalyError( % file_type) if isinstance(filename, six.text_type): filename = filename.encode() BOUNDARY = b body = list() body.append(b + BOUNDARY) body.append( .format(form_field, filename) .encode()) body.append(.format(file_type).encode()) body.append(b) body.append(fp.read()) body.append(b + BOUNDARY + b) body.append(b) fp.close() body = b.join(body) body_length = str(len(body)) headers = { : .format(BOUNDARY), : body_length } return headers, body
Pack an image from file into multipart-formdata post body
1,448
def _convert_types(schema, col_type_dict, row):
    converted_row = []
    for col_name, col_val in zip(schema, row):
        if type(col_val) in (datetime, date):
            col_val = time.mktime(col_val.timetuple())
        elif isinstance(col_val, Decimal):
            col_val = float(col_val)
        elif col_type_dict.get(col_name) == "BYTES":
            col_val = base64.standard_b64encode(col_val).decode()
        else:
            col_val = col_val
        converted_row.append(col_val)
    return converted_row
Takes a value from MySQLdb, and converts it to a value that's safe for JSON/Google cloud storage/BigQuery. Dates are converted to UTC seconds. Decimals are converted to floats. Binary type fields are encoded with base64, as imported BYTES data must be base64-encoded according to Bigquery SQL date type documentation: https://cloud.google.com/bigquery/data-types
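An illustrative conversion; the column names and types are made up, and the exact epoch value depends on the local timezone:

from datetime import date
from decimal import Decimal

row = _convert_types(["day", "price"], {"price": "FLOAT"},
                     (date(2020, 1, 1), Decimal("9.99")))
# e.g. [1577836800.0, 9.99]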
1,449
def plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes): "Plot signal channels" if len(sig_style) == 1: sig_style = n_sig * sig_style if time_units == : t = np.linspace(0, sig_len-1, sig_len) else: downsample_factor = {:fs, :fs * 60, :fs * 3600} t = np.linspace(0, sig_len-1, sig_len) / downsample_factor[time_units] if signal.ndim == 1: axes[0].plot(t, signal, sig_style[0], zorder=3) else: for ch in range(n_sig): axes[ch].plot(t, signal[:,ch], sig_style[ch], zorder=3)
Plot signal channels
1,450
def is_scalar(value): return np.isscalar(value) or (isinstance(value, np.ndarray) and (len(np.squeeze(value).shape) == 0))
Test if the given value is a scalar. This function also works with memory mapped array values, in contrast to the numpy is_scalar method. Args: value: the value to test for being a scalar value Returns: boolean: if the given value is a scalar or not
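Illustrative behaviour of the helper, assuming NumPy is imported as np:

import numpy as np

is_scalar(3.0)                   # True
is_scalar(np.array(3.0))         # True, zero-dimensional array
is_scalar(np.array([1.0, 2.0]))  # False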
1,451
def noisy_operation(self, operation: ) -> : if not hasattr(self.noisy_moments, ): return self.noisy_moments([ops.Moment([operation])], operation.qubits) if not hasattr(self.noisy_moment, ): return self.noisy_moment(ops.Moment([operation]), operation.qubits) assert False,
Adds noise to an individual operation. Args: operation: The operation to make noisy. Returns: An OP_TREE corresponding to the noisy operations implementing the noisy version of the given operation.
1,452
def do_check_artifact_cache(self, vts, post_process_cached_vts=None): if not vts: return [], [], [] read_cache = self._cache_factory.get_read_cache() items = [(read_cache, vt.cache_key, vt.current_results_dir if self.cache_target_dirs else None) for vt in vts] res = self.context.subproc_map(call_use_cached_files, items) cached_vts = [] uncached_vts = [] uncached_causes = [] for vt, was_in_cache in zip(vts, res): if was_in_cache: cached_vts.extend(vt.versioned_targets) else: uncached_vts.extend(vt.versioned_targets) uncached_causes.extend(repeat(was_in_cache, len(vt.versioned_targets))) if isinstance(was_in_cache, UnreadableArtifact): self._cache_key_errors.update(was_in_cache.key) if post_process_cached_vts: post_process_cached_vts(cached_vts) for vt in cached_vts: vt.update() return cached_vts, uncached_vts, uncached_causes
Checks the artifact cache for the specified list of VersionedTargetSets. Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were satisfied/unsatisfied from the cache.
1,453
def is_dict_like(obj):
    dict_like_attrs = ("__getitem__", "keys", "__contains__")
    return (all(hasattr(obj, attr) for attr in dict_like_attrs)
            and not isinstance(obj, type))
Check if the object is dict-like. Parameters ---------- obj : The object to check Returns ------- is_dict_like : bool Whether `obj` has dict-like properties. Examples -------- >>> is_dict_like({1: 2}) True >>> is_dict_like([1, 2, 3]) False >>> is_dict_like(dict) False >>> is_dict_like(dict()) True
1,454
def set_fixed_image(self, image): if not isinstance(image, iio.ANTsImage): raise ValueError() if image.dimension != self.dimension: raise ValueError( % (image.dimension, self.dimension)) self._metric.setFixedImage(image.pointer, False) self.fixed_image = image
Set Fixed ANTsImage for metric
1,455
def _translate_key(key):
    d = _get_deprecated_option(key)
    if d:
        return d.rkey or key
    else:
        return key
if key id deprecated and a replacement key defined, will return the replacement key, otherwise returns `key` as - is
1,456
def _generate_processed_key_name(process_to, upload_name): timestamp = datetime.now().strftime() name, extension = os.path.splitext(upload_name) digest = md5(.join([timestamp, upload_name])).hexdigest() return os.path.join(process_to, .format(digest, extension))
Returns a key name to use after processing based on timestamp and upload key name.
1,457
def get_relative_from_paths(self, filepath, paths): for systempath in paths_by_depth(paths): if filepath.startswith(systempath): return os.path.relpath(filepath, systempath) raise FinderException(" could not " "find filepath start from ".format(filepath))
Find the relative filepath from the most relevant multiple paths. This is somewhat like a ``os.path.relpath(path[, start])`` but where ``start`` is a list. The most relevant item from ``paths`` will be used to apply the relative transform. Args: filepath (str): Path to transform to relative. paths (list): List of absolute paths to use to find and remove the start path from ``filepath`` argument. If there are multiple paths starting with the same directories, the longest one will match. Raises: boussole.exception.FinderException: If no ``filepath`` start could be found. Returns: str: Relative filepath where the start coming from ``paths`` is removed.
1,458
def find_dangerous_changes( old_schema: GraphQLSchema, new_schema: GraphQLSchema ) -> List[DangerousChange]: return ( find_arg_changes(old_schema, new_schema).dangerous_changes + find_values_added_to_enums(old_schema, new_schema) + find_interfaces_added_to_object_types(old_schema, new_schema) + find_types_added_to_unions(old_schema, new_schema) + find_fields_that_changed_type_on_input_object_types( old_schema, new_schema ).dangerous_changes )
Find dangerous changes. Given two schemas, returns a list containing descriptions of all the types of potentially dangerous changes covered by the other functions down below.
1,459
def desc(self) -> str:
    kind, value = self.kind.value, self.value
    return f"{kind} {value!r}" if value else kind
A helper property to describe a token as a string for debugging
1,460
def _get_content_type(self, filename): mntype = mimetypes.guess_type(filename)[0] filename, fileExtension = os.path.splitext(filename) if mntype is None and\ fileExtension.lower() == ".csv": mntype = "text/csv" elif mntype is None and \ fileExtension.lower() == ".sd": mntype = "File/sd" elif mntype is None: mntype= "File/%s" % fileExtension.replace(, ) return mntype
gets the content type of a file
1,461
def set(self, project, date, data, data_ts): data[] = { : project, : date.strftime(), : time.time(), : VERSION, : data_ts } fpath = self._path_for_file(project, date) logger.debug(, project, date.strftime(), fpath) with open(fpath, ) as fh: fh.write(json.dumps(data))
Set the cache data for a specified project for the specified date. :param project: project name to set data for :type project: str :param date: date to set data for :type date: datetime.datetime :param data: data to cache :type data: dict :param data_ts: maximum timestamp in the BigQuery data table :type data_ts: int
1,462
def _button_plus_clicked(self, n): self._button_save.setEnabled(True) self.insert_colorpoint(self._colorpoint_list[n][0], self._colorpoint_list[n][1], self._colorpoint_list[n][2]) self._build_gui()
Create a new colorpoint.
1,463
def Unlock(fd, path): try: fcntl.flock(fd, fcntl.LOCK_UN | fcntl.LOCK_NB) except IOError as e: if e.errno == errno.EWOULDBLOCK: raise IOError( % path) else: raise IOError( % (path, str(e)))
Release the lock on the file. Args: fd: int, the file descriptor of the file to unlock. path: string, the name of the file to lock. Raises: IOError, raised from flock while attempting to release a file lock.
1,464
def _set_linkinfo_domain_reachable(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={: u, : [u]}), is_leaf=True, yang_name="linkinfo-domain-reachable", rest_name="linkinfo-domain-reachable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u: {u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "brocade-fabric-service:linkinfo-domain-reachable-type", : , }) self.__linkinfo_domain_reachable = t if hasattr(self, ): self._set()
Setter method for linkinfo_domain_reachable, mapped from YANG variable /brocade_fabric_service_rpc/show_linkinfo/output/show_link_info/linkinfo_domain_reachable (linkinfo-domain-reachable-type) If this variable is read-only (config: false) in the source YANG file, then _set_linkinfo_domain_reachable is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_linkinfo_domain_reachable() directly. YANG Description: Indicates whether the RBridge is reachable or not. Yes - Indicates RBridge is reachable No - Indicates RBridge is not reachable.
1,465
def linear_exprs(A, x, b=None, rref=False, Matrix=None):
    if b is None:
        b = [0]*len(x)
    if rref:
        rA, rb = linear_rref(A, b, Matrix)
        if Matrix is None:
            from sympy import Matrix
        return [lhs - rhs for lhs, rhs in zip(rA * Matrix(len(x), 1, x), rb)]
    else:
        return [sum([x0*x1 for x0, x1 in zip(row, x)]) - v
                for row, v in zip(A, b)]
Returns Ax - b Parameters ---------- A : matrix_like of numbers Of shape (len(b), len(x)). x : iterable of symbols b : array_like of numbers (default: None) When ``None``, assume zeros of length ``len(x)``. Matrix : class When ``rref == True``: A matrix class which supports slicing, and methods ``__mul__`` and ``rref``. Defaults to ``sympy.Matrix``. rref : bool Calculate the reduced row echelon form of (A | -b). Returns ------- A list of the elements in the resulting column vector.
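A small worked example using the default (non-rref) branch, assuming SymPy symbols:

import sympy

x, y = sympy.symbols("x y")
exprs = linear_exprs([[1, 2], [3, 4]], [x, y], [5, 6])
# [x + 2*y - 5, 3*x + 4*y - 6]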
1,466
def decompressBuffer(buffer):
    "complements the compressBuffer function in CacheClient"
    zbuf = cStringIO.StringIO(buffer)
    zfile = gzip.GzipFile(fileobj=zbuf)
    deflated = zfile.read()
    zfile.close()
    return deflated
complements the compressBuffer function in CacheClient
1,467
def login(self, username, json_document): url = .format(self.url, username) make_request( url, method=, body=json_document, timeout=self.timeout)
Send user identity information to the identity manager. Raise a ServerError if an error occurs in the request process. @param username The logged in user. @param json_document The JSON payload for login.
1,468
def extract_largest(self, inplace=False):
    mesh = self.connectivity(largest=True)
    if inplace:
        self.overwrite(mesh)
    else:
        return mesh
Extract largest connected set in mesh. Can be used to reduce residues obtained when generating an isosurface. Works only if residues are not connected to (i.e., do not share at least one point with) the main component of the image. Parameters ---------- inplace : bool, optional Updates mesh in-place while returning nothing. Returns ------- mesh : vtki.PolyData Largest connected set in mesh
1,469
def to_image_list(tensors, size_divisible=0): if isinstance(tensors, torch.Tensor) and size_divisible > 0: tensors = [tensors] if isinstance(tensors, ImageList): return tensors elif isinstance(tensors, torch.Tensor): assert tensors.dim() == 4 image_sizes = [tensor.shape[-2:] for tensor in tensors] return ImageList(tensors, image_sizes) elif isinstance(tensors, (tuple, list)): max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors])) if size_divisible > 0: import math stride = size_divisible max_size = list(max_size) max_size[1] = int(math.ceil(max_size[1] / stride) * stride) max_size[2] = int(math.ceil(max_size[2] / stride) * stride) max_size = tuple(max_size) batch_shape = (len(tensors),) + max_size batched_imgs = tensors[0].new(*batch_shape).zero_() for img, pad_img in zip(tensors, batched_imgs): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) image_sizes = [im.shape[-2:] for im in tensors] return ImageList(batched_imgs, image_sizes) else: raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
tensors can be an ImageList, a torch.Tensor or an iterable of Tensors. It can't be a numpy array. When tensors is an iterable of Tensors, it pads the Tensors with zeros so that they have the same shape
1,470
def getSolutionIter(self):
    domains, constraints, vconstraints = self._getArgs()
    if not domains:
        return iter(())
    return self._solver.getSolutionIter(domains, constraints, vconstraints)
Return an iterator to the solutions of the problem Example: >>> problem = Problem() >>> list(problem.getSolutionIter()) == [] True >>> problem.addVariables(["a"], [42]) >>> iter = problem.getSolutionIter() >>> next(iter) {'a': 42} >>> next(iter) Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration
1,471
def find_multiplex_by_name(self, multiplex_name: str) -> Multiplex: for multiplex in self.multiplexes: if multiplex.name == multiplex_name: return multiplex raise AttributeError(f)
Find and return a multiplex in the influence graph with the given name. Raise an AttributeError if there is no multiplex in the graph with the given name.
1,472
def get_suppliers_per_page(self, per_page=1000, page=1, params=None): return self._get_resource_per_page(resource=SUPPLIERS, per_page=per_page, page=page, params=params)
Get suppliers per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list
1,473
def key_exists(self, section, key): LOGGER.debug("> Checking key existence in section.".format(key, section)) self.__settings.beginGroup(section) exists = self.__settings.contains(key) self.__settings.endGroup() return exists
Checks if given key exists. :param section: Current section to check key in. :type section: unicode :param key: Current key to check. :type key: unicode :return: Key existence. :rtype: bool
1,474
def _decode_embedded_dict(src):
    output = {}
    for key, val in six.iteritems(src):
        if isinstance(val, dict):
            val = _decode_embedded_dict(val)
        elif isinstance(val, list):
            val = _decode_embedded_list(val)
        elif isinstance(val, bytes):
            try:
                val = val.decode()
            except UnicodeError:
                pass
        if isinstance(key, bytes):
            try:
                key = key.decode()
            except UnicodeError:
                pass
        output[key] = val
    return output
Convert embedded bytes to strings if possible. Dict helper.
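Illustrative behaviour (lists are delegated to the companion _decode_embedded_list helper, defined elsewhere); undecodable bytes are kept unchanged:

_decode_embedded_dict({b"name": b"salt", b"raw": b"\xff"})
# {'name': 'salt', 'raw': b'\xff'}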
1,475
def _filter_headers(self):
    headers = {}
    for user in self.usernames:
        headers["fedora_messaging_user_{}".format(user)] = True
    for package in self.packages:
        headers["fedora_messaging_rpm_{}".format(package)] = True
    for container in self.containers:
        headers["fedora_messaging_container_{}".format(container)] = True
    for module in self.modules:
        headers["fedora_messaging_module_{}".format(module)] = True
    for flatpak in self.flatpaks:
        headers["fedora_messaging_flatpak_{}".format(flatpak)] = True
    return headers
Add headers designed for filtering messages based on objects. Returns: dict: Filter-related headers to be combined with the existing headers
1,476
def import_app_module(app_name, module_name): name_split = app_name.split() if name_split[-1][0].isupper(): app_name = .join(name_split[:-2]) module = import_module(app_name) try: sub_module = import_module( % (app_name, module_name)) return sub_module except: if module_has_submodule(module, module_name): raise return None
Returns a module from a given app by its name. :param str app_name: :param str module_name: :rtype: module or None
1,477
def dig(host): * cmd = .format(salt.utils.network.sanitize_host(host)) return __salt__[](cmd)
Performs a DNS lookup with dig CLI Example: .. code-block:: bash salt '*' network.dig archlinux.org
1,478
def create_filter(self): return Filter( self.networkapi_url, self.user, self.password, self.user_ldap)
Get an instance of filter services facade.
1,479
def _check_connections(self): for server in self._servers: if self._is_reachable(server): server[] = 0 else: server[] = time.time() + 5
Checks if all configured redis servers are reachable
1,480
def clean(self):
    self._table.clear()
    for item in self._usage_recency:
        self._usage_recency.remove(item)
Empties the cache
1,481
def fit(self, X, y, **fit_params): return default_client().sync(self._fit, X, y, **fit_params)
Find the best parameters for a particular model. Parameters ---------- X, y : array-like **fit_params Additional partial fit keyword arguments for the estimator.
1,482
def json_data(self):
    def stringify_keys(d):
        if not isinstance(d, dict):
            return d
        return dict((str(k), stringify_keys(v)) for k, v in d.items())
    data = self.make_data()
    json_data = json.dumps(stringify_keys(data))
    return json_data
Returns data as JSON Returns: json_data (str): JSON representation of data, as created in make_data
1,483
def fetch_assets(self): packages = set( env.instance.config.get(, ).split()) packages.update([]) cmd = env.instance.config.get(, ) items = sorted(self.bootstrap_files.items()) for filename, asset in items: if asset.url: if not exists(dirname(asset.local)): os.makedirs(dirname(asset.local)) local(cmd.format(asset)) if filename == : items.extend(self._fetch_packages(asset.local, packages))
download bootstrap assets to control host. If present on the control host they will be uploaded to the target host during bootstrapping.
1,484
def _serialize_value_for_xml(self, value): if value is not None: value_serialized = self.serializer.serialize(value) else: value_serialized = return value_serialized
See base class.
1,485
def applyIndex(self, lst, right): if len(right) != 1: raise exceptions.EvaluationError( % (self.left, self.right)) right = right[0] if isinstance(right, int): return lst[right] raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, right))
Apply a list to something else.
1,486
def indication(self, pdu):
    if _debug:
        TCPServer._debug("indication %r", pdu)
    self.request += pdu.pduData
Requests are queued for delivery.
1,487
def get_partition_dciId(self, org_name, part_name, part_info=None):
    if part_info is None:
        part_info = self._get_partition(org_name, part_name)
        LOG.info("query result from dcnm for partition info is %s", part_info)
    if part_info is not None and "dciId" in part_info:
        return part_info.get("dciId")
get DCI ID for the partition. :param org_name: name of organization :param part_name: name of partition
1,488
def load_vectors(self, vectors, **kwargs): if not isinstance(vectors, list): vectors = [vectors] for idx, vector in enumerate(vectors): if six.PY2 and isinstance(vector, str): vector = six.text_type(vector) if isinstance(vector, six.string_types): if vector not in pretrained_aliases: raise ValueError( "Got string input vector {}, but allowed pretrained " "vectors are {}".format( vector, list(pretrained_aliases.keys()))) vectors[idx] = pretrained_aliases[vector](**kwargs) elif not isinstance(vector, Vectors): raise ValueError( "Got input vectors of type {}, expected str or " "Vectors object".format(type(vector))) tot_dim = sum(v.dim for v in vectors) self.vectors = torch.Tensor(len(self), tot_dim) for i, token in enumerate(self.itos): start_dim = 0 for v in vectors: end_dim = start_dim + v.dim self.vectors[i][start_dim:end_dim] = v[token.strip()] start_dim = end_dim assert(start_dim == tot_dim)
Arguments: vectors: one of or a list containing instantiations of the GloVe, CharNGram, or Vectors classes. Alternatively, one of or a list of available pretrained vectors: charngram.100d fasttext.en.300d fasttext.simple.300d glove.42B.300d glove.840B.300d glove.twitter.27B.25d glove.twitter.27B.50d glove.twitter.27B.100d glove.twitter.27B.200d glove.6B.50d glove.6B.100d glove.6B.200d glove.6B.300d Remaining keyword arguments: Passed to the constructor of Vectors classes.
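A hedged usage sketch in torchtext style; the vocab object and chosen vectors below are illustrative, not part of the docstring above:

from torchtext.vocab import GloVe, CharNGram

vocab.load_vectors("glove.6B.100d")                           # by pretrained alias
vocab.load_vectors([GloVe(name="6B", dim=100), CharNGram()])  # or Vectors instances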
1,489
def detach(self, ids=None, touch=True): if isinstance(ids, orator.orm.model.Model): ids = ids.get_key() if ids is None: ids = [] query = self._new_pivot_query() if not isinstance(ids, list): ids = [ids] if len(ids) > 0: query.where_in(self._other_key, ids) if touch: self.touch_if_touching() results = query.delete() return results
Detach models from the relationship.
1,490
def tValueForPoint(self, point):
    if self.segmentType == "curve":
        on1 = self.previousOnCurve
        off1 = self.points[0].coordinates
        off2 = self.points[1].coordinates
        on2 = self.points[2].coordinates
        return _tValueForPointOnCubicCurve(point, (on1, off1, off2, on2))
    elif self.segmentType == "line":
        return _tValueForPointOnLine(point, (self.previousOnCurve, self.points[0].coordinates))
    elif self.segmentType == "qcurve":
        raise NotImplementedError
    else:
        raise NotImplementedError
Get a t value for a given point. Required: the point must be a point on the curve. In an overlap case the point will be an intersection point, which is always a point on the curve.
1,491
def delete_messages(self, messages): url = "/2/messages/?%s" % urlencode([(, ",".join(messages))]) data = self._delete_resource(url) return data
Delete existing messages. http://dev.wheniwork.com/#delete-existing-message
1,492
def getTypeName(data_type_oid, type_modifier): if data_type_oid == VerticaType.BOOL: return "Boolean" elif data_type_oid == VerticaType.INT8: return "Integer" elif data_type_oid == VerticaType.FLOAT8: return "Float" elif data_type_oid == VerticaType.CHAR: return "Char" elif data_type_oid in (VerticaType.VARCHAR, VerticaType.UNKNOWN): return "Varchar" elif data_type_oid == VerticaType.LONGVARCHAR: return "Long Varchar" elif data_type_oid == VerticaType.DATE: return "Date" elif data_type_oid == VerticaType.TIME: return "Time" elif data_type_oid == VerticaType.TIMETZ: return "TimeTz" elif data_type_oid == VerticaType.TIMESTAMP: return "Timestamp" elif data_type_oid == VerticaType.TIMESTAMPTZ: return "TimestampTz" elif data_type_oid in (VerticaType.INTERVAL, VerticaType.INTERVALYM): return "Interval " + getIntervalRange(data_type_oid, type_modifier) elif data_type_oid == VerticaType.BINARY: return "Binary" elif data_type_oid == VerticaType.VARBINARY: return "Varbinary" elif data_type_oid == VerticaType.LONGVARBINARY: return "Long Varbinary" elif data_type_oid == VerticaType.NUMERIC: return "Numeric" elif data_type_oid == VerticaType.UUID: return "Uuid" else: return "Unknown"
Returns the base type name according to data_type_oid and type_modifier
1,493
def parse(self, args=None): s `[]` field. s in the world can_', []).append(name) return opts
Parse a list of arguments, returning a dict. Flags are only boolean if they are not followed by a non-flag argument. All positional arguments not associable with a flag will be added to the return dictionary's `['_']` field.
1,494
def readline(self):
    self.lineno += 1
    if self._buffer:
        return self._buffer.pop()
    else:
        return self.input.readline()
Get the next line including the newline or '' on EOF.
1,495
def max_intensity(item_a, time_a, item_b, time_b, max_value):
    intensity_a = item_a.max_intensity(time_a)
    intensity_b = item_b.max_intensity(time_b)
    diff = np.sqrt((intensity_a - intensity_b) ** 2)
    return np.minimum(diff, max_value) / float(max_value)
RMS difference in maximum intensity Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
1,496
def parser(self):
    if self._command_parser is None:
        parents = []
        if self.need_verbose:
            parents.append(_verbose_parser)
        if self.need_settings:
            parents.append(_settings_parser)
        self._command_parser = self._main_parser.add_parser(
            self.name, help=self.help, parents=parents,
            formatter_class=argparse.RawDescriptionHelpFormatter)
    return self._command_parser
Returns the appropriate parser to use for adding arguments to your command.
1,497
def read(self, filename): with tarfile.open(filename, "r:*") as arc: temp_dir = tempfile.mkdtemp() arc.extractall(path=temp_dir, members=_safemembers(arc)) tribe_dir = glob.glob(temp_dir + os.sep + )[0] self._read_from_folder(dirname=tribe_dir) shutil.rmtree(temp_dir) return self
Read a tribe of templates from a tar formatted file. :type filename: str :param filename: File to read templates from. .. rubric:: Example >>> tribe = Tribe(templates=[Template(name='c', st=read())]) >>> tribe.write('test_tribe') Tribe of 1 templates >>> tribe_back = Tribe().read('test_tribe.tgz') >>> tribe_back == tribe True
1,498
def build(path, query=None, fragment=): url = nstr(path) keys = projex.text.findkeys(path) if keys: if query is None: query = {} opts = {} for key in keys: opts[key] = query.pop(key, .format(key)) url %= opts if query: if type(query) is dict: mapped_query = {} for key, value in query.items(): mapped_query[nstr(key)] = nstr(value) query_str = urllib.urlencode(mapped_query) else: query_str = nstr(query) url += + query_str if fragment: url += + fragment return url
Generates a URL based on the inputted path and given query options and fragment. The query should be a dictionary of terms that will be generated into the URL, while the fragment is the anchor point within the target path that will be navigated to. If there are any wildcards within the path that are found within the query, they will be inserted into the path itself and removed from the query string. :example |>>> import skyline.gui |>>> skyline.gui.build_url('sky://projects/%(project)s', | {'project': 'Test', 'asset': 'Bob'}) |'sky://projects/Test/?asset=Bob' :param path | <str> query | <dict> || None fragment | <str> || None :return <str> | url
1,499
def unpack(self, buff, offset=0):
    super().unpack(buff, offset)
    if self.tpid.value:
        self._validate()
        self.tpid = self.tpid.value
        self.pcp = self._tci.value >> 13
        self.cfi = (self._tci.value >> 12) & 1
        self.vid = self._tci.value & 4095
    else:
        self.tpid = EtherType.VLAN
        self.pcp = None
        self.cfi = None
        self.vid = None
Unpack a binary struct into this object's attributes. Return the values instead of the lib's basic types. After unpacking, the abscence of a `tpid` value causes the assignment of None to the field values to indicate that there is no VLAN information. Args: buff (bytes): Binary buffer. offset (int): Where to begin unpacking. Raises: :exc:`~.exceptions.UnpackException`: If unpack fails.