Columns: "Unnamed: 0" (int64, values 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k)
27,800
def build_knowledge_graph(self, json_ontology: dict) -> List:
    nested_docs = list()
    if json_ontology:
        for key in list(json_ontology):
            j_values = json_ontology[key]
            if not isinstance(j_values, list):
                j_values = [j_values]
            for j_value in j_values:
                if not isinstance(j_value, dict):
                    if self.kg:
                        if key not in ["doc_id", "@id"]:
                            self.kg.add_value(key, value=j_value)
                else:
                    child_doc_id = None
                    if "doc_id" in j_value:
                        child_doc_id = j_value["doc_id"]
                    elif "@id" in j_value:
                        child_doc_id = j_value["@id"]
                    child_doc = Document(self.etk, cdr_document=dict(), mime_type="", url="")
                    nested_docs.extend(child_doc.build_knowledge_graph(j_value))
                    if not child_doc_id:
                        child_doc_id = Utility.create_doc_id_from_json(child_doc.kg._kg)
                    if self.kg:
                        self.kg.add_value(key, value=child_doc_id)
                    child_doc.cdr_document["doc_id"] = child_doc_id
                    nested_docs.append(child_doc)
    return nested_docs
The idea is to be able to build a json knowledge graph from a json-like ontology representation, e.g.: kg_object_ontology = { "uri": doc.doc_id, "country": doc.select_segments("$.Location"), "type": [ "Event", doc.extract(self.incomp_decoder, doc.select_segments("$.Incomp")[0]), doc.extract(self.int_decoder, doc.select_segments("$.Int")[0]) ] } Currently only the json ontology representation is supported; new ways might be added later. Args: json_ontology: a json ontology representation of a knowledge graph Returns: returns a list of nested documents created
27,801
def lessgreater(x, y):
    x = BigFloat._implicit_convert(x)
    y = BigFloat._implicit_convert(y)
    return mpfr.mpfr_lessgreater_p(x, y)
Return True if x < y or x > y and False otherwise. This function returns False whenever x and/or y is a NaN.
27,802
def cudnnSetTensor(handle, srcDesc, srcData, value):
    dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDesc)
    if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']:
        alphaRef = ctypes.byref(ctypes.c_double(value))
    else:
        alphaRef = ctypes.byref(ctypes.c_float(value))
    status = _libcudnn.cudnnSetTensor(handle, srcDesc, srcData, alphaRef)
    cudnnCheckStatus(status)
Set all data points of a tensor to a given value : srcDest = alpha. Parameters ---------- handle : cudnnHandle Handle to a previously created cuDNN context. srcDesc : cudnnTensorDescriptor Handle to a previously initialized tensor descriptor. srcData : void_p Pointer to data of the tensor described by srcDesc descriptor. value : float Value that all elements of the tensor will be set to.
27,803
def _index_to_ansi_values(self, index):
    if self.__class__.__name__[0] == 'F':  # foreground palette
        if index < 8:
            index += ANSI_FG_LO_BASE
        else:
            index += (ANSI_FG_HI_BASE - 8)
    else:  # background palette
        if index < 8:
            index += ANSI_BG_LO_BASE
        else:
            index += (ANSI_BG_HI_BASE - 8)
    return [str(index)]
Converts a palette index to the corresponding ANSI color. Arguments: index - an int (from 0-15) Returns: index as str in a list for compatibility with values.
27,804
def set_max_waypoint_items(self, max_waypoint_items):
    if self.get_max_waypoint_items_metadata().is_read_only():
        raise NoAccess()
    if not self.my_osid_object_form._is_valid_cardinal(
            max_waypoint_items,
            self.get_max_waypoint_items_metadata()):
        raise InvalidArgument()
    self.my_osid_object_form._my_map['maxWaypointItems'] = max_waypoint_items
This determines how many waypoint items will be seen for a scaffolded wrong answer
27,805
def _run_qmc(self, boot):
    self._tmp = os.path.join(self.dirs, ".tmptre")
    cmd = [ip.bins.qmc, "qrtt="+self.files.qdump, "otre="+self._tmp]
    proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    res = proc.communicate()
    if proc.returncode:
        raise IPyradWarningExit(res[1])
    with open(self._tmp, 'r') as intree:
        tre = ete3.Tree(intree.read().strip())
    names = tre.get_leaves()
    for name in names:
        name.name = self.samples[int(name.name)]
    tmptre = tre.write(format=9)
    if boot:
        self.trees.boots = os.path.join(self.dirs, self.name+".boots")
        with open(self.trees.boots, 'a') as outboot:
            outboot.write(tmptre+"\n")
    else:
        self.trees.tree = os.path.join(self.dirs, self.name+".tree")
        with open(self.trees.tree, 'w') as outtree:
            outtree.write(tmptre)
    self._save()
Runs quartet max-cut QMC on the quartets qdump file.
27,806
def _crop(self, bounds, xsize=None, ysize=None, resampling=Resampling.cubic):
    out_raster = self[
        int(bounds[0]): int(bounds[2]),
        int(bounds[1]): int(bounds[3])
    ]
    if xsize and ysize:
        if not (xsize == out_raster.width and ysize == out_raster.height):
            out_raster = out_raster.resize(dest_width=xsize, dest_height=ysize, resampling=resampling)
    return out_raster
Crop raster outside vector (convex hull). :param bounds: bounds on image :param xsize: output raster width, None for full resolution :param ysize: output raster height, None for full resolution :param resampling: reprojection resampling method, default `cubic` :return: GeoRaster2
27,807
def iselect(self, tag, limit=0):
    for el in CSSMatch(self.selectors, tag, self.namespaces, self.flags).select(limit):
        yield el
Iterate the specified tags.
27,808
def set_from_json(self, obj, json, models=None, setter=None): self._internal_set(obj, json, setter=setter)
Sets the value of this property from a JSON value. Args: obj: (HasProps) : instance to set the property value on json: (JSON-value) : value to set to the attribute to models (dict or None, optional) : Mapping of model ids to models (default: None) This is needed in cases where the attributes to update also have values that have references. setter (ClientSession or ServerSession or None, optional) : This is used to prevent "boomerang" updates to Bokeh apps. (default: None) In the context of a Bokeh server application, incoming updates to properties will be annotated with the session that is doing the updating. This value is propagated through any subsequent change notifications that the update triggers. The session can compare the event setter to itself, and suppress any updates that originate from itself. Returns: None
27,809
def check_state(self, state):
    if state.addr == self.function.addr:
        arch = state.arch
        if self._check_arguments(arch, state):
            return True
    return False
Check if the specific function is reached with certain arguments :param angr.SimState state: The state to check :return: True if the function is reached with certain arguments, False otherwise. :rtype: bool
27,810
def unary_operator(op):
    valid_ops = {'~'}
    if op not in valid_ops:
        raise ValueError("Invalid unary operator %s." % op)

    def unary_operator(self):
        if isinstance(self, NumericalExpression):
            return NumExprFilter.create(
                "{op}({expr})".format(op=op, expr=self._expr),
                self.inputs,
            )
        else:
            return NumExprFilter.create("{op}x_0".format(op=op), (self,))

    unary_operator.__doc__ = "Unary Operator: %s" % op
    return unary_operator
Factory function for making unary operator methods for Filters.
27,811
def get_item(self, path):
    try:
        f = tempfile.NamedTemporaryFile()
        self.blob_service.get_blob_to_path(self.container_name, path, f.name)
        f.seek(0)
        image = Image.open(f.name)
        f.close()
        return image
    except AzureMissingResourceHttpError:
        return False
Get resource item :param path: string :return: Image
27,812
def save_to_file(destination_filename, append=False):
    def decorator_fn(f):
        @wraps(f)
        def wrapper_fn(*args, **kwargs):
            res = f(*args, **kwargs)
            makedirs(os.path.dirname(destination_filename))
            mode = "a" if append else "w"
            with open(destination_filename, mode) as text_file:
                text_file.write(res)
            return res
        return wrapper_fn
    return decorator_fn
Save the output value to file.
27,813
def _update_docs(self, file_to_update): if self.is_dev_version(): regexes = { "/%s/" % "dev": r"\/%s\/" % "master", "=%s" % "dev": "=%s" % "master", } elif self.is_master_version(): regexes = { "/%s/" % "master": r"\/%s\/" % "dev", "=%s" % "master": "=%s" % "dev", } else: raise Exception("Please switch to `dev` or `master` branch.") to_update = File(file_to_update).read() for replacement, regex in regexes.items(): to_update = Regex(to_update, regex, replace_with=replacement).replace() File(file_to_update).write(to_update, overwrite=True)
Update the given documentation file or :code:`README.rst` so that it always gives branch-related URLs and information. .. note:: This only applies to the :code:`dev` and :code:`master` branches. :param file_to_update: The file to update. :type file_to_update: str
27,814
def groups_invite(self, room_id, user_id, **kwargs):
    return self.__call_api_post('groups.invite', roomId=room_id, userId=user_id, kwargs=kwargs)
Adds a user to the private group.
27,815
def withdraw(self, account_id, **params):
    for required in ['amount', 'currency', 'payment_method']:
        if required not in params:
            raise ValueError("Missing required parameter: %s" % required)
    response = self._post('v2', 'accounts', account_id, 'withdrawals', data=params)
    return self._make_api_object(response, Withdrawal)
https://developers.coinbase.com/api/v2#withdraw-funds
27,816
def validate_config(self, values, argv=None, strict=False): options = [] for option in self._options: kwargs = option.kwargs.copy() if option.name in values: if in kwargs: raise_for_group.pop(option._mutexgroup, None) if raise_for_group: optstrings = [str(k.option_strings) for k in raise_for_group.values()[0]] msg = "One of %s required. " % " ,".join(optstrings) raise SystemExit(msg + "See --help for more info.") return results
Validate all config values through the command-line parser. This takes all supplied options (which could have been retrieved from a number of sources (such as CLI, env vars, etc...) and then validates them by running them through argparser (and raises SystemExit on failure). :returns dict: key/values for all config values (from all sources) :raises: SystemExit
27,817
def from_dict(cls, eas_from_nios):
    if not eas_from_nios:
        return
    return cls({name: cls._process_value(ib_utils.try_value_to_bool,
                                         eas_from_nios[name]['value'])
                for name in eas_from_nios})
Converts extensible attributes from the NIOS reply.
27,818
def add_service(self, service_name):
    if service_name not in self.manifest['services']:
        self.manifest['services'].append(service_name)
Add the given service to the manifest.
27,819
def get_pv_args(name, session=None, call=None): if call == : raise SaltCloudException( ) if session is None: log.debug() session = _get_session() vm = _get_vm(name, session=session) pv_args = session.xenapi.VM.get_PV_args(vm) if pv_args: return pv_args return None
Get PV arguments for a VM .. code-block:: bash salt-cloud -a get_pv_args xenvm01
27,820
def _compile(self, dirpath, makename, compiler, debug, profile):
    from os import path
    options = ""
    if debug:
        options += " DEBUG=true"
    if profile:
        options += " GPROF=true"
    from os import system
    codestr = "cd {}; make -f {} F90={} FAM={}" + options
    code = system(codestr.format(dirpath, makename, compiler, compiler[0]))
    return code
Compiles the makefile at the specified location with 'compiler'. :arg dirpath: the full path to the directory where the makefile lives. :arg compiler: one of ['ifort', 'gfortran']. :arg makename: the name of the make file to compile.
27,821
def removeComments( self, comment = None ): if ( not comment ): lang = self.language() if ( lang ): comment = lang.lineComment() if ( not comment ): return False startline, startcol, endline, endcol = self.getSelection() len_comment = len(comment) line, col = self.getCursorPosition() for lineno in range(startline, endline+1 ): self.setSelection(lineno, 0, lineno, len_comment) if ( self.selectedText() == comment ): self.removeSelectedText() self.setSelection(startline, startcol, endline, endcol) self.setCursorPosition(line, col) return True
Removes comments from the editor based on the current selection. If no comment string is supplied, then the comment from the language will be used. :param comment | <str> || None :return <bool> | success
27,822
def expect_lit(char, buf, pos):
    if pos >= len(buf) or buf[pos] != char:
        return None, len(buf)
    return char, pos+1
Expect a literal character at the current buffer position.
27,823
def deleteAll(self):
    for core in self.endpoints:
        self._send_solr_command(self.endpoints[core], "{\"delete\": { \"query\" : \"*:*\"}}")
Deletes whole Solr index. Use with care.
27,824
def results_path(self) -> str: def possible_paths(): yield self._results_path yield self.settings.fetch() yield environ.configs.fetch() yield environ.paths.results(self.uuid) return next(p for p in possible_paths() if p is not None)
The path where the project results will be written
27,825
def extract_variables(href): patterns = [re.sub(r, , pattern) for pattern in re.findall(r, href)] variables = [] for pattern in patterns: for part in pattern.split(","): if not part in variables: variables.append(part) return variables
Return a list of variable names used in a URI template.
27,826
def writelines(self, lines):
    self.make_dir()
    with open(self.path, "w") as f:
        return f.writelines(lines)
Write a list of strings to file.
27,827
def add_user_to_group(iam_client, user, group, quiet = False): if not quiet: printInfo( % group) iam_client.add_user_to_group(GroupName = group, UserName = user)
Add an IAM user to an IAM group :param iam_client: :param group: :param user: :param user_info: :param dry_run: :return:
27,828
def attribute(self): refs = re.findall( "\@([a-zA-Z:]+)=\\\?[\\"]", self.refsDecl ) return refs[-1]
Attribute that serves as a reference getter
27,829
def find_nested_meta_first_bf(d, prop_name):
    m_list = d.get('meta')
    if not m_list:
        return None
    if not isinstance(m_list, list):
        m_list = [m_list]
    for m_el in m_list:
        if m_el.get('@property') == prop_name or m_el.get('@rel') == prop_name:
            return m_el
    return None
Returns the $ value of the first meta element with the @property that matches @prop_name (or None).
27,830
def backup():
    args = parser.parse_args()
    s3_backup_dir(
        args.datadir,
        args.aws_access_key_id,
        args.aws_secret_access_key,
        args.bucket_name,
        args.zip_backups_dir,
        args.backup_aging_time,
        args.s3_folder,
        args.project)
zips into db_backups_dir and uploads to bucket_name/s3_folder fab -f ./fabfile.py backup_dbs
27,831
def add_user_to_group(self, user_name, group_name):
    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_CLOUD,
        'users/' + user_name + '/groups',
        data={'groupid': group_name}
    )
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, [100])
        return True
    raise HTTPResponseError(res)
Adds a user to a group. :param user_name: name of user to be added :param group_name: name of group user is to be added to :returns: True if user added :raises: HTTPResponseError in case an HTTP error status was returned
27,832
def translate(offset, dtype=None):
    assert len(offset) == 3
    x, y, z = offset
    M = np.array([[1., 0., 0., 0.],
                  [0., 1., 0., 0.],
                  [0., 0., 1., 0.],
                  [x, y, z, 1.0]], dtype)
    return M
Translate by an offset (x, y, z) . Parameters ---------- offset : array-like, shape (3,) Translation in x, y, z. dtype : dtype | None Output type (if None, don't cast). Returns ------- M : ndarray Transformation matrix describing the translation.
27,833
def _set_renamed_columns(self, table_diff, command, column):
    new_column = Column(command.to, column.get_type(), column.to_dict())
    table_diff.renamed_columns = {command.from_: new_column}
    return table_diff
Set the renamed columns on the table diff. :rtype: orator.dbal.TableDiff
27,834
def encrypt(self, pkt, seq_num=None, iv=None):
    if not isinstance(pkt, self.SUPPORTED_PROTOS):
        raise TypeError('cannot encrypt %s, supported protos are %s'
                        % (pkt.__class__, self.SUPPORTED_PROTOS))
    if self.proto is ESP:
        return self._encrypt_esp(pkt, seq_num=seq_num, iv=iv)
    else:
        return self._encrypt_ah(pkt, seq_num=seq_num)
Encrypt (and encapsulate) an IP(v6) packet with ESP or AH according to this SecurityAssociation. @param pkt: the packet to encrypt @param seq_num: if specified, use this sequence number instead of the generated one @param iv: if specified, use this initialization vector for encryption instead of a random one. @return: the encrypted/encapsulated packet
27,835
def migrate1(): "Migrate from version 0 to 1" initial = [ "create table Chill (version integer);", "insert into Chill (version) values (1);", "alter table SelectSQL rename to Query;", "alter table Node add column template integer references Template (id) on delete set null;", "alter table Node add column query integer references Query (id) on delete set null;" ] cleanup = [ "drop table SelectSQL_Node;", "drop table Template_Node;" ] c = db.cursor() try: c.execute("select version from Chill limit 1;") except sqlite3.DatabaseError as err: pass result = c.fetchone() if result: version = result[0] if version == 1: current_app.logger.warn("Migration from version 0 to 1 is not needed.") else: current_app.logger.warn("Migration from version 0 to {0} is not supported.".format(version)) return try: for query in initial: c.execute(query) except sqlite3.DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) try: c.execute(fetch_query_string()) except sqlite3.DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) result = c.fetchall() if result: (result, col_names) = rowify(result, c.description) for kw in result: try: c.execute(, {:kw[]}) except sqlite3.DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) try: c.execute(, {:kw[]}) except sqlite3.DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) try: for query in cleanup: c.execute(query) except sqlite3.DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) db.commit()
Migrate from version 0 to 1
27,836
def show(*args, **kwargs):
    _, plt, _ = _import_plt()
    plt.show(*args, **kwargs)
Show created figures, alias to ``plt.show()``. By default, showing plots does not block the prompt. Calling this function will block execution.
27,837
def changed_lines(self):
    lines = []
    file_name = ''
    line_number = 0
    patch_position = -1
    found_first_information_line = False
    for i, content in enumerate(self.body.splitlines()):
        range_information_match = RANGE_INFORMATION_LINE.search(content)
        file_name_line_match = FILE_NAME_LINE.search(content)
        if file_name_line_match:
            file_name = file_name_line_match.group('file_name')
            found_first_information_line = False
        elif range_information_match:
            line_number = int(range_information_match.group('line_number'))
            if not found_first_information_line:
                patch_position = 0
                found_first_information_line = True
        elif MODIFIED_LINE.search(content):
            line = {
                'file_name': file_name,
                'content': content,
                'line_number': line_number,
                'position': patch_position
            }
            lines.append(line)
            line_number += 1
        elif NOT_REMOVED_OR_NEWLINE_WARNING.search(content) or content == '':
            line_number += 1
        patch_position += 1
    return lines
A list of dicts in the format: { 'file_name': str, 'content': str, 'line_number': int, 'position': int }
27,838
def plot_gaussian_cdf(mean=0., variance=1., ax=None, xlim=None, ylim=(0., 1.), xlabel=None, ylabel=None, label=None): import matplotlib.pyplot as plt if ax is None: ax = plt.gca() sigma = math.sqrt(variance) n = norm(mean, sigma) if xlim is None: xlim = [n.ppf(0.001), n.ppf(0.999)] xs = np.arange(xlim[0], xlim[1], (xlim[1] - xlim[0]) / 1000.) cdf = n.cdf(xs) ax.plot(xs, cdf, label=label) ax.set_xlim(xlim) ax.set_ylim(ylim) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) return ax
Plots a normal distribution CDF with the given mean and variance. x-axis contains the mean, the y-axis shows the cumulative probability. Parameters ---------- mean : scalar, default 0. mean for the normal distribution. variance : scalar, default 0. variance for the normal distribution. ax : matplotlib axes object, optional If provided, the axes to draw on, otherwise plt.gca() is used. xlim, ylim: (float,float), optional specify the limits for the x or y axis as tuple (low,high). If not specified, limits will be automatically chosen to be 'nice' xlabel : str,optional label for the x-axis ylabel : str, optional label for the y-axis label : str, optional label for the legend Returns ------- axis of plot
27,839
def columnCount(self, parent):
    if parent.isValid():
        return parent.internalPointer().column_count()
    else:
        return self._root.column_count()
Return the number of columns for the children of the given parent. :param parent: the parent index :type parent: :class:`QtCore.QModelIndex`: :returns: the column count :rtype: int :raises: None
27,840
def reference(self, install, upgrade): self.template(78) print("| Total {0} {1} installed and {2} {3} upgraded".format( len(install), self.pkg(len(install)), len(upgrade), self.pkg(len(upgrade)))) self.template(78) for installed, upgraded in itertools.izip_longest(install, upgrade): if upgraded: print("| Package {0} upgraded successfully".format(upgraded)) if installed: print("| Package {0} installed successfully".format(installed)) self.template(78) print("")
Reference list with packages installed and upgraded
27,841
def getcomponents(self, product, force_refresh=False):
    proddict = self._lookup_product_in_cache(product)
    product_id = proddict.get("id", None)
    if (force_refresh or product_id is None or
            product_id not in self._cache.component_names):
        self.refresh_products(names=[product], include_fields=["name", "id"])
        proddict = self._lookup_product_in_cache(product)
        if "id" not in proddict:
            raise BugzillaError("Product '%s' not found" % product)
        product_id = proddict["id"]
        opts = {'product_id': product_id, 'field': 'component'}
        names = self._proxy.Bug.legal_values(opts)["values"]
        self._cache.component_names[product_id] = names
    return self._cache.component_names[product_id]
Return a list of component names for the passed product. This can be implemented with Product.get, but behind the scenes it uses Bug.legal_values. Reason being that on bugzilla instances with tons of components, like bugzilla.redhat.com Product=Fedora for example, there's a 10x speed difference even with properly limited Product.get calls. On first invocation the value is cached, and subsequent calls will return the cached data. :param force_refresh: Force refreshing the cache, and return the new data
27,842
def winning_abbr(self):
    if self.winner == HOME:
        return utils._parse_abbreviation(self._home_name)
    return utils._parse_abbreviation(self._away_name)
Returns a ``string`` of the winning team's abbreviation, such as 'HOU' for the Houston Astros.
27,843
def delete(ids, yes): failures = False for id in ids: data_source = get_data_object(id, use_data_config=True) if not data_source: failures = True continue data_name = normalize_data_name(data_source.name) suffix = data_name.split()[-1] if not suffix.isdigit(): failures = True floyd_logger.error(, id) if suffix == : floyd_logger.error() continue if not yes and not click.confirm("Delete Data: {}?".format(data_name), abort=False, default=False): floyd_logger.info("Data %s: Skipped", data_name) continue if not DataClient().delete(data_source.id): failures = True else: floyd_logger.info("Data %s: Deleted", data_name) if failures: sys.exit(1)
Delete datasets.
27,844
async def add_items(self, *items): items = [item.id for item in await self.process(items)] if not items: return await self.connector.post(.format(Id=self.id), data={: .join(items)}, remote=False )
append items to the playlist |coro| Parameters ---------- items : array_like list of items to add(or their ids) See Also -------- remove_items :
27,845
def get_custom_view_details(name, auth, url): view_id = get_custom_views(auth, url, name=name) if view_id is None: return view_id view_id = get_custom_views(auth, url, name=name)[0][] get_custom_view_details_url = + str(view_id) f_url = url + get_custom_view_details_url response = requests.get(f_url, auth=auth, headers=HEADERS) try: if response.status_code == 200: current_devices = (json.loads(response.text)) if in current_devices: if isinstance(current_devices[], dict): return [current_devices[]] else: return current_devices[] else: return [] except requests.exceptions.RequestException as error: return "Error:\n" + str(error) +
function takes authentication credentials and returns a list of dictionaries of custom views from an HPE IMC. Optional name argument will return only the specified view. :param name: (optional) str containing the name of the desired custom view :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: list of dictionaries containing attributes of the custom views :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.groups import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> view_details = get_custom_view_details('My Network View', auth.creds, auth.url) >>> assert type(view_details) is list >>> assert 'label' in view_details[0]
27,846
def dumps(obj, imports=None, binary=True, sequence_as_stream=False, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding=, default=None, use_decimal=True, namedtuple_as_object=True, tuple_as_array=True, bigint_as_string=False, sort_keys=False, item_sort_key=None, for_json=None, ignore_nan=False, int_as_string_bitcount=None, iterable_as_array=False, **kw): ion_buffer = six.BytesIO() dump(obj, ion_buffer, sequence_as_stream=sequence_as_stream, binary=binary, skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, cls=cls, indent=indent, separators=separators, encoding=encoding, default=default, use_decimal=use_decimal, namedtuple_as_object=namedtuple_as_object, tuple_as_array=tuple_as_array, bigint_as_string=bigint_as_string, sort_keys=sort_keys, item_sort_key=item_sort_key, for_json=for_json, ignore_nan=ignore_nan, int_as_string_bitcount=int_as_string_bitcount, iterable_as_array=iterable_as_array) ret_val = ion_buffer.getvalue() ion_buffer.close() if not binary: ret_val = ret_val.decode() return ret_val
Serialize ``obj`` as Python ``string`` or ``bytes`` object, using the conversion table used by ``dump`` (above). Args: obj (Any): A python object to serialize according to the above table. Any Python object which is neither an instance of nor inherits from one of the types in the above table will raise TypeError. imports (Optional[Sequence[SymbolTable]]): A sequence of shared symbol tables to be used by by the writer. binary (Optional[True|False]): When True, outputs binary Ion. When false, outputs text Ion. sequence_as_stream (Optional[True|False]): When True, if ``obj`` is a sequence, it will be treated as a stream of top-level Ion values (i.e. the resulting Ion data will begin with ``obj``'s first element). Default: False. skipkeys: NOT IMPLEMENTED ensure_ascii: NOT IMPLEMENTED check_circular: NOT IMPLEMENTED allow_nan: NOT IMPLEMENTED cls: NOT IMPLEMENTED indent (Str): If binary is False and indent is a string, then members of containers will be pretty-printed with a newline followed by that string repeated for each level of nesting. None (the default) selects the most compact representation without any newlines. Example: to indent with four spaces per level of nesting, use ``' '``. separators: NOT IMPLEMENTED encoding: NOT IMPLEMENTED default: NOT IMPLEMENTED use_decimal: NOT IMPLEMENTED namedtuple_as_object: NOT IMPLEMENTED tuple_as_array: NOT IMPLEMENTED bigint_as_string: NOT IMPLEMENTED sort_keys: NOT IMPLEMENTED item_sort_key: NOT IMPLEMENTED for_json: NOT IMPLEMENTED ignore_nan: NOT IMPLEMENTED int_as_string_bitcount: NOT IMPLEMENTED iterable_as_array: NOT IMPLEMENTED **kw: NOT IMPLEMENTED Returns: Union[str|bytes]: The string or binary representation of the data. if ``binary=True``, this will be a ``bytes`` object, otherwise this will be a ``str`` object (or ``unicode`` in the case of Python 2.x)
27,847
def json_obj_to_cursor(self, json):
    cursor = json_util.loads(json)
    if "id" in json:
        cursor["_id"] = ObjectId(cursor["id"])
        del cursor["id"]
    return cursor
(Deprecated) Converts a JSON object to a mongo db cursor :param str json: A json string :returns: dictionary with ObjectId type :rtype: dict
27,848
def OnPaneClose(self, event):
    toggle_label = event.GetPane().caption
    menubar = self.main_window.menubar
    toggle_id = menubar.FindMenuItem(_("View"), toggle_label)
    toggle_item = menubar.FindItemById(toggle_id)
    toggle_item.Check(False)
    menubar.UpdateMenus()
    self.main_window._mgr.Update()
    event.Skip()
Pane close toggle event handler (via close button)
27,849
def sort(self, **kwargs):
    query = {}
    for field in kwargs:
        query[field] = kwargs[field]
    self._q.append({'$sort': query})
    return self
Adds a sort stage to aggregation query :param kwargs: Specifies the field(s) to sort by and the respective sort order. :return: The current object
27,850
def decode_format_timestamp(timestamp):
    dt = maya.MayaDT(timestamp / 1000).datetime(naive=True)
    return dt.strftime('%Y-%m-%d'), dt.strftime('%H:%M:%S')
Convert unix timestamp (millis) into date & time we use in logs output. :param timestamp: unix timestamp in millis :return: date, time in UTC
27,851
def _jobs_to_do(self, restrictions): if self.restriction: raise DataJointError( ) todo = self.key_source if not isinstance(todo, QueryExpression): raise DataJointError() try: raise DataJointError( % next( name for name in todo.heading.primary_key if name not in self.target.heading)) except StopIteration: pass return (todo & AndList(restrictions)).proj()
:return: the relation containing the keys to be computed (derived from self.key_source)
27,852
def is_artifact_optional(chain, task_id, path):
    upstream_artifacts = chain.task['payload'].get('upstreamArtifacts', [])
    optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts)
    return path in optional_artifacts_per_task_id.get(task_id, [])
Tells whether an artifact is flagged as optional or not. Args: chain (ChainOfTrust): the chain of trust object task_id (str): the id of the aforementioned task Returns: bool: True if artifact is optional
27,853
def detect_iter(self, det_iter, show_timer=False): num_images = det_iter._size if not isinstance(det_iter, mx.io.PrefetchingIter): det_iter = mx.io.PrefetchingIter(det_iter) start = timer() detections = self.mod.predict(det_iter).asnumpy() time_elapsed = timer() - start if show_timer: logging.info("Detection time for {} images: {:.4f} sec".format( num_images, time_elapsed)) result = Detector.filter_positive_detections(detections) return result
detect all images in iterator Parameters: ---------- det_iter : DetIter iterator for all testing images show_timer : Boolean whether to print out detection exec time Returns: ---------- list of detection results
27,854
def validate_output_format(self, default_format): output_format = self.options.output_format if output_format is None: output_format = default_format output_format = config.formats.get(output_format, output_format) if re.match(r"^[,._0-9a-zA-Z]+$", output_format): self.plain_output_format = True output_format = "%%(%s)s" % ")s\t%(".join(formatting.validate_field_list(output_format, allow_fmt_specs=True)) output_format = (output_format .replace(r"\\", "\\") .replace(r"\n", "\n") .replace(r"\t", "\t") .replace(r"\$", "\0") .replace("$(", "%(") .replace("\0", "$") .replace(r"\ ", " ") ) self.options.output_format = formatting.preparse(output_format)
Prepare output format for later use.
27,855
def token_count_pandas(self): freq_df = pd.DataFrame.from_dict(self.indexer.word_counts, orient=) freq_df.columns = [] return freq_df.sort_values(, ascending=False)
See token counts as pandas dataframe
27,856
def save(self, commit=True, **kwargs):
    org = super(OrganizationForm, self).save(commit=False, **kwargs)
    if not org.id:
        user = current_user._get_current_object()
        member = Member(user=user, role='admin')
        org.members.append(member)
    if commit:
        org.save()
    return org
Register the current user as admin on creation
27,857
def showlist(self, window_name, object_name):
    object_handle = self._get_object_handle(window_name, object_name)
    if not object_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    object_handle.Press()
    return 1
Show combo box list / menu @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer
27,858
def find_one(self, **kwargs): future = TracebackFuture() def handle_response(result, error): if error: future.set_exception(error) else: instance = self.__entity() instance.map_dict(result) future.set_result(instance) self.__collection.find_one(kwargs, callback=handle_response) return future
Returns future. Executes collection's find_one method based on keyword args maps result ( dict to instance ) and return future Example:: manager = EntityManager(Product) product_saved = yield manager.find_one(_id=object_id)
27,859
def copy(self, filename: Optional[PathLike] = None) -> 'AnnData':
    if not self.isbacked:
        return AnnData(self._X.copy() if self._X is not None else None,
                       self._obs.copy(),
                       self._var.copy(),
                       self._uns.copy() if isinstance(self._uns, DictView) else deepcopy(self._uns),
                       self._obsm.copy(),
                       self._varm.copy(),
                       raw=None if self._raw is None else self._raw.copy(),
                       layers=self.layers.as_dict(),
                       dtype=self._X.dtype.name if self._X is not None else 'float32')
    else:
        if filename is None:
            raise ValueError(
                'To copy an AnnData object in backed mode, '
                "pass a filename: `.copy(filename='myfilename.h5ad')`.")
        if self.isview:
            self.write(filename)
        else:
            from shutil import copyfile
            copyfile(self.filename, filename)
        return AnnData(filename=filename)
Full copy, optionally on disk.
27,860
def onAutoIndentTriggered(self): cursor = self._qpart.textCursor() startBlock = self._qpart.document().findBlock(cursor.selectionStart()) endBlock = self._qpart.document().findBlock(cursor.selectionEnd()) if startBlock != endBlock: stopBlock = endBlock.next() block = startBlock with self._qpart: while block != stopBlock: self.autoIndentBlock(block, ) block = block.next() else: self.autoIndentBlock(startBlock, )
Indent current line or selected lines
27,861
def _mean_prediction(self,theta,Y,scores,h,t_params): Y_exp = Y.copy() theta_exp = theta.copy() scores_exp = scores.copy() for t in range(0,h): new_value = theta_exp[-1] + t_params[0]*scores_exp[-1] if self.model_name2 == "Exponential": Y_exp = np.append(Y_exp, [1.0/self.link(new_value)]) else: Y_exp = np.append(Y_exp, [self.link(new_value)]) theta_exp = np.append(theta_exp, [new_value]) scores_exp = np.append(scores_exp, [0]) return Y_exp
Creates a h-step ahead mean prediction Parameters ---------- theta : np.array The past predicted values Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables Returns ---------- Y_exp : np.array Vector of past values and predictions
27,862
def setDaemon(self, runnable, isdaemon, noregister=False):
    if not noregister and runnable not in self.registerIndex:
        self.register((), runnable)
    if isdaemon:
        self.daemons.add(runnable)
    else:
        self.daemons.discard(runnable)
If a runnable is a daemon, it will not keep the main loop running. The main loop will end when all alive runnables are daemons.
27,863
def get_subcols(module_ident, plpy): plan = plpy.prepare(FalseSubCollection, (,)) for i in plpy.execute(plan, (module_ident,)): yield i[]
Get all the sub-collections that the module is part of.
27,864
def readCell(self, row, col):
    try:
        if self.__sheet is None:
            self.openSheet(super(ExcelRead, self).DEFAULT_SHEET)
        return self.__sheet.cell(row, col).value
    except BaseException as excp:
        raise UfException(Errors.UNKNOWN_ERROR, "Unknown Error in Excellib.readCell %s" % excp)
read a cell
27,865
def main(): parser = argparse.ArgumentParser(description=) parser.add_argument(, metavar=, help=) parser.add_argument(, , dest=, action=, help=,\, default=) parser.add_argument(, , dest=, action=, help=, default=False) parser.add_argument(, , dest=, action=, help=, default=False) parser.add_argument(, , dest=, action=, help=, default=False) parser.add_argument(, metavar=, type=argparse.FileType(), nargs=, help=, default=sys.stdout) args = parser.parse_args() ulog_file_name = args.filename message_filter = [] if not args.initial: message_filter = None ulog = ULog(ulog_file_name, message_filter) param_keys = sorted(ulog.initial_parameters.keys()) delimiter = args.delimiter output_file = args.output_filename if not args.octave: for param_key in param_keys: output_file.write(param_key) if args.initial: output_file.write(delimiter) output_file.write(str(ulog.initial_parameters[param_key])) output_file.write() elif args.timestamps: output_file.write(delimiter) output_file.write(str(ulog.initial_parameters[param_key])) for t, name, value in ulog.changed_parameters: if name == param_key: output_file.write(delimiter) output_file.write(str(value)) output_file.write() output_file.write("timestamp") output_file.write(delimiter) output_file.write() for t, name, value in ulog.changed_parameters: if name == param_key: output_file.write(delimiter) output_file.write(str(t)) output_file.write() else: for t, name, value in ulog.changed_parameters: if name == param_key: output_file.write(delimiter) output_file.write(str(value)) output_file.write() else: for param_key in param_keys: output_file.write() output_file.write(param_key) values = [ulog.initial_parameters[param_key]] if not args.initial: for t, name, value in ulog.changed_parameters: if name == param_key: values += [value] if len(values) > 1: output_file.write() output_file.write() output_file.write() output_file.write(str(len(values)) + ) for value in values: output_file.write(str(value) + ) else: output_file.write() output_file.write(str(values[0])) output_file.write()
Command line interface
27,866
def basic_auth(f): def wrapper(*args, **kwargs): self = args[0] if in kwargs: raise AttributeError("donauthauth'] = self._auth else: assert False, "no basic token, no JWT, but connected o_O" return f(*args, **kwargs) return wrapper
Injects auth into requests call over route :return: route
27,867
def export_compound(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue): if format == "score_plots": export_score_plots(infile) else: if outfile is None: if outcsv: outfile = infile.split(".osw")[0] + ".csv" else: outfile = infile.split(".osw")[0] + ".tsv" else: outfile = outfile export_compound_tsv(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue)
Export Compound TSV/CSV tables
27,868
def movementCompute(self, displacement, noiseFactor = 0): if noiseFactor != 0: displacement = copy.deepcopy(displacement) xnoise = np.random.normal(0, noiseFactor) ynoise = np.random.normal(0, noiseFactor) displacement[0] += xnoise displacement[1] += ynoise np.round(self.bumpPhases, decimals=9, out=self.bumpPhases) np.mod(self.bumpPhases, 1.0, out=self.bumpPhases) self._computeActiveCells() self.phaseDisplacement = phaseDisplacement
Shift the current active cells by a vector. @param displacement (pair of floats) A translation vector [di, dj].
27,869
def load(self, size):
    if self.exists() and self.isfile():
        return eval(open(self).read(size))
Open and read the file if it exists.
27,870
def _build_credentials_tuple(mech, source, user, passwd, extra, database): if mech != and user is None: raise ConfigurationError("%s requires a username." % (mech,)) if mech == : if source is not None and source != : raise ValueError( "authentication source must be $external or None for GSSAPI") properties = extra.get(, {}) service_name = properties.get(, ) canonicalize = properties.get(, False) service_realm = properties.get() props = GSSAPIProperties(service_name=service_name, canonicalize_host_name=canonicalize, service_realm=service_realm) return MongoCredential(mech, , user, passwd, props, None) elif mech == : if passwd is not None: raise ConfigurationError( "Passwords are not supported by MONGODB-X509") if source is not None and source != : raise ValueError( "authentication source must be " "$external or None for MONGODB-X509") return MongoCredential(mech, , user, None, None, None) elif mech == : source_database = source or database or return MongoCredential(mech, source_database, user, passwd, None, None) else: source_database = source or database or if passwd is None: raise ConfigurationError("A password is required.") return MongoCredential( mech, source_database, user, passwd, None, _Cache())
Build and return a mechanism specific credentials tuple.
27,871
def copy_random_state(random_state, force_copy=False):
    if random_state == np.random and not force_copy:
        return random_state
    else:
        rs_copy = dummy_random_state()
        orig_state = random_state.get_state()
        rs_copy.set_state(orig_state)
        return rs_copy
Creates a copy of a random state. Parameters ---------- random_state : numpy.random.RandomState The random state to copy. force_copy : bool, optional If True, this function will always create a copy of every random state. If False, it will not copy numpy's default random state, but all other random states. Returns ------- rs_copy : numpy.random.RandomState The copied random state.
27,872
def update_video_image(edx_video_id, course_id, image_data, file_name): try: course_video = CourseVideo.objects.select_related().get( course_id=course_id, video__edx_video_id=edx_video_id ) except ObjectDoesNotExist: error_message = u.format( edx_video_id, course_id ) raise ValVideoNotFoundError(error_message) video_image, _ = VideoImage.create_or_update(course_video, file_name, image_data) return video_image.image_url()
Update video image for an existing video. NOTE: If `image_data` is None then `file_name` value will be used as it is, otherwise a new file name is constructed based on uuid and extension from `file_name` value. `image_data` will be None in case of course re-run and export. Arguments: image_data (InMemoryUploadedFile): Image data to be saved for a course video. Returns: course video image url Raises: Raises ValVideoNotFoundError if the CourseVideo cannot be retrieved.
27,873
def gnpool(name, start, room, lenout=_default_len_out): name = stypes.stringToCharP(name) start = ctypes.c_int(start) kvars = stypes.emptyCharArray(yLen=room, xLen=lenout) room = ctypes.c_int(room) lenout = ctypes.c_int(lenout) n = ctypes.c_int() found = ctypes.c_int() libspice.gnpool_c(name, start, room, lenout, ctypes.byref(n), kvars, ctypes.byref(found)) return stypes.cVectorToPython(kvars)[0:n.value], bool(found.value)
Return names of kernel variables matching a specified template. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gnpool_c.html :param name: Template that names should match. :type name: str :param start: Index of first matching name to retrieve. :type start: int :param room: The largest number of values to return. :type room: int :param lenout: Length of strings in output array kvars. :type lenout: int :return: Kernel pool variables whose names match name. :rtype: list of str
27,874
def bounds_window(bounds, affine):
    w, s, e, n = bounds
    row_start, col_start = rowcol(w, n, affine)
    row_stop, col_stop = rowcol(e, s, affine, op=math.ceil)
    return (row_start, row_stop), (col_start, col_stop)
Create a full cover rasterio-style window
27,875
def network_lpf_contingency(network, snapshots=None, branch_outages=None): if snapshots is None: snapshots = network.snapshots if isinstance(snapshots, collections.Iterable): logger.warning("Apologies LPF contingency, this only works for single snapshots at the moment, taking the first snapshot.") snapshot = snapshots[0] else: snapshot = snapshots network.lpf(snapshot) passive_branches = network.passive_branches() if branch_outages is None: branch_outages = passive_branches.index p0_base = pd.Series(index=passive_branches.index) for c in network.passive_branch_components: pnl = network.pnl(c) p0_base[c] = pnl.p0.loc[snapshot] for sn in network.sub_networks.obj: sn._branches = sn.branches() sn.calculate_BODF() p0 = pd.DataFrame(index=passive_branches.index) p0["base"] = p0_base for branch in branch_outages: if type(branch) is not tuple: logger.warning("No type given for {}, assuming it is a line".format(branch)) branch = ("Line",branch) sn = network.sub_networks.obj[passive_branches.sub_network[branch]] branch_i = sn._branches.index.get_loc(branch) p0_new = p0_base + pd.Series(sn.BODF[:,branch_i]*p0_base[branch],sn._branches.index) p0[branch] = p0_new return p0
Computes linear power flow for a selection of branch outages. Parameters ---------- snapshots : list-like|single snapshot A subset or an elements of network.snapshots on which to run the power flow, defaults to network.snapshots NB: currently this only works for a single snapshot branch_outages : list-like A list of passive branches which are to be tested for outages. If None, it's take as all network.passive_branches_i() Returns ------- p0 : pandas.DataFrame num_passive_branch x num_branch_outages DataFrame of new power flows
27,876
def to_dict(self):
    return {
        'name': self.name,
        'broker': self.broker.to_dict(),
        'pid': self.pid,
        'process_pids': self.process_pids,
        'concurrency': self.concurrency,
        'job_count': self.job_count,
        'queues': [q.to_dict() for q in self.queues]
    }
Return a dictionary of the worker stats. Returns: dict: Dictionary of the stats.
27,877
def process_schema(value): schemas = current_app.extensions[].schemas try: return schemas[value] except KeyError: raise click.BadParameter( .format( value, .join(schemas.keys()) ) )
Load schema from JSONSchema registry based on given value. :param value: Schema path, relative to the directory when it was registered. :returns: The schema absolute path.
27,878
def merge_from(self, other):
    if other.national_number_pattern is not None:
        self.national_number_pattern = other.national_number_pattern
    if other.example_number is not None:
        self.example_number = other.example_number
Merge information from another PhoneNumberDesc object into this one.
27,879
def get_repository_nodes(self, repository_id, ancestor_levels, descendant_levels, include_siblings): return objects.RepositoryNode(self.get_repository_node_ids( repository_id=repository_id, ancestor_levels=ancestor_levels, descendant_levels=descendant_levels, include_siblings=include_siblings)._my_map, runtime=self._runtime, proxy=self._proxy)
Gets a portion of the hierarchy for the given repository. arg: repository_id (osid.id.Id): the ``Id`` to query arg: ancestor_levels (cardinal): the maximum number of ancestor levels to include. A value of 0 returns no parents in the node. arg: descendant_levels (cardinal): the maximum number of descendant levels to include. A value of 0 returns no children in the node. arg: include_siblings (boolean): ``true`` to include the siblings of the given node, ``false`` to omit the siblings return: (osid.repository.RepositoryNode) - the specified repository node raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
27,880
def urlretrieve(self, url, filename=None, method='GET', body=None, dir=None, **kwargs):
    result = self.request(method, url, data=body, **kwargs)
    result.code = result.status_code
    if not filename:
        fd, filename = tempfile.mkstemp(dir=dir)
        f = os.fdopen(fd, 'wb')
    else:
        f = open(filename, 'wb')
    f.write(result.content)
    f.close()
    return filename, result
Save result of a request to a file, similarly to :func:`urllib.urlretrieve`. If an error is encountered may raise any of the scrapelib `exceptions`_. A filename may be provided or :meth:`urlretrieve` will safely create a temporary file. If a directory is provided, a file will be given a random name within the specified directory. Either way, it is the responsibility of the caller to ensure that the temporary file is deleted when it is no longer needed. :param url: URL for request :param filename: optional name for file :param method: any valid HTTP method, but generally GET or POST :param body: optional body for request, to turn parameters into an appropriate string use :func:`urllib.urlencode()` :param dir: optional directory to place file in :returns filename, response: tuple with filename for saved response (will be same as given filename if one was given, otherwise will be a temp file in the OS temp directory) and a :class:`Response` object that can be used to inspect the response headers.
27,881
def preflight(self): origin = self.request.headers.get(, ) self.set_header(, origin) headers = self.request.headers.get() if headers: self.set_header(, headers) self.set_header(, )
Handles request authentication
27,882
def _setextra(self, extradata):
    current = self
    while hasattr(current, '_sub'):
        current = current._sub
    _set(current, '_extra', extradata)
Set the _extra field in the struct, which stands for the additional ("extra") data after the defined fields.
27,883
def _calc_validation_statistics(validation_results): successful_expectations = sum(exp["success"] for exp in validation_results) evaluated_expectations = len(validation_results) unsuccessful_expectations = evaluated_expectations - successful_expectations success = successful_expectations == evaluated_expectations try: success_percent = successful_expectations / evaluated_expectations * 100 except ZeroDivisionError: success_percent = float("nan") return ValidationStatistics( successful_expectations=successful_expectations, evaluated_expectations=evaluated_expectations, unsuccessful_expectations=unsuccessful_expectations, success=success, success_percent=success_percent, )
Calculate summary statistics for the validation results and return ``ExpectationStatistics``.
27,884
def imshow_z(data, name): zmes = pick_flat_z(data) plt.figure() plt.imshow(zmes.T, origin=, \ extent=[data[].min(), data[].max(), \ 0, data[].max()], aspect=.16) plt.colorbar() plt.xlabel(, fontsize=20) plt.ylabel(, fontsize=20) plt.savefig(name+, dpi=300, format=, transparent=False, bbox_inches=, pad_inches=0.05)
2D color plot of the quasiparticle weight as a function of interaction and doping
27,885
def _backup_dir_item(self, dir_path, process_bar): self.path_helper.set_src_filepath(dir_path) if self.path_helper.abs_src_filepath is None: self.total_errored_items += 1 log.info("Can't backup %r", dir_path) if dir_path.is_symlink: self.summary("TODO Symlink: %s" % dir_path) return if dir_path.resolve_error is not None: self.summary("TODO resolve error: %s" % dir_path.resolve_error) pprint_path(dir_path) return if dir_path.different_path: self.summary("TODO different path:") pprint_path(dir_path) return if dir_path.is_dir: self.summary("TODO dir: %s" % dir_path) elif dir_path.is_file: file_backup = FileBackup(dir_path, self.path_helper, self.backup_run) old_backup_entry = self.fast_compare(dir_path) if old_backup_entry is not None: file_backup.fast_deduplication_backup(old_backup_entry, process_bar) else: file_backup.deduplication_backup(process_bar) assert file_backup.fast_backup is not None, dir_path.path assert file_backup.file_linked is not None, dir_path.path file_size = dir_path.stat.st_size if file_backup.file_linked: self.total_file_link_count += 1 self.total_stined_bytes += file_size else: self.total_new_file_count += 1 self.total_new_bytes += file_size if file_backup.fast_backup: self.total_fast_backup += 1 else: self.summary("TODO:" % dir_path) pprint_path(dir_path)
Backup one dir item :param dir_path: filesystem_walk.DirEntryPath() instance
27,886
def _compute_distance(self, dists, C): return C["c4"] * np.log(np.sqrt(dists.rrup ** 2. + C["h"] ** 2.))
Compute the second term of the equation described on p. 1144: ``c4 * np.log(np.sqrt(R ** 2. + h ** 2.))``
27,887
def create_language_dataset_from_url(self, file_url, token=None, url=API_CREATE_LANGUAGE_DATASET): auth = + self.check_for_token(token) dummy_files = {: (None, ), :(None, file_url)} h = {: auth, :} the_url = url r = requests.post(the_url, headers=h, files=dummy_files) return r
Creates a dataset from a publicly accessible file stored in the cloud. :param file_url: string, in the form of a URL to a file accessible on the cloud. Popular options include Dropbox, AWS S3, Google Drive. warning: Google Drive by default gives you a link to a web ui that allows you to download a file NOT to the file directly. There is a way to change the link to point directly to the file as of 2018 as this may change, please search google for a solution. returns: a request object
27,888
def read_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, opts: dict = None) -> : opts = opts if opts is not None else {} code = "filename x " if file.lower().startswith("http"): code += "url " code += "\""+file+"\";\n" code += "proc import datafile=x out=" if len(libref): code += libref+"." code += table+" dbms=csv replace; "+self._sb._impopts(opts)+" run;" if nosub: print(code) else: ll = self.submit(code, "text")
This method will import a csv file into a SAS Data Set and return the SASdata object referring to it. file - either the OS filesystem path of the file, or HTTP://... for a URL-accessible file table - the name of the SAS Data Set to create libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
27,889
def LoadElement(href, only_etag=False):
    request = SMCRequest(href=href)
    request.exception = FetchElementFailed
    result = request.read()
    if only_etag:
        return result.etag
    return ElementCache(result.json, etag=result.etag)
Return an instance of an element as an ElementCache dict used as a cache. :rtype: ElementCache
27,890
def _DetermineOperatingSystem(self, searcher): find_specs = [ file_system_searcher.FindSpec( location=, case_sensitive=False), file_system_searcher.FindSpec( location=, case_sensitive=False), file_system_searcher.FindSpec( location=, case_sensitive=False), file_system_searcher.FindSpec( location=, case_sensitive=False), file_system_searcher.FindSpec( location=, case_sensitive=False), file_system_searcher.FindSpec( location=, case_sensitive=False)] locations = [] for path_spec in searcher.Find(find_specs=find_specs): relative_path = searcher.GetRelativePath(path_spec) if relative_path: locations.append(relative_path.lower()) windows_locations = set([ , , , , , , , ]) operating_system = definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN if windows_locations.intersection(set(locations)): operating_system = definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT elif in locations: operating_system = definitions.OPERATING_SYSTEM_FAMILY_MACOS elif in locations: operating_system = definitions.OPERATING_SYSTEM_FAMILY_LINUX return operating_system
Tries to determine the underlying operating system. Args: searcher (dfvfs.FileSystemSearcher): file system searcher. Returns: str: operating system for example "Windows". This should be one of the values in definitions.OPERATING_SYSTEM_FAMILIES.
27,891
def get_words(self):
    words = []
    block = self.document().findBlockByLineNumber(0)
    while block.isValid():
        blockWords = foundations.strings.get_words(foundations.strings.to_string(block.text()))
        if blockWords:
            words.extend(blockWords)
        block = block.next()
    return words
Returns the document words. :return: Document words. :rtype: list
27,892
def InterpolateValue(self, value, type_info_obj=type_info.String(), default_section=None, context=None): if isinstance(value, Text): try: value = StringInterpolator( value, self, default_section=default_section, parameter=type_info_obj.name, context=context).Parse() except InterpolationError as e: message = "{cause}: {detail}".format(cause=e, detail=value) raise type(e)(message) value = type_info_obj.FromString(value) if isinstance(value, list): value = [ self.InterpolateValue( v, default_section=default_section, context=context) for v in value ] return value
Interpolate the value and parse it with the appropriate type.
27,893
async def connect(self):
    proxy = os.environ.get('HTTP_PROXY')
    self._session = http_utils.Session(self._cookies, proxy=proxy)
    try:
        self._channel = channel.Channel(
            self._session, self._max_retries, self._retry_backoff_base
        )
        self._channel.on_connect.add_observer(self.on_connect.fire)
        self._channel.on_reconnect.add_observer(self.on_reconnect.fire)
        self._channel.on_disconnect.add_observer(self.on_disconnect.fire)
        self._channel.on_receive_array.add_observer(self._on_receive_array)
        self._listen_future = asyncio.ensure_future(self._channel.listen())
        try:
            await self._listen_future
        except asyncio.CancelledError:
            # Listening was cancelled by disconnect()
            pass
    finally:
        await self._session.close()
Establish a connection to the chat server. Returns when an error has occurred, or :func:`disconnect` has been called.
27,894
def rates_for_location(self, postal_code, location_deets=None):
    request = self._get("rates/" + postal_code, location_deets)
    return self.responder(request)
Shows the sales tax rates for a given location.
27,895
def load(self):
    self.inventory = SDBInventory(self.usr)
    self.forms = self.inventory.forms
Loads the user's SDB inventory Raises parseException
27,896
def _update_index(self, axis, key, value): if value is None: return delattr(self, key) _key = "_{}".format(key) index = "{[0]}index".format(axis) unit = "{[0]}unit".format(axis) if not isinstance(value, Quantity): try: value = Quantity(value, getattr(self, unit)) except TypeError: value = Quantity(float(value), getattr(self, unit)) try: curr = getattr(self, _key) except AttributeError: delattr(self, index) else: if ( value is None or getattr(self, key) is None or not value.unit.is_equivalent(curr.unit) or value != curr ): delattr(self, index) setattr(self, _key, value) return value
Update the current axis index based on a given key or value This is an internal method designed to set the origin or step for an index, whilst updating existing Index arrays as appropriate Examples -------- >>> self._update_index("x0", 0) >>> self._update_index("dx", 0) To actually set an index array, use `_set_index`
27,897
def set_calibrators(self, parameter, calibrators): req = mdb_pb2.ChangeParameterRequest() req.action = mdb_pb2.ChangeParameterRequest.SET_CALIBRATORS for c in calibrators: if c.context : context_calib = req.contextCalibrator.add() context_calib.context = rs.context calib_info = context_calib.calibrator else : calib_info = req.defaultCalibrator _add_calib(calib_info, c.type, c.data) url = .format( self._instance, self._processor, parameter) response = self._client.post_proto(url, data=req.SerializeToString()) pti = mdb_pb2.ParameterTypeInfo()
Apply an ordered set of calibrators for the specified parameter. This replaces existing calibrators (if any). Each calibrator may have a context, which indicates when it its effects may be applied. Only the first matching calibrator is applied. A calibrator with context ``None`` is the *default* calibrator. There can be only one such calibrator, and is always applied at the end when no other contextual calibrator was applicable. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param .Calibrator[] calibrators: List of calibrators (either contextual or not)
27,898
def convert_norm(node, **kwargs): name, input_nodes, attrs = get_inputs(node, kwargs) mx_axis = attrs.get("axis", None) axes = convert_string_to_list(str(mx_axis)) if mx_axis else None keepdims = get_boolean_attribute_value(attrs, "keepdims") ord = int(attrs.get("ord", 2)) onnx_op_name = "ReduceL1" if ord == 1 else "ReduceL2" if axes: reduce_node = onnx.helper.make_node( onnx_op_name, input_nodes, [name], axes=axes, keepdims=keepdims, name=name ) return [reduce_node] else: reduce_node = onnx.helper.make_node( onnx_op_name, input_nodes, [name], keepdims=keepdims, name=name ) return [reduce_node]
Map MXNet's norm operator attributes to onnx's ReduceL1 and ReduceL2 operators and return the created node.
27,899
def _does_require_deprecation(self):
    for index, version_number in enumerate(self.current_version[0][:2]):
        if version_number > self.version_yaml[index]:
            return True
    return False
Check if we have to put the previous version into the deprecated list.