3,100
def reftrack_rtype_data(rt, role):
    tfi = rt.get_taskfileinfo()
    if not tfi:
        return
    return filesysitemdata.taskfileinfo_rtype_data(tfi, role)
Return the data for the releasetype that is loaded by the reftrack

:param rt: the :class:`jukeboxcore.reftrack.Reftrack` that holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the releasetype
:rtype: depending on role
:raises: None
3,101
def is_binarized(self):
    is_binarized = np.issubdtype(self.pianoroll.dtype, np.bool_)
    return is_binarized
Return True if the pianoroll is already binarized. Otherwise, return False.

Returns
-------
is_binarized : bool
    True if the pianoroll is already binarized; otherwise, False.
3,102
def parse(self, obj):
    for k, default in obj.__class__.defaults.items():
        typ = type(default)
        if typ is str:
            continue
        v = getattr(obj, k)
        if typ is int:
            setattr(obj, k, int(v or default))
        elif typ is float:
            setattr(obj, k, float(v or default))
        elif typ is bool:
            setattr(obj, k, bool(int(v or default)))
Parse the object's properties according to its default types.
3,103
def run_with_graph_transformation(self) -> Iterable[BELGraph]:
    yield self.get_remaining_graph()
    while not self.done_chomping():
        while not list(self.iter_leaves()):
            self.remove_random_edge()
            yield self.get_remaining_graph()
        self.score_leaves()
        yield self.get_remaining_graph()
Calculate scores for all leaves until there are none, remove random edges until new leaves appear, and repeat until all nodes have been scored. Yields the current graph at every step so you can make a cool animation of how the graph changes throughout the course of the algorithm. :return: An iterable of BEL graphs
3,104
def zrem(self, key, *members):
    return self._execute([b'ZREM', key] + list(members))
Removes the specified members from the sorted set stored at key. Non existing
members are ignored. An error is returned when key exists and does not hold a
sorted set.

.. note:: **Time complexity**: ``O(M*log(N))`` with ``N`` being the number of
   elements in the sorted set and ``M`` the number of elements to be removed.

:param key: The key of the sorted set
:type key: :class:`str`, :class:`bytes`
:param members: One or more member values to remove
:type members: :class:`str`, :class:`bytes`
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
3,105
def resolve_movie(self, title, year=None):
    r = self.search_movie(title)
    return self._match_results(r, title, year)
Tries to find a movie with a given title and year
3,106
def tararchive_opener(path, pattern='', verbose=False):
    # NOTE: the log-message literals were stripped in the source and are
    # reconstructed approximately.
    with (tarfile.open(fileobj=io.BytesIO(urlopen(path).read()))
          if is_url(path) else tarfile.open(path)) as tararchive:
        for tarinfo in tararchive:
            if tarinfo.isfile():
                source = os.path.join(path, tarinfo.name)
                if pattern and not re.match(pattern, tarinfo.name):
                    logger.verbose('Skipping {0}: does not match pattern {1}'.format(
                        os.path.abspath(tarinfo.name), pattern))
                    continue
                logger.verbose('Opening {0}'.format(source))
                filehandle = tararchive.extractfile(tarinfo)
                yield filehandle
Opener that opens files from tar archive. :param str path: Path. :param str pattern: Regular expression pattern. :return: Filehandle(s).
3,107
def ReadFile(self, definitions_registry, path):
    # NOTE: the file-mode literal was stripped in the source; 'r' is assumed.
    with open(path, 'r') as file_object:
        self.ReadFileObject(definitions_registry, file_object)
Reads data type definitions from a file into the registry. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. path (str): path of the file to read from.
3,108
def encrypt(privkey, passphrase):
    if isinstance(privkey, str):
        privkey = PrivateKey(privkey)
    else:
        privkey = PrivateKey(repr(privkey))
    privkeyhex = repr(privkey)
    addr = format(privkey.bitcoin.address, "BTC")
    a = _bytes(addr)
    salt = hashlib.sha256(hashlib.sha256(a).digest()).digest()[0:4]
    if SCRYPT_MODULE == "scrypt":
        key = scrypt.hash(passphrase, salt, 16384, 8, 8)
    elif SCRYPT_MODULE == "pylibscrypt":
        key = scrypt.scrypt(bytes(passphrase, "utf-8"), salt, 16384, 8, 8)
    else:
        raise ValueError("No scrypt module loaded")
    (derived_half1, derived_half2) = (key[:32], key[32:])
    aes = AES.new(derived_half2, AES.MODE_ECB)
    encrypted_half1 = _encrypt_xor(privkeyhex[:32], derived_half1[:16], aes)
    encrypted_half2 = _encrypt_xor(privkeyhex[32:], derived_half1[16:], aes)
    # flag byte is forced 0xc0 because Graphene only uses compressed keys
    payload = b"\x01" + b"\x42" + b"\xc0" + salt + encrypted_half1 + encrypted_half2
    # checksum
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    privatkey = hexlify(payload + checksum).decode("ascii")
    return Base58(privatkey)
BIP0038 non-ec-multiply encryption. Returns BIP0038 encrypted privkey. :param privkey: Private key :type privkey: Base58 :param str passphrase: UTF-8 encoded passphrase for encryption :return: BIP0038 non-ec-multiply encrypted wif key :rtype: Base58
3,109
def get_items(*indexes):
    return lambda obj: tuple(
        obj[index] if len(obj) > index else None
        for index in indexes
    )
Return a callable that fetches the given indexes of an object Always return a tuple even when len(indexes) == 1. Similar to `operator.itemgetter`, but will insert `None` when the object does not have the desired index (instead of raising IndexError).
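For illustration, a standalone run of the behavior described above (expected outputs in comments):

def get_items(*indexes):
    return lambda obj: tuple(
        obj[index] if len(obj) > index else None for index in indexes)

first_and_third = get_items(0, 2)
print(first_and_third(["a", "b", "c"]))  # -> ('a', 'c')
print(first_and_third(["a"]))            # -> ('a', None), no IndexError
print(get_items(1)(["a", "b"]))          # -> ('b',), always a tuple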
3,110
def _is_region_extremely_sparse(self, start, end, base_state=None):
    all_bytes = None
    if base_state is not None:
        all_bytes = base_state.memory.load(start, end - start + 1)
        try:
            all_bytes = base_state.solver.eval(all_bytes, cast_to=bytes)
        except SimError:
            all_bytes = None
    size = end - start + 1
    if all_bytes is None:
        all_bytes = self._fast_memory_load_bytes(start, size)
    if all_bytes is None:
        return True
    if len(all_bytes) < size:
        # NOTE: the warning message was garbled in the source; reconstructed
        # approximately.
        l.warning("_is_region_extremely_sparse: The given region %#x-%#x is not fully mapped into "
                  "memory space. Only the first %d bytes (%#x-%#x) are considered.",
                  start, end, len(all_bytes), start, start + len(all_bytes) - 1)
    the_byte_value = None
    for b in all_bytes:
        if the_byte_value is None:
            the_byte_value = b
        elif the_byte_value != b:
            return False
    return True
Check whether the given memory region is extremely sparse, i.e., all bytes are the same value. :param int start: The beginning of the region. :param int end: The end of the region. :param base_state: The base state (optional). :return: True if the region is extremely sparse, False otherwise. :rtype: bool
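A minimal standalone sketch of the core check (all bytes identical), leaving out the angr-specific memory loading:

def is_extremely_sparse(data: bytes) -> bool:
    # True when every byte in the region has the same value (or the region is empty).
    return len(set(data)) <= 1

print(is_extremely_sparse(b"\x00" * 64))            # True
print(is_extremely_sparse(b"\x00" * 63 + b"\x01"))  # False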
3,111
def joint_torques(self):
    # NOTE: the attribute name was stripped in the source; 'amotor' is a
    # hypothetical guess for the stripped literal.
    return as_flat_array(getattr(j, 'amotor', j).feedback[-1][:j.ADOF]
                         for j in self.joints)
Get a list of all current joint torques in the skeleton.
3,112
def deep_reload_hook(m):
    if not isinstance(m, ModuleType):
        raise TypeError("reload() argument must be module")
    name = m.__name__
    if name not in sys.modules:
        raise ImportError("reload(): module %.200s not in sys.modules" % name)
    global modules_reloading
    try:
        return modules_reloading[name]
    except KeyError:
        modules_reloading[name] = m
    dot = name.rfind('.')
    if dot < 0:
        subname = name
        path = None
    else:
        try:
            parent = sys.modules[name[:dot]]
        except KeyError:
            modules_reloading.clear()
            raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot])
        subname = name[dot + 1:]
        path = getattr(parent, "__path__", None)
    try:
        with replace_import_hook(original_import):
            fp, filename, stuff = imp.find_module(subname, path)
    finally:
        modules_reloading.clear()
    try:
        newm = imp.load_module(name, fp, filename, stuff)
    except Exception:
        # restore the old module on failure
        sys.modules[name] = m
        raise
    finally:
        if fp:
            fp.close()
    modules_reloading.clear()
    return newm
Replacement for reload().
3,113
def zlist(columns, items, print_columns=None, text="", title="",
          width=DEFAULT_WIDTH, height=ZLIST_HEIGHT, timeout=None):
    dialog = ZList(columns, items, print_columns, text, title,
                   width, height, timeout)
    dialog.run()
    return dialog.response
Display a list of values

:param columns: a list of column names
:type columns: list of strings
:param items: a list of values
:type items: list of strings
:param print_columns: index of a column (return just the values from this column)
:type print_columns: int (None if all the columns)
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
:return: A row of values from the table
:rtype: list
3,114
def img2wav(path, min_x, max_x, min_y, max_y, window_size=3):
    image = Image.open(path).convert("L")
    matrix = np.array(image)[::-1]
    matrix[np.where(matrix >= 128)] = 255
    matrix[np.where(matrix < 128)] = 0
    tick_x = (max_x - min_x) / matrix.shape[1]
    tick_y = (max_y - min_y) / matrix.shape[0]
    x, y = list(), list()
    for i in range(matrix.shape[1]):
        window = expand_window(i, window_size, matrix.shape[1])
        margin_dots_y_indices = np.where(matrix[:, window] == 0)[0]
        if len(margin_dots_y_indices) > 0:
            x.append(min_x + (i + 1) * tick_x)
            y.append(min_y + margin_dots_y_indices.mean() * tick_y)
    return np.array(x), np.array(y)
Generate 1-D data ``y=f(x)`` from a black/white image.

Suppose we have an image like this:

.. image:: images/waveform.png
    :align: center

Put some code::

    >>> from weatherlab.math.img2waveform import img2wav
    >>> import matplotlib.pyplot as plt
    >>> x, y = img2wav(r"testdata\img2waveform\waveform.png",
    ...                min_x=0.0, max_x=288,
    ...                min_y=15.0, max_y=35.0,
    ...                window_size=15)
    >>> plt.plot(x, y)
    >>> plt.show()

Then you get nicely sampled data:

.. image:: images/waveform_pyplot.png
    :align: center

:param path: the image file path
:type path: string
:param min_x: minimum value of x axis
:type min_x: number
:param max_x: maximum value of x axis
:type max_x: number
:param min_y: minimum value of y axis
:type min_y: number
:param max_y: maximum value of y axis
:type max_y: number
:param window_size: the slide window size
:type window_size: int

Note: in Python, a numpy array representing an image runs left to right and
top to bottom, whereas plot coordinates run bottom to top, so ``[::-1]`` is
used to reverse the rows.
3,115
def checkversion(version):
    try:
        for refversion, responseversion in zip(
                [int(x) for x in REQUIREFOLIADOCSERVE.split('.')],
                [int(x) for x in version.split('.')]):
            if responseversion > refversion:
                return 1
            elif responseversion < refversion:
                return -1
        return 0
    except ValueError:
        raise ValueError("Unable to parse version, invalid syntax")
Checks foliadocserve version, returns 1 if the document is newer than the library, -1 if it is older, 0 if it is equal
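For illustration, a standalone sketch of the same three-way comparison (REQUIREFOLIADOCSERVE here is a hypothetical required version):

REQUIREFOLIADOCSERVE = "0.7.2"  # hypothetical required version

def checkversion(version):
    for refversion, responseversion in zip(
            [int(x) for x in REQUIREFOLIADOCSERVE.split('.')],
            [int(x) for x in version.split('.')]):
        if responseversion > refversion:
            return 1   # document is newer than the library
        elif responseversion < refversion:
            return -1  # document is older
    return 0           # equal

print(checkversion("0.8.0"))  # 1
print(checkversion("0.7.2"))  # 0
print(checkversion("0.6.9"))  # -1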
3,116
def getTradeHistory(pair, connection=None, info=None, count=None):
    if info is not None:
        info.validate_pair(pair)
    if connection is None:
        connection = common.BTCEConnection()
    response = connection.makeJSONRequest("/api/3/trades/%s" % pair)
    if type(response) is not dict:
        raise TypeError("The response is not a dict.")
    history = response.get(pair)
    if type(history) is not list:
        raise TypeError("The response is a %r, not a list." % type(history))
    result = []
    if count is not None:
        history = history[:count]
    for h in history:
        h["pair"] = pair
        t = Trade(**h)
        result.append(t)
    return result
Retrieve the trade history for the given pair. Returns a list of Trade instances. If count is not None, it should be an integer, and specifies the number of items from the trade history that will be processed and returned.
3,117
def tags(self, id, service=''):
    # NOTE: the endpoint literal was stripped in the source; '/tags' is a
    # hypothetical reconstruction.
    return self.request.get(service + '/tags', params=dict(id=id))
Get the existing analysis for a given hash :param id: The hash to get tag analysis for :type id: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
3,118
def _extract_level(self, topic_str):
    topics = topic_str.split('.')
    for idx, t in enumerate(topics):
        level = getattr(logging, t, None)
        if level is not None:
            break
    if level is None:
        level = logging.INFO
    else:
        topics.pop(idx)
    return level, '.'.join(topics)
Turn 'engine.0.INFO.extra' into (logging.INFO, 'engine.0.extra')
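A standalone sketch of the same parsing logic, runnable without the surrounding class:

import logging

def extract_level(topic_str, default=logging.INFO):
    topics = topic_str.split('.')
    for idx, t in enumerate(topics):
        level = getattr(logging, t, None)
        if level is not None:
            topics.pop(idx)  # strip the level token from the topic
            break
    else:
        level = default
    return level, '.'.join(topics)

print(extract_level('engine.0.INFO.extra'))  # (20, 'engine.0.extra')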
3,119
def validate(anchors, duplicate_tags, opts):
    try:
        return _validate(anchors, duplicate_tags, opts)
    except ValidationException as e:
        if str(e) == "Duplicate tags found":
            messages.print_duplicate_anchor_information(duplicate_tags)
        else:
            print(e)
        sys.exit(0)
Client facing validate function. Runs _validate() and returns True if anchors and duplicate_tags pass all validations. Handles exceptions automatically if _validate() throws any and exits the program. :param anchors: Dictionary mapping string file path keys to dictionary values. The inner dictionaries map string AnchorHub tags to generated anchor values :param duplicate_tags: Dictionary mapping string file path keys to a list of tuples. The tuples contain the following information, in order: 1. The string AnchorHub tag that was repeated 2. The line in the file that the duplicate was found, as a number 3. The string generated anchor that first used the repeated tag :param opts: Namespace containing AnchorHub options, usually created by command line arguments :return: True if the anchors pass all validation tests
3,120
def mode(name, mode, quotatype):
    # NOTE: the string literals were stripped in the source; the dict keys and
    # comments below are reconstructed approximately in Salt state style.
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    fun = 'off'
    if mode is True:
        fun = 'on'
    if __salt__['quota.get_mode'](name)[name][quotatype] == fun:
        ret['result'] = True
        ret['comment'] = 'Quota for {0} already set to {1}'.format(name, fun)
        return ret
    if __opts__['test']:
        ret['comment'] = 'Quota for {0} is set to be set to {1}'.format(name, fun)
        return ret
    if __salt__['quota.{0}'.format(fun)](name):
        ret['changes'] = {'quota': name}
        ret['result'] = True
        ret['comment'] = 'Set quota for {0} to {1}'.format(name, fun)
        return ret
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to set quota for {0} to {1}'.format(name, fun)
        return ret
Set the quota for the system

name
    The filesystem to set the quota mode on

mode
    Whether the quota system is on or off

quotatype
    Must be ``user`` or ``group``
3,121
def status(self, verbose=False):
    # NOTE: the printed messages were stripped in the source; reconstructed
    # approximately.
    try:
        response = api(url=self.__url, method="GET", verbose=verbose)
    except Exception as e:
        print('CyREST connection error: ' + str(e))
    else:
        print('CyREST is online!')
Checks the status of your CyREST server.
3,122
def scs2e(sc, sclkch):
    sc = ctypes.c_int(sc)
    sclkch = stypes.stringToCharP(sclkch)
    et = ctypes.c_double()
    libspice.scs2e_c(sc, sclkch, ctypes.byref(et))
    return et.value
Convert a spacecraft clock string to ephemeris seconds past J2000 (ET). http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scs2e_c.html :param sc: NAIF integer code for a spacecraft. :type sc: int :param sclkch: An SCLK string. :type sclkch: str :return: Ephemeris time, seconds past J2000. :rtype: float
3,123
def create_package_file(root, master_package, subroot, py_files, opts, subs):
    # NOTE: most literal strings (headings, toctree snippets) were stripped in
    # the source; they are reconstructed approximately in sphinx-apidoc style.
    text = format_heading(1, '%s package' % makename(master_package, subroot))
    if opts.modulefirst:
        text += format_directive(subroot, master_package)
        text += '\n'
    subs = [sub for sub in subs if path.isfile(path.join(root, sub, INITPY))]
    if subs:
        text += format_heading(2, 'Subpackages')
        text += '.. toctree::\n\n'
        for sub in subs:
            text += '    %s.%s\n' % (makename(master_package, subroot), sub)
        text += '\n'
    submods = [path.splitext(sub)[0] for sub in py_files
               if not shall_skip(path.join(root, sub), opts) and sub != INITPY]
    if submods:
        if opts.separatemodules:
            text += format_heading(2, 'Submodules')
            text += '.. toctree::\n\n'
            for submod in submods:
                modfile = makename(master_package, makename(subroot, submod))
                text += '   %s\n' % modfile
                if not opts.noheadings:
                    filetext = format_heading(1, '%s module' % modfile)
                else:
                    filetext = ''
                filetext += format_directive(makename(subroot, submod),
                                             master_package)
                write_file(modfile, filetext, opts)
        else:
            for submod in submods:
                modfile = makename(master_package, makename(subroot, submod))
                if not opts.noheadings:
                    text += format_heading(2, '%s module' % modfile)
                text += format_directive(makename(subroot, submod),
                                         master_package)
                text += '\n'
        text += '\n'
    if not opts.modulefirst:
        text += format_heading(2, 'Module contents')
        text += format_directive(subroot, master_package)
    write_file(makename(master_package, subroot), text, opts)
Build the text of the file and write the file.
3,124
def main():
    # NOTE: the serial port, frame ids, and AT command names were stripped in
    # the source; '/dev/ttyUSB0' and the commands below are hypothetical.
    # (Python 2 code, hence the print statements.)
    try:
        ser = serial.Serial('/dev/ttyUSB0', 9600)
        xbee = XBee(ser)
        xbee.send('at', frame_id='A', command='SL')
        response = xbee.wait_read_frame()
        print response
        xbee.send('at', frame_id='B', command='SH')
        response = xbee.wait_read_frame()
        print response
        xbee.send('at', frame_id='C', command='DL')
        response = xbee.wait_read_frame()
        print response
        xbee.send('at', frame_id='D', command='DH')
        response = xbee.wait_read_frame()
        print response
    except KeyboardInterrupt:
        pass
    finally:
        ser.close()
Sends an API AT command to read the lower-order address bits from an XBee Series 1 and looks for a response
3,125
def write(self, value):
    # NOTE: the text-align literal was stripped in the source; 'left' is assumed.
    row, col = self._cursor_pos
    try:
        if self._content[row][col] != value:
            self._send_data(value)
            self._content[row][col] = value
            unchanged = False
        else:
            unchanged = True
    except IndexError as e:
        if self.auto_linebreaks is True:
            raise e
        self._send_data(value)
        unchanged = False
    if self.text_align_mode == 'left':
        if self.auto_linebreaks is False or col < self.lcd.cols - 1:
            newpos = (row, col + 1)
            if unchanged:
                self.cursor_pos = newpos
            else:
                self._cursor_pos = newpos
            self.recent_auto_linebreak = False
        else:
            if row < self.lcd.rows - 1:
                self.cursor_pos = (row + 1, 0)
            else:
                self.cursor_pos = (0, 0)
            self.recent_auto_linebreak = True
    else:
        if self.auto_linebreaks is False or col > 0:
            newpos = (row, col - 1)
            if unchanged:
                self.cursor_pos = newpos
            else:
                self._cursor_pos = newpos
            self.recent_auto_linebreak = False
        else:
            if row < self.lcd.rows - 1:
                self.cursor_pos = (row + 1, self.lcd.cols - 1)
            else:
                self.cursor_pos = (0, self.lcd.cols - 1)
            self.recent_auto_linebreak = True
Write a raw byte to the LCD.
3,126
def _get_repr(obj, pretty=False, indent=1):
    if pretty:
        repr_value = pformat(obj, indent)
    else:
        repr_value = repr(obj)
    if sys.version_info[0] == 2:
        # NOTE: the encoding literals were stripped in the source; 'utf-8' and
        # 'replace' are assumed.
        try:
            repr_value = repr_value.decode('utf-8')
        except UnicodeError:
            repr_value = repr_value.decode('utf-8', 'replace')
    return repr_value
Get string representation of an object :param obj: object :type obj: object :param pretty: use pretty formatting :type pretty: bool :param indent: indentation for pretty formatting :type indent: int :return: string representation :rtype: str
3,127
def _add_data_to_general_stats(self, data):
    # NOTE: the column-name and key literals were stripped in the source; the
    # names below are hypothetical placeholders.
    headers = _get_general_stats_headers()
    self.general_stats_headers.update(headers)
    header_names = ('header_1', 'header_2', 'header_3')
    general_data = dict()
    for sample in data:
        general_data[sample] = {column: data[sample][column]
                                for column in header_names}
        if sample not in self.general_stats_data:
            self.general_stats_data[sample] = dict()
        if data[sample]['some_flag'] != 'expected_value':
            headers['some_column']['hidden'] = False
        self.general_stats_data[sample].update(general_data[sample])
Add data for the general stats in a Picard-module specific manner
3,128
def GetParsers(cls, parser_filter_expression=None):
    includes, excludes = cls._GetParserFilters(parser_filter_expression)
    for parser_name, parser_class in iter(cls._parser_classes.items()):
        if not includes and parser_name in excludes:
            continue
        if includes and parser_name not in includes:
            continue
        yield parser_name, parser_class
Retrieves the registered parsers and plugins.

Retrieves a dictionary of all registered parsers and associated plugins
from a parser filter string. The filter string can contain direct names of
parsers, presets or plugins. The filter string can also negate selection
if prepended with an exclamation point, e.g.: "foo,!foo/bar" would include
parser foo but not include plugin bar. A list of specific included and
excluded plugins is also passed to each parser's class.

The three types of entries in the filter string:
* name of a parser: this would be the exact name of a single parser to
  include (or exclude), e.g. foo;
* name of a preset, e.g. win7: the presets are defined in
  plaso/parsers/presets.py;
* name of a plugin: if a plugin name is included the parent parser will be
  included in the list of registered parsers.

Args:
  parser_filter_expression (Optional[str]): parser filter expression, where
      None represents all parsers and plugins.

Yields:
  tuple: containing:
  * str: name of the parser;
  * type: parser class (subclass of BaseParser).
3,129
def open_slots(self, session):
    from airflow.models.taskinstance import TaskInstance as TI
    used_slots = session.query(func.count()).filter(TI.pool == self.pool).filter(
        TI.state.in_([State.RUNNING, State.QUEUED])).scalar()
    return self.slots - used_slots
Returns the number of slots open at the moment
3,130
def _update(self, sock_info, criteria, document, upsert=False,
            check_keys=True, multi=False, manipulate=False,
            write_concern=None, op_id=None, ordered=True,
            bypass_doc_val=False, collation=None, array_filters=None,
            session=None, retryable_write=False):
    # NOTE: the SON keys, command fields, and error messages were stripped in
    # the source; they are reconstructed from the standard MongoDB update
    # command format.
    common.validate_boolean("upsert", upsert)
    if manipulate:
        document = self.__database._fix_incoming(document, self)
    collation = validate_collation_or_none(collation)
    write_concern = write_concern or self.write_concern
    acknowledged = write_concern.acknowledged
    update_doc = SON([('q', criteria),
                      ('u', document),
                      ('multi', multi),
                      ('upsert', upsert)])
    if collation is not None:
        if sock_info.max_wire_version < 5:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use collations.')
        elif not acknowledged:
            raise ConfigurationError(
                'Collation is unsupported for unacknowledged writes.')
        else:
            update_doc['collation'] = collation
    if array_filters is not None:
        if sock_info.max_wire_version < 6:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.6+ to use array filters.')
        elif not acknowledged:
            raise ConfigurationError(
                'arrayFilters is unsupported for unacknowledged writes.')
        else:
            update_doc['arrayFilters'] = array_filters
    command = SON([('update', self.name),
                   ('ordered', ordered),
                   ('updates', [update_doc])])
    if not write_concern.is_server_default:
        command['writeConcern'] = write_concern.document
    if not sock_info.op_msg_enabled and not acknowledged:
        return self._legacy_write(
            sock_info, 'update', command, op_id,
            bypass_doc_val, message.update, self.__full_name, upsert,
            multi, criteria, document, False, write_concern.document,
            check_keys, self.__write_response_codec_options)
    if bypass_doc_val and sock_info.max_wire_version >= 4:
        command['bypassDocumentValidation'] = True
    result = sock_info.command(
        self.__database.name, command,
        write_concern=write_concern,
        codec_options=self.__write_response_codec_options,
        session=session, client=self.__database.client,
        retryable_write=retryable_write).copy()
    _check_write_command_response(result)
    if result.get('n') and 'upserted' not in result:
        result['updatedExisting'] = True
    else:
        result['updatedExisting'] = False
        if 'upserted' in result:
            result['upserted'] = result['upserted'][0]['_id']
    if not acknowledged:
        return None
    return result
Internal update / replace helper.
3,131
def generate_repo_files(self, release):
    # NOTE: the resource/file-name and log-message literals were stripped in
    # the source; the values below are hypothetical.
    repo_tmpl = pkg_resources.resource_string(__name__, 'templates/repo.mako')
    repo_file = os.path.join(release['repo_path'], '%s.repo' % release['name'])
    with file(repo_file, 'w') as repo:
        repo_out = Template(repo_tmpl).render(**release)
        self.log.debug('Writing repo file %s:\n%s', repo_file, repo_out)
        repo.write(repo_out)
    self.log.info('Wrote repo configuration to %s', repo_file)
Dynamically generate our yum repo configuration
3,132
def angact_ho(x, omega):
    action = (x[3:]**2 + (omega * x[:3])**2) / (2. * omega)
    angle = np.array([np.arctan(-x[3 + i] / omega[i] / x[i])
                      if x[i] != 0. else -np.sign(x[3 + i]) * np.pi / 2.
                      for i in range(3)])
    for i in range(3):
        if x[i] < 0:
            angle[i] += np.pi
    return np.concatenate((action, angle % (2. * np.pi)))
Calculate angle and action variable in sho potential with parameter omega
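As a sanity check, a minimal 1-D version of the same action-angle map (J = (p^2 + omega^2 q^2)/(2 omega), theta = arctan2(-p, omega q)); A, omega, theta below are arbitrary test values:

import numpy as np

def angact_ho_1d(q, p, omega):
    action = (p**2 + (omega * q)**2) / (2.0 * omega)
    angle = np.arctan2(-p, omega * q) % (2.0 * np.pi)
    return action, angle

# For the SHO, q = A*cos(theta), p = -A*omega*sin(theta) gives J = omega*A^2/2.
A, omega, theta = 2.0, 1.5, 0.7
q, p = A * np.cos(theta), -A * omega * np.sin(theta)
J, ang = angact_ho_1d(q, p, omega)
print(np.isclose(J, 0.5 * omega * A**2), np.isclose(ang, theta))  # True True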
3,133
def nvmlDeviceSetPowerManagementLimit(handle, limit):
    fn = _nvmlGetFunctionPointer("nvmlDeviceSetPowerManagementLimit")
    ret = fn(handle, c_uint(limit))
    _nvmlCheckReturn(ret)
    return None
r""" /** * Set new power limit of this device. * * For Kepler &tm; or newer fully supported devices. * Requires root/admin permissions. * * See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values. * * \note Limit is not persistent across reboots or driver unloads. * Enable persistent mode to prevent driver from unloading when no application is using the device. * * @param device The identifier of the target device * @param limit Power management limit in milliwatts to set * * @return * - \ref NVML_SUCCESS if \a limit has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is out of range * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlDeviceGetPowerManagementLimitConstraints * @see nvmlDeviceGetPowerManagementDefaultLimit */ nvmlReturn_t DECLDIR nvmlDeviceSetPowerManagementLimit
3,134
def get_assessment_ids(self):
    if not self.is_assessment_based_activity():
        raise IllegalState()
    # NOTE: the map-key literal was stripped in the source; 'assessmentIds'
    # is assumed.
    return [Id(a) for a in self._my_map['assessmentIds']]
Gets the Ids of any assessments associated with this activity. return: (osid.id.IdList) - list of assessment Ids raise: IllegalState - is_assessment_based_activity() is false compliance: mandatory - This method must be implemented.
3,135
def add_layout(self, obj, place='center'):
    valid_places = ['left', 'right', 'above', 'below', 'center']
    if place not in valid_places:
        raise ValueError(
            "Invalid place '%s' specified. Valid place values are: %s"
            % (place, nice_join(valid_places))
        )
    getattr(self, place).append(obj)
Adds an object to the plot in a specified place. Args: obj (Renderer) : the object to add to the Plot place (str, optional) : where to add the object (default: 'center') Valid places are: 'left', 'right', 'above', 'below', 'center'. Returns: None
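A hypothetical usage sketch (assuming bokeh is installed; Title is a standard annotation that can be placed this way):

from bokeh.plotting import figure
from bokeh.models import Title

p = figure()
p.add_layout(Title(text="A subtitle"), 'below')  # place the annotation below the plot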
3,136
def citedReferences(self, uid, count=100, offset=1, retrieveParameters=None):
    # NOTE: the databaseId and queryLanguage literals were stripped in the
    # source; 'WOS' and 'en' are assumed.
    return self._search.service.citedReferences(
        databaseId='WOS',
        uid=uid,
        queryLanguage='en',
        retrieveParameters=(retrieveParameters or
                            self.make_retrieveParameters(offset, count))
    )
The citedReferences operation returns references cited by an article identified by a unique identifier. You may specify only one identifier per request. :uid: Thomson Reuters unique record identifier :count: Number of records to display in the result. Cannot be less than 0 and cannot be greater than 100. If count is 0 then only the summary information will be returned. :offset: First record in results to return. Must be greater than zero :retrieveParameters: Retrieve parameters. If omitted the result of make_retrieveParameters(offset, count, 'RS', 'D') is used.
3,137
def get_default_frame(self):
    # NOTE: the settings/parameter literals were stripped in the source;
    # 'Auto', 'TERRAIN_FOLLOW' and 'True' are assumed.
    if self.settings.terrainalt == 'Auto':
        if self.get_mav_param('TERRAIN_FOLLOW', 0) == 1:
            return mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
        return mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT
    if self.settings.terrainalt == 'True':
        return mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
    return mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT
default frame for waypoints
3,138
def base64_b64encode(instr):
    # NOTE: the encoding literal was stripped in the source; 'utf8' is assumed.
    return salt.utils.stringutils.to_unicode(
        base64.b64encode(salt.utils.stringutils.to_bytes(instr)),
        encoding='utf8' if salt.utils.platform.is_windows() else None
    )
Encode a string as base64 using the "modern" Python interface. Among other possible differences, the "modern" encoder does not include newline ('\\n') characters in the encoded output.
3,139
def retrieve_file_from_RCSB(http_connection, resource, silent=True):
    if not silent:
        colortext.printf("Retrieving %s from RCSB" % os.path.split(resource)[1],
                         color="aqua")
    return http_connection.get(resource)
Retrieve a file from the RCSB.
3,140
async def upload_sticker_file(self, user_id: base.Integer,
                              png_sticker: base.InputFile) -> types.File:
    # NOTE: the field-name literals were stripped in the source;
    # 'png_sticker' is assumed.
    payload = generate_payload(**locals(), exclude=['png_sticker'])
    files = {}
    prepare_file(payload, files, 'png_sticker', png_sticker)
    result = await self.request(api.Methods.UPLOAD_STICKER_FILE, payload, files)
    return types.File(**result)
Use this method to upload a .png file with a sticker for later use in createNewStickerSet and addStickerToSet methods (can be used multiple times). Source: https://core.telegram.org/bots/api#uploadstickerfile :param user_id: User identifier of sticker file owner :type user_id: :obj:`base.Integer` :param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. :type png_sticker: :obj:`base.InputFile` :return: Returns the uploaded File on success :rtype: :obj:`types.File`
3,141
def fromtext(source=None, encoding=None, errors='strict', strip=None,
             header=('lines',)):
    source = read_source_from_arg(source)
    return TextView(source, header=header, encoding=encoding,
                    errors=errors, strip=strip)
Extract a table from lines in the given text file. E.g.::

    >>> import petl as etl
    >>> # setup example file
    ... text = 'a,1\\nb,2\\nc,2\\n'
    >>> with open('example.txt', 'w') as f:
    ...     f.write(text)
    ...
    12
    >>> table1 = etl.fromtext('example.txt')
    >>> table1
    +-------+
    | lines |
    +=======+
    | 'a,1' |
    +-------+
    | 'b,2' |
    +-------+
    | 'c,2' |
    +-------+
    >>> # post-process, e.g., with capture()
    ... table2 = table1.capture('lines', '(.*),(.*)$', ['foo', 'bar'])
    >>> table2
    +-----+-----+
    | foo | bar |
    +=====+=====+
    | 'a' | '1' |
    +-----+-----+
    | 'b' | '2' |
    +-----+-----+
    | 'c' | '2' |
    +-----+-----+

Note that the strip() function is called on each line, which by default
will remove leading and trailing whitespace, including the end-of-line
character - use the `strip` keyword argument to specify alternative
characters to strip. Set the `strip` argument to `False` to disable this
behaviour and leave line endings in place.
3,142
def normalized_nodes_on_bdry(nodes_on_bdry, length):
    # NOTE: the error-message literals were stripped in the source;
    # reconstructed approximately.
    shape = np.shape(nodes_on_bdry)
    if shape == ():
        out_list = [(bool(nodes_on_bdry), bool(nodes_on_bdry))] * length
    elif length == 1 and shape == (2,):
        out_list = [(bool(nodes_on_bdry[0]), bool(nodes_on_bdry[1]))]
    elif len(nodes_on_bdry) == length:
        out_list = []
        for i, on_bdry in enumerate(nodes_on_bdry):
            shape_i = np.shape(on_bdry)
            if shape_i == ():
                out_list.append((bool(on_bdry), bool(on_bdry)))
            elif shape_i == (2,):
                out_list.append((bool(on_bdry[0]), bool(on_bdry[1])))
            else:
                raise ValueError(
                    'in axis {}: invalid shape {} of entry, expected () or '
                    '(2,)'.format(i, shape_i))
    else:
        raise ValueError(
            'invalid shape {} of `nodes_on_bdry` for length {}'
            .format(shape, length))
    return out_list
Return a list of 2-tuples of bool from the input parameter.

This function is intended to normalize a ``nodes_on_bdry`` parameter
that can be given as a single boolean (global) or as a sequence (per
axis). Each entry of the sequence can either be a single boolean
(global for the axis) or a boolean sequence of length 2.

Parameters
----------
nodes_on_bdry : bool or sequence
    Input parameter to be normalized according to the above scheme.
length : positive int
    Desired length of the returned list.

Returns
-------
normalized : list of 2-tuples of bool
    Normalized list with ``length`` entries, each of which is a
    2-tuple of boolean values.

Examples
--------
Global for all axes:

>>> normalized_nodes_on_bdry(True, length=2)
[(True, True), (True, True)]

Global per axis:

>>> normalized_nodes_on_bdry([True, False], length=2)
[(True, True), (False, False)]

Mixing global and explicit per axis:

>>> normalized_nodes_on_bdry([[True, False], False, True], length=3)
[(True, False), (False, False), (True, True)]
3,143
def get(key, default=-1):
    if isinstance(key, int):
        return QS_Function(key)
    if key not in QS_Function._member_map_:
        extend_enum(QS_Function, key, default)
    return QS_Function[key]
Backport support for original codes.
3,144
def parse_host(host):
    # NOTE: the regex literals were stripped in the source; these are
    # plausible reconstructions.
    if re.match(r'^\d+$', host) is not None:
        return ("0.0.0.0", int(host))
    if re.match(r'^//', host) is None:
        host = "//" + host
    o = parse.urlparse(host)
    hostname = o.hostname or "0.0.0.0"
    port = o.port or 0
    return (hostname, port)
Parses host name and port number from a string.
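A self-contained sketch of the same behavior (the regex literals were stripped in the source above, so these are assumptions):

import re
from urllib import parse

def parse_host(host):
    if re.match(r'^\d+$', host):      # bare port number
        return ("0.0.0.0", int(host))
    if not re.match(r'^//', host):    # prefix so urlparse sees a netloc
        host = "//" + host
    o = parse.urlparse(host)
    return (o.hostname or "0.0.0.0", o.port or 0)

print(parse_host("8000"))            # ('0.0.0.0', 8000)
print(parse_host("127.0.0.1:5000"))  # ('127.0.0.1', 5000)
print(parse_host("example.com"))     # ('example.com', 0)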
3,145
def filesize(num_bytes):
    # NOTE: the prefix string and format literals were stripped in the source;
    # reconstructed approximately.
    for prefix in ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'):
        if num_bytes < 999.9:
            break
        num_bytes /= 1000.0
    if prefix == '':
        return '{:.0f} bytes'.format(num_bytes)
    return '{:.1f} {}B'.format(num_bytes, prefix)
Return a string containing an approximate representation of *num_bytes* using a small number and decimal SI prefix.
3,146
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
    # NOTE: the dict-key and log-message literals were stripped in the source;
    # reconstructed approximately from the 'ceph df' JSON layout.
    df = self.get_ceph_df(sentry_unit)
    for pool in df['pools']:
        if pool['id'] == pool_id:
            pool_name = pool['name']
            obj_count = pool['stats']['objects']
            kb_used = pool['stats']['kb_used']
            self.log.debug('Ceph {} pool (ID {}): {} objects, '
                           '{} kb used'.format(pool_name, pool_id,
                                               obj_count, kb_used))
            return pool_name, obj_count, kb_used
Take a sample of attributes of a ceph pool, returning ceph pool name, object count and disk space used for the specified pool ID number. :param sentry_unit: Pointer to amulet sentry instance (juju unit) :param pool_id: Ceph pool ID :returns: List of pool name, object count, kb disk space used
3,147
def unregister_service(self, name):
    try:
        self.store.delete_service(name=name)
    except Exception:
        # NOTE: the log-message literal was stripped in the source.
        LOGGER.exception('unregister service failed')
        return False
    else:
        return True
Implementation of :meth:`twitcher.api.IRegistry.unregister_service`.
3,148
def setup_logging(
        config='logging.yaml',
        default_level=logging.INFO,
        env_key='LOG_CFG'):
    # NOTE: the default config path, env-var name, file mode and message were
    # stripped in the source; the values here are hypothetical. Also fixed:
    # the original called .exists() on a string, so os.path.exists is used.
    path = config
    value = os.getenv(env_key, None)
    if value:
        path = value
    if os.path.exists(path):
        with open(path, 'rt') as f:
            config = yaml.safe_load(f.read())
        logging.config.dictConfig(config)
    else:
        print('Config file not found, using basic config: ' + str(path))
        logging.basicConfig(level=default_level)
Setup logging configuration
3,149
def ajIrreguliers(self):
    lignes = lignesFichier(self.path("irregs.la"))
    for lin in lignes:
        try:
            irr = self.parse_irreg(lin)
            self.lemmatiseur._irregs[deramise(irr.gr())].append(irr)
        except Exception as E:
            warnings.warn("Erreur au chargement de l'irrégulier\n" + lin +
                          "\n" + str(E))
            raise E
    for irr in flatten(self.lemmatiseur._irregs.values()):
        irr.lemme().ajIrreg(irr)
Load the irregular forms from the file data/irregs.la
3,150
def is_primitive(value):
    typeCode = TypeConverter.to_type_code(value)
    return typeCode == TypeCode.String or typeCode == TypeCode.Enum \
        or typeCode == TypeCode.Boolean or typeCode == TypeCode.Integer \
        or typeCode == TypeCode.Long or typeCode == TypeCode.Float \
        or typeCode == TypeCode.Double or typeCode == TypeCode.DateTime \
        or typeCode == TypeCode.Duration
Checks if value has primitive type. Primitive types are: numbers, strings, booleans, date and time. Complex (non-primitive) types are: objects, maps and arrays. :param value: a value to check :return: true if the value has primitive type and false if value type is complex.
3,151
def absence_count(self):
    from ..eighth.models import EighthSignup
    return EighthSignup.objects.filter(
        user=self, was_absent=True,
        scheduled_activity__attendance_taken=True).count()
Return the user's absence count. If the user has no absences or is not a signup user, returns 0.
3,152
def create_unique_autosave_filename(self, filename, autosave_dir):
    basename = osp.basename(filename)
    autosave_filename = osp.join(autosave_dir, basename)
    if autosave_filename in self.name_mapping.values():
        counter = 0
        root, ext = osp.splitext(basename)
        while autosave_filename in self.name_mapping.values():
            counter += 1
            # NOTE: the format literal was stripped in the source;
            # '{}-{}{}' is assumed.
            autosave_basename = '{}-{}{}'.format(root, counter, ext)
            autosave_filename = osp.join(autosave_dir, autosave_basename)
    return autosave_filename
Create unique autosave file name for specified file name. Args: filename (str): original file name autosave_dir (str): directory in which autosave files are stored
3,153
def get_next_input(self):
    all_input = Deployment.objects.get(pk=self.id).input or ''
    lines = all_input.splitlines()
    first_line = lines[0] if len(lines) else None
    lines = lines[1:] if len(lines) > 1 else []
    Deployment.objects.filter(pk=self.id).update(input='\n'.join(lines))
    return first_line
Returns the next line of input :return: string of input
3,154
def _fw_delete(self, drvr_name, data):
    # NOTE: the dict-key literal was stripped in the source; 'firewall_id'
    # is hypothetical.
    fw_id = data.get('firewall_id')
    tenant_id = self.tenant_db.get_fw_tenant(fw_id)
    if tenant_id not in self.fwid_attr:
        LOG.error("Invalid tenant id for FW delete %s", tenant_id)
        return
    tenant_obj = self.fwid_attr[tenant_id]
    ret = self._check_delete_fw(tenant_id, drvr_name)
    if ret:
        tenant_obj.delete_fw(fw_id)
        self.tenant_db.del_fw_tenant(fw_id)
Firewall Delete routine. This function calls routines to remove FW from fabric and device. It also updates its local cache.
3,155
def delete_invalid_tickets(self):
    # NOTE: the order_by field literal was stripped in the source; 'expires'
    # is assumed.
    for ticket in self.filter(Q(consumed__isnull=False) |
                              Q(expires__lte=now())).order_by('expires'):
        try:
            ticket.delete()
        except models.ProtectedError:
            pass
Delete consumed or expired ``Ticket``s that are not referenced by other ``Ticket``s. Invalid tickets are no longer valid for authentication and can be safely deleted. A custom management command is provided that executes this method on all applicable models by running ``manage.py cleanupcas``.
3,156
def rpc_put_zonefiles(self, zonefile_datas, **con_info):
    # NOTE: the error-message and config-key literals were stripped in the
    # source; they are reconstructed approximately.
    conf = get_blockstack_opts()
    if not is_atlas_enabled(conf):
        return {'error': 'Atlas is disabled', 'http_status': 400}
    if 'zonefiles' not in conf:
        return {'error': 'No zonefile storage configured', 'http_status': 400}
    if type(zonefile_datas) != list:
        return {'error': 'Expected a list of zonefiles', 'http_status': 400}
    if len(zonefile_datas) > 5:
        return {'error': 'Too many zonefiles', 'http_status': 400}
    for zfd in zonefile_datas:
        if not check_string(zfd, max_length=((4 * RPC_MAX_ZONEFILE_LEN) / 3) + 3,
                            pattern=OP_BASE64_EMPTY_PATTERN):
            return {'error': 'Zonefile must be base64-encoded and at most {} '
                             'bytes'.format(RPC_MAX_ZONEFILE_LEN)}
    zonefile_dir = conf.get("zonefiles", None)
    saved = []
    for zonefile_data in zonefile_datas:
        try:
            zonefile_data = base64.b64decode(zonefile_data)
        except:
            log.debug("Invalid base64 zonefile")
            saved.append(0)
            continue
        if len(zonefile_data) > RPC_MAX_ZONEFILE_LEN:
            log.debug("Zonefile too long")
            saved.append(0)
            continue
        zonefile_hash = get_zonefile_data_hash(str(zonefile_data))
        zfinfos = atlasdb_get_zonefiles_by_hash(zonefile_hash,
                                                path=conf['atlasdb_path'])
        if not zfinfos:
            log.debug("Unknown zonefile hash {}".format(zonefile_hash))
            saved.append(0)
            continue
        rc = store_atlas_zonefile_data(str(zonefile_data), zonefile_dir)
        if not rc:
            log.error("Failed to store zonefile {}".format(zonefile_hash))
            saved.append(0)
            continue
        recovery_start, recovery_end = get_recovery_range(self.working_dir)
        current_block = virtualchain_hooks.get_last_block(self.working_dir)
        if recovery_start is not None and recovery_end is not None and \
                recovery_end < current_block:
            log.debug("Already have zonefile {}".format(zonefile_hash))
            saved.append(1)
            continue
        if self.subdomain_index:
            min_block_height = min([zfi['block_height'] for zfi in zfinfos])
            log.debug("Enqueue {} from {} for subdomain processing".format(
                zonefile_hash, min_block_height))
            self.subdomain_index.enqueue_zonefile(zonefile_hash,
                                                  min_block_height)
        log.debug("Stored new zonefile {}".format(zonefile_hash))
        saved.append(1)
    log.debug("Saved {} zonefile(s)".format(sum(saved)))
    log.debug("Reply: {}".format({'saved': saved}))
    return self.success_response({'saved': saved})
Replicate one or more zonefiles, given as serialized strings. Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files) Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure) Returns {'error': ...} on error Takes at most 5 zonefiles
3,157
async def await_reply(self, correlation_id, timeout=None):
    try:
        result = await asyncio.wait_for(
            self._futures[correlation_id], timeout=timeout)
        return result
    finally:
        del self._futures[correlation_id]
Wait for a reply to a given correlation id. If a timeout is provided, it will raise a asyncio.TimeoutError.
3,158
def map_parameters(cls, params):
    d = {}
    for k, v in six.iteritems(params):
        d[cls.FIELD_MAP.get(k.lower(), k)] = v
    return d
Maps parameters to form field names
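A minimal standalone sketch of the pattern (FIELD_MAP here is a hypothetical mapping; the original iterates with six.iteritems for Python 2 support):

class ExampleForm(object):
    # Hypothetical mapping from API parameter names to form field names.
    FIELD_MAP = {'firstname': 'first_name', 'lastname': 'last_name'}

    @classmethod
    def map_parameters(cls, params):
        d = {}
        for k, v in params.items():  # six.iteritems() in the original
            d[cls.FIELD_MAP.get(k.lower(), k)] = v
        return d

print(ExampleForm.map_parameters({'FirstName': 'Ada', 'age': 36}))
# -> {'first_name': 'Ada', 'age': 36}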
3,159
def list_leases(self, prefix):
    api_path = '/v1/sys/leases/lookup/{prefix}'.format(prefix=prefix)
    response = self._adapter.list(
        url=api_path,
    )
    return response.json()
Retrieve a list of lease ids. Supported methods: LIST: /sys/leases/lookup/{prefix}. Produces: 200 application/json :param prefix: Lease prefix to filter list by. :type prefix: str | unicode :return: The JSON response of the request. :rtype: dict
3,160
def _run_select(self):
    return self._connection.select(
        self.to_sql(),
        self.get_bindings(),
        not self._use_write_connection
    )
Run the query as a "select" statement against the connection. :return: The result :rtype: list
3,161
def set_crypttab(
        name,
        device,
        password='none',
        options='',
        config='/etc/crypttab',
        test=False,
        match_on='name'):
    # NOTE: many string literals were stripped or garbled in the source; the
    # messages, keys and the final new/present/change logic are reconstructed
    # approximately from the surviving fragments.
    if options is None:
        options = ''
    elif isinstance(options, six.string_types):
        pass
    elif isinstance(options, list):
        options = ','.join(options)
    else:
        msg = 'options must be a string or list of strings'
        raise CommandExecutionError(msg)
    entry_args = {
        'name': name,
        'device': device,
        'password': password if password is not None else 'none',
        'options': options,
    }
    lines = []
    ret = None
    if isinstance(match_on, list):
        pass
    elif not isinstance(match_on, six.string_types):
        msg = 'match_on must be a string or list of strings'
        raise CommandExecutionError(msg)
    else:
        match_on = [match_on]
    entry = _crypttab_entry(**entry_args)
    try:
        criteria = entry.pick(match_on)
    except KeyError:
        filterFn = lambda key: key not in _crypttab_entry.crypttab_keys
        invalid_keys = six.moves.filter(filterFn, match_on)
        msg = 'Unrecognized keys in match_on: "{0}"'.format(invalid_keys)
        raise CommandExecutionError(msg)
    if not os.path.isfile(config):
        raise CommandExecutionError('Bad config file "{0}"'.format(config))
    try:
        with salt.utils.files.fopen(config, 'r') as ifile:
            for line in ifile:
                line = salt.utils.stringutils.to_unicode(line)
                try:
                    if criteria.match(line):
                        ret = 'present'
                        if entry.match(line):
                            lines.append(line)
                        else:
                            ret = 'change'
                            lines.append(six.text_type(entry))
                    else:
                        lines.append(line)
                except _crypttab_entry.ParseError:
                    lines.append(line)
    except (IOError, OSError) as exc:
        msg = "Couldn't read from {0}: {1}"
        raise CommandExecutionError(msg.format(config, exc))
    if ret is None:
        lines.append(six.text_type(entry))
        ret = 'new'
    if ret != 'present':
        if not test:
            try:
                with salt.utils.files.fopen(config, 'w+') as ofile:
                    ofile.writelines(salt.utils.data.encode(lines))
            except (IOError, OSError):
                msg = 'File not writable {0}'
                raise CommandExecutionError(msg.format(config))
    return ret
Verify that this device is represented in the crypttab, change the device to match the name passed, or add the name if it is not present. CLI Example: .. code-block:: bash salt '*' cryptdev.set_crypttab foo /dev/sdz1 mypassword swap,size=256
3,162
def ignore(code):
    # NOTE: the option-key literals were stripped in the source; 'ignore' and
    # 'ignore_prefixes' are hypothetical.
    if code in Main.options['ignore']:
        return True
    if any(c in code for c in Main.options['ignore_prefixes']):
        return True
    return False
Should this code be ignored. :param str code: Error code (e.g. D201). :return: True if code should be ignored, False otherwise. :rtype: bool
3,163
def unrate_url(obj):
    # NOTE: the URL-pattern name was stripped in the source;
    # 'ratings_unrate_object' is hypothetical.
    return reverse('ratings_unrate_object', args=(
        ContentType.objects.get_for_model(obj).pk,
        obj.pk,
    ))
Generates a link to "un-rate" the given object - this can be used as a form target or for POSTing via Ajax.
3,164
def action_filter(method_name, *args, **kwargs):
    def action_filter(value, context, **_params):
        method = getattr(context["action"], method_name)
        return _filter(method, value, args, kwargs)
    return action_filter
Creates an effect that will call the action's method with the current value and specified arguments and keywords. @param method_name: the name of method belonging to the action. @type method_name: str
3,165
def parse_declaration_expressn_operator(self, lhsAST, rhsAST, es, operator):
    # NOTE: the quoting literals were stripped in the source; the 'string'
    # check and '"{string}"' formats are reconstructed approximately.
    if isinstance(lhsAST, wdl_parser.Terminal):
        if lhsAST.str == 'string':
            es = es + '"{string}"'.format(string=lhsAST.source_string)
        else:
            es = es + '{string}'.format(string=lhsAST.source_string)
    elif isinstance(lhsAST, wdl_parser.Ast):
        es = es + self.parse_declaration_expressn(lhsAST, es='')
    elif isinstance(lhsAST, wdl_parser.AstList):
        raise NotImplementedError
    es = es + operator
    if isinstance(rhsAST, wdl_parser.Terminal):
        if rhsAST.str == 'string':
            es = es + '"{string}"'.format(string=rhsAST.source_string)
        else:
            es = es + '{string}'.format(string=rhsAST.source_string)
    elif isinstance(rhsAST, wdl_parser.Ast):
        es = es + self.parse_declaration_expressn(rhsAST, es='')
    elif isinstance(rhsAST, wdl_parser.AstList):
        raise NotImplementedError
    return es
Simply joins the left and right hand arguments lhs and rhs with an operator. :param lhsAST: :param rhsAST: :param es: :param operator: :return:
3,166
def OnNodeActivated(self, event):
    # Python 2 code, hence the old-style except clause.
    try:
        node = self.sorted[event.GetIndex()]
    except IndexError, err:
        # NOTE: the log-message literal was stripped in the source.
        log.warn(_('Invalid index in node activated: %(index)s'),
                 index=event.GetIndex())
    else:
        wx.PostEvent(
            self,
            squaremap.SquareActivationEvent(node=node, point=None, map=None)
        )
We have double-clicked or hit enter on a node; refocus squaremap to this node
3,167
def reset(self):
    # NOTE: the command literals were stripped in the source; 'reset' is assumed.
    logger.debug('reset')
    self._log and self.pause()
    self._sendline('reset')
    self._read()
    self._log and self.resume()
Reset openthread device, not equivalent to stop and start
3,168
def create_uinput_device(mapping):
    if mapping not in _mappings:
        raise DeviceError("Unknown device mapping: {0}".format(mapping))
    try:
        mapping = _mappings[mapping]
        device = UInputDevice(mapping)
    except UInputError as err:
        raise DeviceError(err)
    return device
Creates a uinput device.
3,169
def generate_ref(self):
    with self._resolver.in_scope(self._definition['$ref']):
        name = self._resolver.get_scope_name()
        uri = self._resolver.get_uri()
        if uri not in self._validation_functions_done:
            self._needed_validation_functions[uri] = name
        # NOTE: the emitted-code template literal was stripped in the source;
        # this call format is an assumption.
        self.l('{}({variable})', name)
Ref can be link to remote or local definition. .. code-block:: python {'$ref': 'http://json-schema.org/draft-04/schema#'} { 'properties': { 'foo': {'type': 'integer'}, 'bar': {'$ref': '#/properties/foo'} } }
3,170
def parse_reaction_file(path, default_compartment=None):
    # NOTE: the format names and log messages were stripped in the source;
    # 'tsv' and 'yaml' are assumed.
    context = FilePathContext(path)
    format = resolve_format(None, context.filepath)
    if format == 'tsv':
        logger.debug('Parsing reaction file {} as TSV'.format(
            context.filepath))
        with context.open() as f:
            for reaction in parse_reaction_table_file(
                    context, f, default_compartment):
                yield reaction
    elif format == 'yaml':
        logger.debug('Parsing reaction file {} as YAML'.format(
            context.filepath))
        with context.open() as f:
            for reaction in parse_reaction_yaml_file(
                    context, f, default_compartment):
                yield reaction
    else:
        raise ParseError('Unable to detect format of reaction file {}'.format(
            context.filepath))
Open and parse reaction file based on file extension Path can be given as a string or a context.
3,171
def _add_active_assets(specs):
    # NOTE: the dict-key literals were stripped in the source; 'assets',
    # 'name', 'required_by', 'used_by' and 'required' are reconstructed from
    # the docstring and surrounding structure.
    specs['assets'] = {}
    for spec in specs.get_apps_and_libs():
        for asset in spec['assets']:
            if not specs['assets'].get(asset['name']):
                specs['assets'][asset['name']] = {}
                specs['assets'][asset['name']]['required_by'] = set()
                specs['assets'][asset['name']]['used_by'] = set()
            specs['assets'][asset['name']]['used_by'].add(spec.name)
            if asset['required']:
                specs['assets'][asset['name']]['required_by'].add(spec.name)
This function adds an assets key to the specs, which is filled in with a dictionary of all assets defined by apps and libs in the specs
3,172
def account_following(self, id, max_id=None, min_id=None, since_id=None,
                      limit=None):
    # NOTE: the URL and method literals were stripped in the source;
    # reconstructed from the standard Mastodon API endpoint.
    id = self.__unpack_id(id)
    if max_id != None:
        max_id = self.__unpack_id(max_id)
    if min_id != None:
        min_id = self.__unpack_id(min_id)
    if since_id != None:
        since_id = self.__unpack_id(since_id)
    params = self.__generate_params(locals(), ['id'])
    url = '/api/v1/accounts/{0}/following'.format(str(id))
    return self.__api_request('GET', url, params)
Fetch users the given user is following. Returns a list of `user dicts`_.
3,173
def build_disagg_matrix(bdata, bin_edges, sid, mon=Monitor):
    with mon():
        mag_bins, dist_bins, lon_bins, lat_bins, eps_bins = bin_edges
        dim1, dim2, dim3, dim4, dim5 = shape = get_shape(bin_edges, sid)
        # find the bin index for each rupture parameter
        mags_idx = numpy.digitize(bdata.mags + pmf.PRECISION, mag_bins) - 1
        dists_idx = numpy.digitize(bdata.dists[:, sid], dist_bins) - 1
        lons_idx = _digitize_lons(bdata.lons[:, sid], lon_bins[sid])
        lats_idx = numpy.digitize(bdata.lats[:, sid], lat_bins[sid]) - 1
        # values equal to the last edge belong to the last bin
        mags_idx[mags_idx == dim1] = dim1 - 1
        dists_idx[dists_idx == dim2] = dim2 - 1
        lons_idx[lons_idx == dim3] = dim3 - 1
        lats_idx[lats_idx == dim4] = dim4 - 1
        out = {}
        cache = {}
        cache_hit = 0
        num_zeros = 0
        for k, allpnes in bdata.items():
            pnes = allpnes[:, sid, :]
            cache_key = pnes.sum()
            if cache_key == pnes.size:  # all probabilities of no exceedence are 1
                num_zeros += 1
                continue
            try:
                matrix = cache[cache_key]
                cache_hit += 1
            except KeyError:
                mat = numpy.ones(shape)
                for i_mag, i_dist, i_lon, i_lat, pne in zip(
                        mags_idx, dists_idx, lons_idx, lats_idx, pnes):
                    mat[i_mag, i_dist, i_lon, i_lat] *= pne
                matrix = 1. - mat
                cache[cache_key] = matrix
            out[k] = matrix
        # NOTE: the attribute-name literal was stripped in the source;
        # 'cache_info' is assumed.
        if hasattr(mon, 'cache_info'):
            mon.cache_info += numpy.array([len(bdata), cache_hit, num_zeros])
        else:
            mon.cache_info = numpy.array([len(bdata), cache_hit, num_zeros])
    return out
:param bdata: a dictionary of probabilities of no exceedence :param bin_edges: bin edges :param sid: site index :param mon: a Monitor instance :returns: a dictionary key -> matrix|pmf for each key in bdata
3,174
def _read_packet(f, pos, n_smp, n_allchan, abs_delta):
    # NOTE: several byte- and format-string literals were stripped in the
    # source; the struct formats and byte values below are plausible
    # reconstructions.
    if len(abs_delta) == 1:
        abs_delta = unpack('<b', abs_delta)[0]
    else:
        abs_delta = unpack('<h', abs_delta)[0]
    l_deltamask = int(ceil(n_allchan / BITS_IN_BYTE))
    dat = empty((n_allchan, n_smp), dtype=int32)
    f.seek(pos)
    for i_smp in range(n_smp):
        eventbite = f.read(1)
        try:
            assert eventbite in (b'\x00', b'\x01')
        except AssertionError:
            raise Exception('at sample ' + str(i_smp) +
                            ' unexpected eventbite ' + str(eventbite))
        byte_deltamask = unpack('<' + 'B' * l_deltamask, f.read(l_deltamask))
        deltamask = unpackbits(array(byte_deltamask[::-1], dtype='uint8'))
        deltamask = deltamask[:-n_allchan - 1:-1]
        # masked channels use 2-byte deltas ('h'), unmasked use 1 byte ('b')
        n_bytes = int(deltamask.sum()) + deltamask.shape[0]
        deltamask = deltamask.astype('bool')
        delta_dtype = empty(n_allchan, dtype='S1')
        delta_dtype[deltamask] = 'h'
        delta_dtype[~deltamask] = 'b'
        relval = array(unpack('<' + delta_dtype.tostring().decode(),
                              f.read(n_bytes)))
        read_abs = (delta_dtype == b'b') & (relval == abs_delta)
        dat[~read_abs, i_smp] = dat[~read_abs, i_smp - 1] + relval[~read_abs]
        dat[read_abs, i_smp] = fromfile(f, 'int32', count=read_abs.sum())
    return dat
Read a packet of compressed data Parameters ---------- f : instance of opened file erd file pos : int index of the start of the packet in the file (in bytes from beginning of the file) n_smp : int number of samples to read n_allchan : int number of channels (we should specify if shorted or not) abs_delta: byte if the delta has this value, it means that you should read the absolute value at the end of packet. If schema is 7, the length is 1; if schema is 8 or 9, the length is 2. Returns ------- ndarray data read in the packet up to n_smp. Notes ----- TODO: shorted chan. If I remember correctly, deltamask includes all the channels, but the absolute values are only used for not-shorted channels TODO: implement schema 7, which is slightly different, but I don't remember where exactly.
3,175
def text_response(self, contents, code=200, headers=None):
    # Merge the optional headers instead of discarding them (the original
    # ignored the argument), and set the plain-text content type.
    headers = dict(headers or {})
    headers['Content-Type'] = 'text/plain'
    return Response(contents, status=code, headers=headers)
shortcut to return simple plain/text messages in the response. :param contents: a string with the response contents :param code: the http status code :param headers: a dict with optional headers :returns: a :py:class:`flask.Response` with the ``text/plain`` **Content-Type** header.
3,176
def insert_record_by_dict(self, table: str,
                          valuedict: Dict[str, Any]) -> Optional[int]:
    if not valuedict:
        return None
    n = len(valuedict)
    fields = []
    args = []
    for f, v in valuedict.items():
        fields.append(self.delimit(f))
        args.append(v)
    query = "INSERT INTO {table} ({fields}) VALUES ({placeholders})".format(
        table=table,
        fields=",".join(fields),
        placeholders=",".join(["?"] * n)
    )
    query = self.localize_sql(query)
    log.debug("About to insert_record_by_dict with SQL template: " + query)
    try:
        cursor = self.db.cursor()
        debug_sql(query, args)
        cursor.execute(query, args)
        new_pk = get_pk_of_last_insert(cursor)
        log.debug("Record inserted.")
        return new_pk
    except Exception:
        log.exception("insert_record_by_dict: Failed to insert record.")
        raise
Inserts a record into database, table "table", using a dictionary containing field/value mappings. Returns the new PK (or None).
3,177
def version():
    # NOTE: the command and execution-module literals were stripped in the
    # source; 'imgadm --version' and 'cmd.run' are assumed.
    ret = {}
    cmd = 'imgadm --version'
    res = __salt__['cmd.run'](cmd).splitlines()
    ret = res[0].split()
    return ret[-1]
Return imgadm version

CLI Example:

.. code-block:: bash

    salt '*' imgadm.version
3,178
def getRAM(self, ram=None):
    if ram is None:
        ram_size = ale_lib.getRAMSize(self.obj)
        ram = np.zeros(ram_size, dtype=np.uint8)
    ale_lib.getRAM(self.obj, as_ctypes(ram))
This function grabs the atari RAM. ram MUST be a numpy array of uint8/int8. This can be initialized like so: ram = np.array(ram_size,dtype=uint8) Notice: It must be ram_size where ram_size can be retrieved via the getRAMSize function. If it is None, then this function will initialize it.
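A hypothetical usage sketch following the docstring (the import path and ROM name are assumptions and depend on the installed ALE bindings):

import numpy as np
from ale_python_interface import ALEInterface  # hypothetical import path

ale = ALEInterface()
ale.loadROM(b'breakout.bin')                   # hypothetical ROM path
ram = np.zeros(ale.getRAMSize(), dtype=np.uint8)
ale.getRAM(ram)                                # fills `ram` in place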
3,179
def post_shared_file(self, image_file=None, source_link=None, shake_id=None,
                     title=None, description=None):
    # NOTE: the exception messages and endpoint/field literals were stripped
    # in the source; they are reconstructed approximately.
    if image_file and source_link:
        raise Exception('Can only post an image file OR a source link, '
                        'not both.')
    if not image_file and not source_link:
        raise Exception('Must provide an image file or a source link.')
    content_type = self._get_image_type(image_file)
    if not title:
        title = os.path.basename(image_file)
    f = open(image_file, 'rb')
    endpoint = '/api/upload'
    files = {'file': (title, f, content_type)}
    data = self._make_request('POST', endpoint=endpoint, files=files)
    f.close()
    return data
Upload an image. TODO: Don't have a pro account to test (or even write) code to upload a shared filed to a particular shake. Args: image_file (str): path to an image (jpg/gif) on your computer. source_link (str): URL of a source (youtube/vine/etc.) shake_id (int): shake to which to upload the file or source_link [optional] title (str): title of the SharedFile [optional] description (str): description of the SharedFile Returns: SharedFile key.
3,180
def _to_dict(self):
    _dict = {}
    if hasattr(self, 'exclude') and self.exclude is not None:
        _dict['exclude'] = self.exclude
    if hasattr(self, 'include') and self.include is not None:
        _dict['include'] = self.include
    return _dict
Return a json dictionary representing this model.
3,181
def select_radio_button(self, key):
    key_index = list(self._parameter.options.keys()).index(key)
    radio_button = self.input_button_group.button(key_index)
    radio_button.click()
Helper to select a radio button with key. :param key: The key of the radio button. :type key: str
3,182
def WritePreprocessingInformation(self, knowledge_base):
    self._RaiseIfNotWritable()
    if self.storage_type != definitions.STORAGE_TYPE_SESSION:
        # NOTE: the error-message literal was stripped in the source.
        raise IOError('Preprocessing information not supported by storage type.')
    system_configuration = knowledge_base.GetSystemConfigurationArtifact()
    self._WriteAttributeContainer(system_configuration)
Writes preprocessing information. Args: knowledge_base (KnowledgeBase): contains the preprocessing information. Raises: IOError: if the storage type does not support writing preprocess information or the storage file is closed or read-only. OSError: if the storage type does not support writing preprocess information or the storage file is closed or read-only.
3,183
def nextSunrise(jd, lat, lon):
    # NOTE: the transit-type literal was stripped in the source; 'RISE' is assumed.
    return swe.sweNextTransit(const.SUN, jd, lat, lon, 'RISE')
Returns the JD of the next sunrise.
3,184
def in_check(self, position, location=None):
    location = location or self.location
    for piece in position:
        if piece is not None and piece.color != self.color:
            if not isinstance(piece, King):
                for move in piece.possible_moves(position):
                    if move.end_loc == location:
                        return True
            else:
                if self.loc_adjacent_to_opponent_king(piece.location, position):
                    return True
    return False
Finds if the king is in check or if both kings are touching. :type position: Board :return: bool
3,185
def more_like_this(self, q, mltfl, handler='mlt', **kwargs):
    # NOTE: the parameter-key and response-key literals were stripped in the
    # source; 'q', 'mlt.fl', 'response' and 'numFound' are reconstructed from
    # the standard Solr MLT interface.
    params = {
        'q': q,
        'mlt.fl': mltfl,
    }
    params.update(kwargs)
    response = self._mlt(params, handler=handler)
    decoded = self.decoder.decode(response)
    self.log.debug(
        "Found '%s' MLT results.",
        (decoded.get('response', {}) or {}).get('numFound', 0)
    )
    return self.results_cls(decoded)
Finds and returns results similar to the provided query. Returns ``self.results_cls`` class object (defaults to ``pysolr.Results``) Requires Solr 1.3+. Usage:: similar = solr.more_like_this('id:doc_234', 'text')
3,186
def message(self, level, *args):
    msg = ' '.join((str(o) for o in args))
    # NOTE: the level names and format literal were stripped in the source;
    # reconstructed approximately.
    if level not in ('debug', 'verbose', 'info', 'warn', 'error', 'critical'):
        return msg
    return '%s: %s' % (level, msg)
Format the message of the logger. You can rewrite this method to format your own message:: class MyLogger(Logger): def message(self, level, *args): msg = ' '.join(args) if level == 'error': return terminal.red(msg) return msg
3,187
def order(self):
    return [x.val for theclass in self.classes for x in theclass.items]
Produce a flattened list of the partition, ordered by classes
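A minimal sketch of the flattening pattern, with toy stand-ins for the partition's classes and items:

class Item:
    def __init__(self, val):
        self.val = val

class TheClass:
    def __init__(self, vals):
        self.items = [Item(v) for v in vals]

classes = [TheClass([1, 2]), TheClass([3])]
order = [x.val for theclass in classes for x in theclass.items]
print(order)  # [1, 2, 3]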
3,188
def _might_have_parameter(fn_or_cls, arg_name):
    if inspect.isclass(fn_or_cls):
        fn = _find_class_construction_fn(fn_or_cls)
    else:
        fn = fn_or_cls
    while hasattr(fn, '__wrapped__'):
        fn = fn.__wrapped__
    arg_spec = _get_cached_arg_spec(fn)
    if six.PY3:
        if arg_spec.varkw:
            return True
        return arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs
    else:
        if arg_spec.keywords:
            return True
        return arg_name in arg_spec.args
Returns True if `arg_name` might be a valid parameter for `fn_or_cls`. Specifically, this means that `fn_or_cls` either has a parameter named `arg_name`, or has a `**kwargs` parameter. Args: fn_or_cls: The function or class to check. arg_name: The name fo the parameter. Returns: Whether `arg_name` might be a valid argument of `fn`.
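A simplified, Python-3-only sketch of the same check using inspect directly (the caching and class-constructor lookup of the original are omitted):

import inspect

def might_have_parameter(fn, arg_name):
    spec = inspect.getfullargspec(fn)
    if spec.varkw:  # a **kwargs parameter accepts any keyword
        return True
    return arg_name in spec.args or arg_name in spec.kwonlyargs

def f(a, *, b): pass
def g(**kwargs): pass

print(might_have_parameter(f, 'b'))  # True
print(might_have_parameter(f, 'c'))  # False
print(might_have_parameter(g, 'c'))  # True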
3,189
def end(self):
    if not self.args.disable_autodiscover:
        self.autodiscover_client.close()
    self.server.end()
End of the Glances server session.
3,190
def message(self, text):
    # NOTE: the format literal was stripped in the source; reconstructed
    # approximately.
    self.client.publish(self.keys.external,
                        '{0}: {1}'.format(self.resource, text))
Public message.
3,191
def get_upcoming_events(self, days_to_look_ahead):
    now = datetime.now(tz=self.timezone)
    start_time = datetime(year=now.year, month=now.month, day=now.day,
                          hour=now.hour, minute=now.minute, second=now.second,
                          tzinfo=self.timezone)
    end_time = start_time + timedelta(days=days_to_look_ahead)
    start_time = start_time.isoformat()
    end_time = end_time.isoformat()
    return self.get_events(start_time, end_time)
Returns the events from the calendar for the next days_to_look_ahead days.
3,192
def _get_mean(self, imt, mag, hypo_depth, rrup, d):
    mag = min(mag, 8.3)
    # NOTE: the IMT-name literal was stripped in the source; 'PGV' is assumed
    # for the first branch.
    if imt.name == 'PGV':
        mean = (
            0.58 * mag + 0.0038 * hypo_depth + d - 1.29 -
            np.log10(rrup + 0.0028 * 10 ** (0.5 * mag)) - 0.002 * rrup
        )
    else:
        mean = (
            0.50 * mag + 0.0043 * hypo_depth + d + 0.61 -
            np.log10(rrup + 0.0055 * 10 ** (0.5 * mag)) - 0.003 * rrup
        )
    mean = np.log10(10**(mean) / (g * 100))
    return mean
Return mean value as defined in equation 3.5.1-1 page 148
3,193
def _hbf_handle_child_elements(self, obj, ntl):
    cd = {}
    ko = []
    ks = set()
    for child in ntl:
        k = child.nodeName
        # NOTE: the node-name literal was stripped in the source; 'meta' is
        # assumed from the meta-transform call below.
        if k == 'meta' and (not self._badgerfish_style_conversion):
            matk, matv = self._transform_meta_key_value(child)
            if matk is not None:
                _add_value_to_dict_bf(obj, matk, matv)
        else:
            if k not in ks:
                ko.append(k)
                ks.add(k)
            _add_value_to_dict_bf(cd, k, child)
    for k in ko:
        v = _index_list_of_values(cd, k)
        dcl = []
        ct = None
        for xc in v:
            ct, dc = self._gen_hbf_el(xc)
            dcl.append(dc)
        assert ct not in obj
        obj[ct] = dcl
    _cull_redundant_about(obj)
    return obj
Indirect recursion through _gen_hbf_el
3,194
def create_dockwidget(self):
    dock = SpyderDockWidget(self.get_plugin_title(), self.main)
    dock.setObjectName(self.__class__.__name__ + "_dw")
    dock.setAllowedAreas(self.ALLOWED_AREAS)
    dock.setFeatures(self.FEATURES)
    dock.setWidget(self)
    self.update_margins()
    dock.visibilityChanged.connect(self.visibility_changed)
    dock.topLevelChanged.connect(self.on_top_level_changed)
    dock.sig_plugin_closed.connect(self.plugin_closed)
    self.dockwidget = dock
    if self.shortcut is not None:
        sc = QShortcut(QKeySequence(self.shortcut), self.main,
                       self.switch_to_plugin)
        self.register_shortcut(sc, "_", "Switch to %s" % self.CONF_SECTION)
    return (dock, self.LOCATION)
Add to parent QMainWindow as a dock widget
3,195
def rename(script, label='blank', layer_num=None):
    filter_xml = ''.join([
        '  <filter name="Rename Current Mesh">\n',
        '    <Param name="newName" ',
        'value="{}" '.format(label),
        'description="New Label" ',
        'type="RichString" ',
        '/>\n',
        '  </filter>\n'])
    if isinstance(script, mlx.FilterScript):
        if (layer_num is None) or (layer_num == script.current_layer()):
            util.write_filter(script, filter_xml)
            script.layer_stack[script.current_layer()] = label
        else:
            cur_layer = script.current_layer()
            change(script, layer_num)
            util.write_filter(script, filter_xml)
            change(script, cur_layer)
            script.layer_stack[layer_num] = label
    else:
        util.write_filter(script, filter_xml)
    return None
Rename layer label

Can be useful for outputting mlp files, as the output file names use
the labels.

Args:
    script: the mlx.FilterScript object or script filename to write
        the filter to.
    label (str): new label for the mesh layer
    layer_num (int): layer number to rename. Default is the current
        layer. Not supported on the file base API.

Layer stack:
    Renames a layer

MeshLab versions:
    2016.12
    1.3.4BETA
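A hedged usage sketch; the FilterScript construction and file names are illustrative assumptions:

    import meshlabxml as mlx

    script = mlx.FilterScript(file_in='input.ply', file_out='output.ply')
    rename(script, label='cleaned_mesh')           # rename the current layer
    rename(script, label='original', layer_num=0)  # rename layer 0 by number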
3,196
def substitute(self, index, func_grp, bond_order=1):
    # Find the nearest neighbor that is not a terminal atom.
    all_non_terminal_nn = []
    for nn, dist in self.get_neighbors(self[index], 3):
        # Check that the nn has neighbors within a sensible distance but
        # is not the site being substituted.
        for inn, dist2 in self.get_neighbors(nn, 3):
            if inn != self[index] and \
                    dist2 < 1.2 * get_bond_length(nn.specie, inn.specie):
                all_non_terminal_nn.append((nn, dist))
                break

    if len(all_non_terminal_nn) == 0:
        raise RuntimeError("Can't find functional group in list. "
                           "Provide explicit coordinate instead")

    # Attach to the closest non-terminal neighbor.
    non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0]
    origin = non_terminal_nn.coords

    # Resolve a string name to a template from func_groups.json.
    if not isinstance(func_grp, Molecule):
        func_grp = FunctionalGroups[func_grp]

    # If a bond length can be found, modify func_grp so that the X-group
    # bond length equals it.
    try:
        bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie,
                             bond_order=bond_order)
    # Catches the case of incompatibility between Element(s) and Specie(s).
    except TypeError:
        bl = None

    if bl is not None:
        func_grp = func_grp.copy()
        vec = func_grp[0].coords - func_grp[1].coords
        vec /= np.linalg.norm(vec)
        func_grp[0] = "X", func_grp[1].coords + float(bl) * vec

    # Align X to the origin.
    x = func_grp[0]
    func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)

    # Find the angle between the attaching bond and the bond between the
    # nearest neighbor and the site to be replaced.
    v1 = func_grp[1].coords - origin
    v2 = self[index].coords - origin
    angle = get_angle(v1, v2)

    if 1 < abs(angle % 180) < 179:
        # For angles which are not 0 or 180, rotate about an axis
        # perpendicular to both bonds so they align.
        axis = np.cross(v1, v2)
        op = SymmOp.from_origin_axis_angle(origin, axis, angle)
        func_grp.apply_operation(op)
    elif abs(abs(angle) - 180) < 1:
        # A 180 degree angle: simply invert about the origin.
        for i in range(len(func_grp)):
            func_grp[i] = (func_grp[i].species,
                           origin - (func_grp[i].coords - origin))

    # Remove the atom being replaced and append the rest of the
    # functional group.
    del self[index]
    for site in func_grp[1:]:
        s_new = PeriodicSite(site.species, site.coords, self.lattice,
                             coords_are_cartesian=True)
        self._sites.append(s_new)
Substitute atom at index with a functional group.

Args:
    index (int): Index of atom to substitute.
    func_grp: Substituent molecule. There are two options:

        1. Providing an actual Molecule as the input. The first atom
           must be a DummySpecie X, indicating the position of nearest
           neighbor. The second atom must be the next nearest atom. For
           example, for a methyl group substitution, func_grp should be
           X-CH3, where X is the first site and C is the second site.
           What the code will do is to remove the index site, and
           connect the nearest neighbor to the C atom in CH3. The X-C
           bond indicates the directionality to connect the atoms.
        2. A string name. The molecule will be obtained from the
           relevant template in func_groups.json.
    bond_order (int): A specified bond order to calculate the bond
        length between the attached functional group and the nearest
        neighbor site. Defaults to 1.
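A hedged usage sketch on a toy periodic fragment; the geometry is illustrative, and 'methyl' is assumed to be a template name in func_groups.json:

    from pymatgen.core import Lattice, Structure

    # H-C-C chain along x in a large cubic cell (not a relaxed geometry).
    s = Structure(Lattice.cubic(12.0), ['C', 'C', 'H'],
                  [[0.0, 0.0, 0.0], [1.5, 0.0, 0.0], [-1.1, 0.0, 0.0]],
                  coords_are_cartesian=True)
    s.substitute(2, 'methyl')  # replace the H site with a -CH3 group
    print(s.composition)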
3,197
def yaml_filter(element, doc, tag=None, function=None, tags=None,
                strict_yaml=False):
    # Allow for either a single tag+function pair or a dict of tags.
    assert (tag is None) + (tags is None) == 1
    if tags is None:
        tags = {tag: function}

    if type(element) == CodeBlock:
        for tag in tags:
            if tag in element.classes:
                function = tags[tag]
                if not strict_yaml:
                    # YAML and data are separated by a ... or --- line.
                    raw = re.split("^([.]{3,}|[-]{3,})$", element.text,
                                   1, re.MULTILINE)
                    data = raw[2] if len(raw) > 2 else ''
                    data = data.lstrip('\n')
                    raw = raw[0]
                    try:
                        options = yaml.safe_load(raw)
                    except yaml.scanner.ScannerError:
                        debug("panflute: malformed YAML block")
                        return
                    if options is None:
                        options = {}
                else:
                    options = {}
                    data = []
                    raw = re.split("^([.]{3,}|[-]{3,})$", element.text,
                                   0, re.MULTILINE)
                    rawmode = True
                    for chunk in raw:
                        chunk = chunk.strip()
                        if not chunk:
                            continue
                        if rawmode:
                            if chunk.startswith('---'):
                                rawmode = False
                            else:
                                data.append(chunk)
                        else:
                            if chunk.startswith('---') or chunk.startswith('...'):
                                rawmode = True
                            else:
                                try:
                                    options.update(yaml.safe_load(chunk))
                                except yaml.scanner.ScannerError:
                                    debug("panflute: malformed YAML block")
                                    return
                    data = '\n'.join(data)
                return function(options=options, data=data,
                                element=element, doc=doc)
Convenience function for parsing code blocks with YAML options

This function is useful to create a filter that applies to code blocks
that have specific classes. It is used as an argument of ``run_filter``,
with two additional options: ``tag`` and ``function``.

Using this is equivalent to having filter functions that:

1. Check if the element is a code block
2. Check if the element belongs to a specific class
3. Split the YAML options (at the beginning of the block, by looking
   for ``...`` or ``---`` strings in a separate line)
4. Parse the YAML
5. Use the YAML options and (optionally) the data that follows the YAML
   to return a new or modified element

Instead, you just need to:

1. Call ``run_filter`` with ``yaml_filter`` as the action function, and
   with the additional arguments ``tag`` and ``function``
2. Construct a ``fenced_action`` function that takes four arguments:
   (options, data, element, doc). Note that options is a dict and data
   is a raw string. Notice that this is similar to the ``action``
   functions of standard filters, but with *options* and *data* as the
   new ones.

Note: if you want to apply multiple functions to separate classes, you
can use the ``tags`` argument, which receives a dict of
``tag: function`` pairs.

Note: use the ``strict_yaml=True`` option in order to allow for more
verbose but flexible YAML metadata: more than one YAML block is
allowed, but they all must start with ``---`` (even at the beginning)
and end with ``---`` or ``...``. Also, YAML is not the default content
when no delimiters are set.

Example::

    """
    Replace code blocks of class 'foo' with # horizontal rules
    """

    import panflute as pf

    def fenced_action(options, data, element, doc):
        count = options.get('count', 1)
        div = pf.Div(attributes={'count': str(count)})
        div.content.extend([pf.HorizontalRule] * count)
        return div

    if __name__ == '__main__':
        pf.run_filter(pf.yaml_filter, tag='foo', function=fenced_action)
3,198
def to_csv(self, dest:str)->None:
    "Save `self.to_df()` to a CSV file in `self.path`/`dest`."
    self.to_df().to_csv(self.path/dest, index=False)
Save `self.to_df()` to a CSV file in `self.path`/`dest`.
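A minimal self-contained sketch of the same pattern, assuming only a `path` attribute and a `to_df()` method; the class and column names are hypothetical:

    from pathlib import Path
    import pandas as pd

    class Exportable:
        def __init__(self, path): self.path = Path(path)
        def to_df(self): return pd.DataFrame({'x': [1, 2, 3]})
        def to_csv(self, dest:str)->None:
            "Save `self.to_df()` to a CSV file in `self.path`/`dest`."
            self.to_df().to_csv(self.path/dest, index=False)

    Exportable('.').to_csv('export.csv')  # writes ./export.csv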
3,199
def create_api_method(restApiId, resourcePath, httpMethod, authorizationType,
                      apiKeyRequired=False, requestParameters=None,
                      requestModels=None, region=None, key=None, keyid=None,
                      profile=None):
    try:
        resource = describe_api_resource(restApiId, resourcePath,
                                         region=region, key=key, keyid=keyid,
                                         profile=profile).get('resource')
        if resource:
            requestParameters = dict() if requestParameters is None else requestParameters
            requestModels = dict() if requestModels is None else requestModels
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            method = conn.put_method(restApiId=restApiId,
                                     resourceId=resource['id'],
                                     httpMethod=httpMethod,
                                     authorizationType=str(authorizationType),
                                     apiKeyRequired=apiKeyRequired,
                                     requestParameters=requestParameters,
                                     requestModels=requestModels)
            return {'created': True, 'method': method}
        return {'created': False, 'error': 'Failed to create method'}
    except ClientError as e:
        return {'created': False, 'error': __utils__['boto3.get_error'](e)}
Creates API method for a resource in the given API

CLI Example:

.. code-block:: bash

    salt myminion boto_apigateway.create_api_method restApiId resourcePath, httpMethod, authorizationType, \\
        apiKeyRequired=False, requestParameters='{"name", "value"}', requestModels='{"content-type", "value"}'