Columns: code — string, lengths 59 to 3.37k characters; docstring — string, lengths 8 to 15.5k characters.
def wd_grid_to_mesh_dict(the_grid, q, F, d):
    triangles_9N = the_grid[:, 4:13]

    new_mesh = {}
    new_mesh['compute_at_vertices'] = False
    new_mesh['vertices'] = triangles_9N.reshape(-1, 3)
    new_mesh['triangles'] = np.arange(len(triangles_9N) * 3).reshape(-1, 3)
    new_mesh['centers'] = the_grid[:, 0:3]
    new_mesh['tnormals'] = the_grid[:, 13:16]
    norms = np.linalg.norm(new_mesh['tnormals'], axis=1)
    new_mesh['normgrads'] = norms
    new_mesh['tnormals'] = np.array([tn / n for tn, n in zip(new_mesh['tnormals'], norms)])
    new_mesh['areas'] = the_grid[:, 3]
    new_mesh['tareas'] = the_grid[:, 18]
    new_mesh['thetas'] = the_grid[:, 16]
    new_mesh['phis'] = the_grid[:, 17]
    new_mesh['volume'] = compute_volume(new_mesh['areas'], new_mesh['centers'], new_mesh['tnormals'])
    new_mesh['velocities'] = np.zeros(new_mesh['centers'].shape)
    return new_mesh
Transform a wd-style mesh to the format used by PHOEBE. Namely this handles translating vertices from Nx9 to Nx3x3 and creating the array of indices for each triangle. :parameter record-array the_grid: output from discretize_wd_style :parameter float q: mass-ratio (M_this/M_sibling) :parameter float F: syncpar :parameter float d: instantaneous unitless separation :return: the dictionary in PHOEBE's format to be passed to a Mesh class
def to_pygsp(self, **kwargs):
    from . import api
    if 'precomputed' in kwargs:
        if kwargs['precomputed'] != 'affinity':
            warnings.warn(
                "Cannot build PyGSPGraph with precomputed={}. "
                "Using 'affinity' instead.".format(kwargs['precomputed']),
                UserWarning)
        del kwargs['precomputed']
    if 'use_pygsp' in kwargs:
        if kwargs['use_pygsp'] is not True:
            warnings.warn(
                "Cannot build PyGSPGraph with use_pygsp={}. "
                "Use True instead.".format(kwargs['use_pygsp']),
                UserWarning)
        del kwargs['use_pygsp']
    return api.Graph(self.K, precomputed="affinity", use_pygsp=True, **kwargs)
Convert to a PyGSP graph For use only when the user means to create the graph using the flag `use_pygsp=True`, and doesn't wish to recompute the kernel. Creates a graphtools.graphs.TraditionalGraph with a precomputed affinity matrix which also inherits from pygsp.graphs.Graph. Parameters ---------- kwargs keyword arguments for graphtools.Graph Returns ------- G : graphtools.base.PyGSPGraph, graphtools.graphs.TraditionalGraph
def get_profile(session):
    try:
        profile = session.get(PROFILE_URL).json()
        if 'errorCode' in profile and profile['errorCode'] == '403':
            raise MoparError("not logged in")
        return profile
    except JSONDecodeError:
        raise MoparError("not logged in")
Get complete profile.
def _compute_symbolic_link_mapping(
    directory: str, extensions: Iterable[str]
) -> Dict[str, str]:
    symbolic_links = {}
    try:
        for symbolic_link in find_paths_with_extensions(directory, extensions):
            symbolic_links[os.path.realpath(symbolic_link)] = symbolic_link
    except subprocess.CalledProcessError as error:
        LOG.warning(
            "Exception encountered trying to find source files "
            "in the analysis directory: `%s`",
            error,
        )
        LOG.warning("Starting with an empty set of tracked files.")
    return symbolic_links
Given a shared analysis directory, produce a mapping from actual source files to files contained within this directory. Only includes files which have one of the provided extensions. Watchman watches actual source files, so when a change is detected to a file, this mapping can be used to identify what file changed from Pyre's perspective.
def initial_dist_from_config(cp, variable_params):
    if len(cp.get_subsections("initial")):
        logging.info("Using a different distribution for the starting points "
                     "than the prior.")
        initial_dists = distributions.read_distributions_from_config(
            cp, section="initial")
        constraints = distributions.read_constraints_from_config(
            cp, constraint_section="initial_constraint")
        init_dist = distributions.JointDistribution(
            variable_params, *initial_dists,
            **{"constraints": constraints})
    else:
        init_dist = None
    return init_dist
Loads a distribution for the sampler start from the given config file. A distribution will only be loaded if the config file has a [initial-\*] section(s). Parameters ---------- cp : Config parser The config parser to try to load from. variable_params : list of str The variable parameters for the distribution. Returns ------- JointDistribution or None : The initial distribution. If no [initial-\*] section found in the config file, will just return None.
def intersectingInterval(self, start, end):
    l = []
    for x in self.data.starts:
        xStartsAfterInterval = (x.start > end and not self.openEnded) or \
                               (x.start >= end and self.openEnded)
        xEndsBeforeInterval = (x.end < start and not self.openEnded) or \
                              (x.end <= start and self.openEnded)
        if (not xStartsAfterInterval) and (not xEndsBeforeInterval):
            l.append(x)
    if self.left is not None and start <= self.data.mid:
        l += self.left.intersectingInterval(start, end)
    if self.right is not None and end >= self.data.mid:
        l += self.right.intersectingInterval(start, end)
    return l
Given an interval, get the intervals in the tree that it intersects. :param start: start of the intersecting interval :param end: end of the intersecting interval :return: the list of intersected intervals
def _parse_feature(self, info):
    parts = info.split(b'=', 1)
    name = parts[0]
    if len(parts) > 1:
        value = self._path(parts[1])
    else:
        value = None
    self.features[name] = value
    return commands.FeatureCommand(name, value, lineno=self.lineno)
Parse a feature command.
def validate_password(self, password):
    hash = sha256()
    hash.update((password + self.password[:64]).encode('utf-8'))
    return self.password[64:] == hash.hexdigest()
Check the password against existing credentials. :param password: the password that was provided by the user to try and authenticate. This is the clear text version that we will need to match against the hashed one in the database. :type password: unicode object. :return: Whether the password is valid. :rtype: bool
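For illustration, a minimal sketch of how a stored credential matching this scheme could be produced. The layout (a 64-character salt followed by the SHA-256 hex digest of password + salt) is inferred from the code above, and make_stored_password is a hypothetical helper, not part of the library.

from hashlib import sha256

def make_stored_password(password, salt):
    # the salt occupies the first 64 characters of the stored field
    assert len(salt) == 64
    digest = sha256((password + salt).encode('utf-8')).hexdigest()
    return salt + digest

stored = make_stored_password("s3cret", "a" * 64)
# validate_password() recomputes sha256(password + stored[:64]) and
# compares the result against stored[64:].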
def emit_message(self, message, log_level):
    self.log(message)
    if log_level == logging.ERROR:
        if self._error_msg_callback:
            self._error_msg_callback(message)
            return
    if log_level == logging.WARNING:
        if self._warning_msg_callback:
            self._warning_msg_callback(message)
            return
    if log_level == logging.INFO:
        if self._info_msg_callback:
            self._info_msg_callback(message)
            return
    if self._msg_callback:
        self._msg_callback(message)
Call the msg callback function with the message.
def propagate_measurement_info(self):
    meas_df = self.tables['measurements'].df
    names_list = ['specimen', 'sample', 'site', 'location']
    for num, name in enumerate(names_list):
        if (name + "s") in self.tables:
            continue
        elif name in meas_df.columns:
            items = meas_df[name].unique()
            df = pd.DataFrame(columns=[name], index=items)
            df[name] = df.index
            if num < (len(names_list) - 1):
                parent = names_list[num + 1]
                if parent in meas_df.columns:
                    meas_df = meas_df.where(meas_df.notnull(), "")
                    df[parent] = meas_df.drop_duplicates(subset=[name])[parent].values.astype(str)
            df = df.where(df != "", np.nan)
            df = df.dropna(how='all', axis='rows')
            if len(df):
                self.tables[name + "s"] = MagicDataFrame(dtype=name + "s", df=df)
                self.write_table_to_file(name + "s")
Take a contribution with a measurement table. Create specimen, sample, site, and location tables using the unique names in the measurement table to fill in the index.
def GetFormattedEventObject(cls, event):
    time_string = timelib.Timestamp.CopyToIsoFormat(event.timestamp)

    lines_of_text = [
        '+-' * 40,
        '[Timestamp]:',
        ' {0:s}'.format(time_string)]

    pathspec = getattr(event, 'pathspec', None)
    if pathspec:
        lines_of_text.append('[Pathspec]:')
        attribute_string = pathspec.comparable.replace('\n', '\n ')
        attribute_string = ' {0:s}\n'.format(attribute_string)
        lines_of_text.append(attribute_string)

    lines_of_text.append('[Reserved attributes]:')
    out_additional = ['[Additional attributes]:']

    for attribute_name, attribute_value in sorted(event.GetAttributes()):
        if attribute_name not in definitions.RESERVED_VARIABLE_NAMES:
            attribute_string = ' {{{0!s}}} {1!s}'.format(
                attribute_name, attribute_value)
            out_additional.append(attribute_string)
        elif attribute_name not in ('pathspec', 'tag'):
            attribute_string = ' {{{0!s}}} {1!s}'.format(
                attribute_name, attribute_value)
            lines_of_text.append(attribute_string)

    lines_of_text.append('')
    out_additional.append('')

    lines_of_text.extend(out_additional)
    return '\n'.join(lines_of_text)
Retrieves a string representation of the event. Args: event (EventObject): event. Returns: str: string representation of the event.
def token(cls: Type[XHXType], sha_hash: str) -> XHXType:
    xhx = cls()
    xhx.sha_hash = sha_hash
    return xhx
Return XHX instance from sha_hash :param sha_hash: SHA256 hash :return:
def _replace_zeros(arr, default_min_value):
    min_nonzero_value = min(default_min_value, np.min(arr[arr > 0]))
    closest_to_zero = np.nextafter(min_nonzero_value, min_nonzero_value - 1)
    arr[arr == 0] = closest_to_zero
    return arr
Substitute 0s in the list with a near-zero value. Parameters ----------- arr : numpy.array(float) default_min_value : float If the smallest non-zero element in `arr` is greater than the default, use the default instead. Returns ----------- numpy.array(float)
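A small usage sketch, assuming NumPy is available and _replace_zeros is imported from the module above:

import numpy as np

arr = np.array([0.0, 0.5, 2.0])
# The smallest non-zero entry (0.5) beats the default of 1.0, so zeros are
# replaced with the float immediately below 0.5.
result = _replace_zeros(arr.copy(), 1.0)
# result -> [0.49999999999999994, 0.5, 2.0]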
def check_status_code(response, codes=None):
    codes = codes or [200]
    if response.status_code not in codes:
        raise StatusCodeError(response.status_code)
Checks response.status_code is in codes. :param requests.request response: Requests response :param list codes: List of accepted codes or callable :raises: StatusCodeError if code invalid
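A brief usage sketch; the requests call and URL are illustrative only:

import requests

response = requests.get("https://example.com")
check_status_code(response)                    # accepts only 200 by default
check_status_code(response, codes=[200, 204])  # widen the accepted set
# Any other status raises StatusCodeError(response.status_code).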
def set(self, **kargs):
    kwords = set([
        'mopt', 'fast', 'ratio', 'wavg_kargs', 'wavg_all',
        'fitterargs', 'fitname',
        ])
    kargs = dict(kargs)
    oldkargs = {}
    fargs = {}
    for k in list(kargs.keys()):
        if k in kwords:
            oldkargs[k] = getattr(self, k)
            setattr(self, k, kargs[k])
            kwords.remove(k)
        else:
            fargs[k] = kargs[k]
            del kargs[k]
    for k in kwords:
        kargs[k] = getattr(self, k)
    if 'fitterargs' in kwords:
        oldkargs['fitterargs'] = self.fitterargs
        self.fitterargs = dict(self.fitterargs)
    if len(fargs) > 0:
        self.fitterargs.update(fargs)
    kargs['fitterargs'] = dict(self.fitterargs)
    return kargs, oldkargs
Reset default keyword parameters. Assigns new default values from dictionary ``kargs`` to the fitter's keyword parameters. Keywords for the underlying :mod:`lsqfit` fitters can also be included (or grouped together in dictionary ``fitterargs``). Returns tuple ``(kargs, oldkargs)`` where ``kargs`` is a dictionary containing all :class:`lsqfit.MultiFitter` keywords after they have been updated, and ``oldkargs`` contains the original values for these keywords. Use ``fitter.set(**oldkargs)`` to restore the original values.
def receive_verify_post(self, post_params):
    if isinstance(post_params, dict):
        required_params = ['action', 'email', 'send_id', 'sig']
        if not self.check_for_valid_postback_actions(required_params, post_params):
            return False
    else:
        return False

    if post_params['action'] != 'verify':
        return False

    sig = post_params['sig']
    post_params = post_params.copy()
    del post_params['sig']
    if sig != get_signature_hash(post_params, self.secret):
        return False

    send_response = self.get_send(post_params['send_id'])
    try:
        send_body = send_response.get_body()
        send_json = json.loads(send_body)
        if 'email' not in send_body:
            return False
        if send_json['email'] != post_params['email']:
            return False
    except ValueError:
        return False

    return True
Returns true if the incoming request is an authenticated verify post.
def modules(self, value):
    modules = [module.__name__ for module in self.loadmodules(value)]
    self._modules = [
        module for module in self._modules + modules
        if module not in self._modules
    ]
Change required modules. Reload modules given in the value. :param list value: new modules to use.
def serialize(pca, **kwargs):
    strike, dip, rake = pca.strike_dip_rake()
    hyp_axes = sampling_axes(pca)

    return dict(
        **kwargs,
        principal_axes=pca.axes.tolist(),
        hyperbolic_axes=hyp_axes.tolist(),
        n_samples=pca.n,
        strike=strike, dip=dip, rake=rake,
        angular_errors=[2 * N.degrees(i)
                        for i in angular_errors(hyp_axes)])
Serialize an orientation object to a dict suitable for JSON
def detectSonyMylo(self):
    return UAgentInfo.manuSony in self.__userAgent \
        and (UAgentInfo.qtembedded in self.__userAgent
             or UAgentInfo.mylocom2 in self.__userAgent)
Return detection of a Sony Mylo device. Detects if the current browser is a Sony Mylo device.
def copyfile_and_gzip(self, source_path, target_path):
    target_dir = path.dirname(target_path)
    if not self.fs.exists(target_dir):
        try:
            self.fs.makedirs(target_dir)
        except OSError:
            pass

    guess = mimetypes.guess_type(source_path)
    content_type = guess[0]
    encoding = guess[1]

    if content_type not in self.gzip_file_match:
        logger.debug("Copying {}{} to {}{} because its filetype isn't on the whitelist".format(
            "osfs://", source_path, self.fs_name, target_path
        ))
        copy.copy_file("osfs:///", smart_text(source_path), self.fs, smart_text(target_path))
    elif encoding == 'gzip':
        logger.debug("Copying {}{} to {}{} because it's already gzipped".format(
            "osfs://", source_path, self.fs_name, target_path
        ))
        copy.copy_file("osfs:///", smart_text(source_path), self.fs, smart_text(target_path))
    else:
        logger.debug("Gzipping {}{} to {}{}".format(
            "osfs://", source_path, self.fs_name, target_path
        ))
        with open(source_path, 'rb') as source_file:
            data_buffer = six.BytesIO()
            kwargs = dict(
                filename=path.basename(target_path),
                mode='wb',
                fileobj=data_buffer
            )
            if float(sys.version[:3]) >= 2.7:
                kwargs['mtime'] = 0
            with gzip.GzipFile(**kwargs) as f:
                f.write(six.binary_type(source_file.read()))

            with self.fs.open(smart_text(target_path), 'wb') as outfile:
                outfile.write(data_buffer.getvalue())
                outfile.close()
Copies the provided file to the provided target directory. Gzips JavaScript, CSS and HTML and other files along the way.
def cmd_gimbal_point(self, args):
    if len(args) != 3:
        print("usage: gimbal point ROLL PITCH YAW")
        return
    (roll, pitch, yaw) = (float(args[0]), float(args[1]), float(args[2]))
    self.master.mav.mount_control_send(self.target_system,
                                       self.target_component,
                                       pitch * 100,
                                       roll * 100,
                                       yaw * 100,
                                       0)
control gimbal pointing
def _format_pair_no_equals(explode, separator, escape, key, value):
    if not value:
        return key
    return _format_pair(explode, separator, escape, key, value)
Format a key, value pair but don't include the equals sign when there is no value
def _send_packet(self, sid, pkt):
    encoded_packet = pkt.encode()
    if isinstance(encoded_packet, list):
        binary = False
        for ep in encoded_packet:
            self.eio.send(sid, ep, binary=binary)
            binary = True
    else:
        self.eio.send(sid, encoded_packet, binary=False)
Send a Socket.IO packet to a client.
def extract(pcmiter, samplerate, channels, duration=-1):
    extractor = _fplib.Extractor(samplerate, channels, duration)

    try:
        next_block = next(pcmiter)
    except StopIteration:
        raise ExtractionError()

    while True:
        cur_block = next_block
        try:
            next_block = next(pcmiter)
        except StopIteration:
            next_block = None
        done = next_block is None

        try:
            if extractor.process(cur_block, done):
                break
        except RuntimeError as exc:
            raise ExtractionError(exc.args[0])

        if done:
            raise ExtractionError()

    out = extractor.result()
    if out is None:
        raise ExtractionError()
    extractor.free()
    return out
Given a PCM data stream, extract fingerprint data from the audio. Returns a byte string of fingerprint data. Raises an ExtractionError if fingerprinting fails.
def parse(document, clean_html=True, unix_timestamp=False, encoding=None):
    if isinstance(clean_html, bool):
        cleaner = default_cleaner if clean_html else fake_cleaner
    else:
        cleaner = clean_html
    result = feedparser.FeedParserDict()
    result['feed'] = feedparser.FeedParserDict()
    result['entries'] = []
    result['bozo'] = 0
    try:
        parser = SpeedParser(document, cleaner, unix_timestamp, encoding)
        parser.update(result)
    except Exception as e:
        if isinstance(e, UnicodeDecodeError) and encoding is True:
            encoding = chardet.detect(document)['encoding']
            document = document.decode(encoding, 'replace').encode('utf-8')
            return parse(document, clean_html, unix_timestamp, encoding)
        import traceback
        result['bozo'] = 1
        result['bozo_exception'] = e
        result['bozo_tb'] = traceback.format_exc()
    return result
Parse a document and return a feedparser dictionary with attr key access. If clean_html is False, the html in the feed will not be cleaned. If clean_html is True, a sane version of lxml.html.clean.Cleaner will be used. If it is a Cleaner object, that cleaner will be used. If unix_timestamp is True, the date information will be a numerical unix timestamp rather than a struct_time. If encoding is provided, the encoding of the document will be manually set to that.
def set_default_timeout(timeout=None, connect_timeout=None, read_timeout=None):
    global DEFAULT_CONNECT_TIMEOUT
    global DEFAULT_READ_TIMEOUT

    DEFAULT_CONNECT_TIMEOUT = connect_timeout if connect_timeout is not None \
        else timeout
    DEFAULT_READ_TIMEOUT = read_timeout if read_timeout is not None \
        else timeout
The purpose of this function is to install default socket timeouts and retry policy for requests calls. Any requests issued through the requests wrappers defined in this module will have these automatically set, unless explicitly overridden. The default timeouts and retries set through this option apply to the entire process. For that reason, it is recommended that this function is only called once during startup, and from the main thread, before any other threads are spawned. :param timeout timeout for socket connections and reads in seconds. This is a convenience argument that applies the same default to both connection and read timeouts. :param connect_timeout timeout for socket connections in seconds. :param read_timeout timeout for socket reads in seconds.
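A short usage sketch of the two calling styles described above:

# One value applied to both connect and read timeouts.
set_default_timeout(timeout=5)

# Or set the two timeouts independently.
set_default_timeout(connect_timeout=3, read_timeout=30)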
def make_summary(self, find_ocv=False, find_ir=False,
                 find_end_voltage=False,
                 use_cellpy_stat_file=None, all_tests=True,
                 dataset_number=0, ensure_step_table=True,
                 convert_date=False):
    if self.tester == "arbin":
        convert_date = True

    if ensure_step_table is None:
        ensure_step_table = self.ensure_step_table

    if use_cellpy_stat_file is None:
        use_cellpy_stat_file = prms.Reader.use_cellpy_stat_file
        self.logger.debug("using use_cellpy_stat_file from prms")
        self.logger.debug(f"use_cellpy_stat_file: {use_cellpy_stat_file}")

    if all_tests is True:
        for j in range(len(self.datasets)):
            txt = "creating summary for file "
            test = self.datasets[j]
            if not self._is_not_empty_dataset(test):
                self.logger.info("empty test %i" % j)
                return
            if isinstance(test.loaded_from, (list, tuple)):
                for f in test.loaded_from:
                    txt += f
                    txt += "\n"
            else:
                txt += str(test.loaded_from)

            if not test.mass_given:
                txt += " mass for test %i is not given" % j
                txt += " setting it to %f mg" % test.mass
            self.logger.debug(txt)

            self._make_summary(j,
                               find_ocv=find_ocv,
                               find_ir=find_ir,
                               find_end_voltage=find_end_voltage,
                               use_cellpy_stat_file=use_cellpy_stat_file,
                               ensure_step_table=ensure_step_table,
                               convert_date=convert_date,
                               )
    else:
        self.logger.debug("creating summary for only one test")
        dataset_number = self._validate_dataset_number(dataset_number)
        if dataset_number is None:
            self._report_empty_dataset()
            return
        self._make_summary(dataset_number,
                           find_ocv=find_ocv,
                           find_ir=find_ir,
                           find_end_voltage=find_end_voltage,
                           use_cellpy_stat_file=use_cellpy_stat_file,
                           ensure_step_table=ensure_step_table,
                           convert_date=convert_date,
                           )
    return self
Convenience function that makes a summary of the cycling data.
def copy_cwl_files(from_dir=CWL_PATH, to_dir=None):
    cwl_files = glob.glob('{}{}*.cwl'.format(from_dir, os.sep))
    if len(cwl_files) > 0:
        create_dirs(to_dir)

        for fi in cwl_files:
            fo = os.path.join(to_dir, os.path.basename(fi))
            shutil.copy2(fi, fo)
    return len(cwl_files)
Copy cwl files to a directory where the cwl-runner can find them. Args: from_dir (str): Path to directory where to copy files from (default: the cwl directory of nlppln). to_dir (str): Path to directory where the files should be copied to (e.g., the CWL working directory).
def preprocess_search_hit(pid, record_hit, links_factory=None, **kwargs):
    links_factory = links_factory or (lambda x, **k: dict())
    record = dict(
        pid=pid,
        metadata=record_hit['_source'],
        links=links_factory(pid, record_hit=record_hit, **kwargs),
        revision=record_hit['_version'],
        created=None,
        updated=None,
    )
    for key in ['_created', '_updated']:
        if key in record['metadata']:
            record[key[1:]] = record['metadata'][key]
            del record['metadata'][key]
    return record
Prepare a record hit from Elasticsearch for serialization.
def create_or_update(cls, video, language_code, metadata, file_data=None):
    try:
        video_transcript = cls.objects.get(video=video, language_code=language_code)
        retrieved = True
    except cls.DoesNotExist:
        video_transcript = cls(video=video, language_code=language_code)
        retrieved = False

    for prop, value in six.iteritems(metadata):
        if prop in ['language_code', 'file_format', 'provider']:
            setattr(video_transcript, prop, value)

    transcript_name = metadata.get('file_name')
    try:
        if transcript_name:
            video_transcript.transcript.name = transcript_name
        elif file_data:
            with closing(file_data) as transcript_file_data:
                file_name = '{uuid}.{ext}'.format(uuid=uuid4().hex, ext=video_transcript.file_format)
                video_transcript.transcript.save(file_name, transcript_file_data)

        video_transcript.save()
    except Exception:
        logger.exception(
            '[VAL] Transcript save failed to storage for video_id "%s" language code "%s"',
            video.edx_video_id,
            language_code
        )
        raise

    return video_transcript, not retrieved
Create or update Transcript object. Arguments: video (Video): Video for which transcript is going to be saved. language_code (str): language code for (to be created/updated) transcript metadata (dict): A dict containing (to be overwritten) properties file_data (InMemoryUploadedFile): File data to be saved Returns: Returns a tuple of (video_transcript, created).
def _childgroup(self, children, grid):
    result = []
    index = 0
    hidden_fields = []
    for row in grid:
        child_row = []
        width_sum = 0
        for width, filled in row:
            width_sum += width
            if width_sum > self.num_cols:
                warnings.warn(u"It seems your grid configuration overlaps "
                              u"the bootstrap layout columns number. One of your "
                              u"lines is larger than {0}. You can increase this "
                              u"column number by compiling bootstrap css with "
                              u"lessc.".format(self.num_cols))
            if isinstance(filled, StaticWidget):
                child = filled
                child.width = width
            elif filled:
                try:
                    child = children[index]
                except IndexError:
                    warnings.warn(u"The grid items number doesn't match the "
                                  u"number of children of our mapping widget")
                    break
                if type(child.widget) == deform.widget.HiddenWidget:
                    hidden_fields.append(child)
                    index += 1
                    try:
                        child = children[index]
                    except IndexError:
                        warnings.warn(u"The grid items number doesn't match the "
                                      u"number of children of our mapping widget")
                        break
                child.width = width
                index += 1
            else:
                child = VoidWidget(width)
            child_row.append(child)
        if child_row != []:
            result.append(child_row)
    if index <= len(children):
        result.append(children[index:])
    if hidden_fields != []:
        result.append(hidden_fields)
    return result
Stores the children in a list following the grid's structure :param children: list of fields :param grid: a list of list corresponding of the layout to apply to the given children
async def _switch_dc(self, new_dc):
    self._log[__name__].info('Reconnecting to new data center %s', new_dc)
    dc = await self._get_dc(new_dc)

    self.session.set_dc(dc.id, dc.ip_address, dc.port)
    self._sender.auth_key.key = None
    self.session.auth_key = None
    self.session.save()
    await self._disconnect()
    return await self.connect()
Permanently switches the current connection to the new data center.
def compare_values(values0, values1):
    values0 = {v[0]: v[1:] for v in values0}
    values1 = {v[0]: v[1:] for v in values1}
    created = [(k, v[0], v[1]) for k, v in values1.items() if k not in values0]
    deleted = [(k, v[0], v[1]) for k, v in values0.items() if k not in values1]
    modified = [(k, v[0], v[1]) for k, v in values0.items()
                if v != values1.get(k, None)]
    return created, deleted, modified
Compares all the values of a single registry key.
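An illustrative run with made-up registry-style (name, type, data) tuples; note that keys missing from values1 appear in both deleted and modified:

old = [('Count', 'REG_DWORD', 1), ('Path', 'REG_SZ', 'C:\\old')]
new = [('Path', 'REG_SZ', 'C:\\new'), ('Flag', 'REG_DWORD', 0)]

created, deleted, modified = compare_values(old, new)
# created  -> [('Flag', 'REG_DWORD', 0)]
# deleted  -> [('Count', 'REG_DWORD', 1)]
# modified -> [('Count', 'REG_DWORD', 1), ('Path', 'REG_SZ', 'C:\\old')]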
def to_timedelta(value):
    if value is None:
        return None
    if isinstance(value, (six.integer_types, float)):
        return timedelta(microseconds=(float(value) / 10))
    match = _TIMESPAN_PATTERN.match(value)
    if match:
        if match.group(1) == "-":
            factor = -1
        else:
            factor = 1
        return factor * timedelta(
            days=int(match.group("d") or 0),
            hours=int(match.group("h")),
            minutes=int(match.group("m")),
            seconds=float(match.group("s")),
        )
    else:
        raise ValueError("Timespan value '{}' cannot be decoded".format(value))
Converts a string to a timedelta.
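Two illustrative calls, assuming _TIMESPAN_PATTERN accepts the usual [-][d.]hh:mm:ss[.fraction] timespan form; numeric inputs are treated as 100-nanosecond ticks:

from datetime import timedelta

to_timedelta(10_000_000)      # 10 million ticks -> timedelta(seconds=1)
to_timedelta("1.02:03:04.5")  # -> timedelta(days=1, hours=2, minutes=3, seconds=4.5)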
def process_value(self, name, value, module_name):
    if ":" in name:
        if module_name.split(" ")[0] in I3S_MODULE_NAMES + ["general"]:
            self.error("Only py3status modules can use obfuscated")
        if type(value).__name__ not in ["str", "unicode"]:
            self.error("Only strings can be obfuscated")
        (name, scheme) = name.split(":")
        if scheme == "base64":
            value = PrivateBase64(value, module_name)
        elif scheme == "hide":
            value = PrivateHide(value, module_name)
        else:
            self.error("Unknown scheme {} for data".format(scheme))
    return name, value
This method allows any encodings to be dealt with. Currently only base64 is supported. Note: If other encodings are added then this should be split so that there is a method for each encoding.
def _get_hdr_childcnt(self, goobj, ntgo):
    if 'childcnt' in self.present:
        return "c{N}".format(N=len(goobj.children))
    elif self.gosubdag.relationships and not goobj.children and ntgo.dcnt != 0:
        return "c0"
Get string representing count of children for this GO term.
def _is_pingable(ip):
    ping_cmd = ['ping', '-c', '5', '-W', '1', '-i', '0.2', ip]
    try:
        linux_utils.execute(ping_cmd, check_exit_code=True)
        return True
    except RuntimeError:
        LOG.warning("Cannot ping ip address: %s", ip)
        return False
Checks whether an IP address is reachable by pinging. Use linux utils to execute the ping (ICMP ECHO) command. Sends 5 packets with an interval of 0.2 seconds and timeout of 1 seconds. Runtime error implies unreachability else IP is pingable. :param ip: IP to check :return: bool - True or False depending on pingability.
def validate_arc_links_same_outline(sender, instance, *args, **kwargs):
    if instance.story_element_node:
        if instance.story_element_node.outline != instance.parent_outline:
            raise IntegrityError(_('An arc cannot be associated with an story element from another outline.'))
Evaluates attempts to link an arc to a story node from another outline.
def page(title, description, element_list=None, tab_list=None):
    _page = {
        'Type': 'Page',
        'Title': title,
        'Description': description,
        'Data': {},
    }
    if element_list is not None:
        if isinstance(element_list, list):
            _page['Data']['Elements'] = element_list
        else:
            _page['Data']['Elements'] = [element_list]
    if tab_list is not None:
        if isinstance(tab_list, list):
            _page['Data']['Tabs'] = tab_list
        else:
            _page['Data']['Tabs'] = [tab_list]
    return _page
Returns a dictionary representing a new page to display elements. This can be thought of as a simple container for displaying multiple types of information. The ``section`` method can be used to create separate tabs. Args: title: The title to display description: A description of the section element_list: The list of elements to display. If a single element is given it will be wrapped in a list. tab_list: A list of tabs to display. Returns: A dictionary with metadata specifying that it is to be rendered as a page containing multiple elements and/or tabs.
def data_to_sys_base(self):
    if not self.n or self._flags['sysbase'] is True:
        return
    self.copy_data_ext(model='Synchronous', field='Sn', dest='Sn', idx=self.gen)
    super(GovernorBase, self).data_to_sys_base()
    self._store['R'] = self.R
    self.R = self.system.mva * div(self.R, self.Sn)
Custom system base conversion function
def specific_file_rst_filename(self, source_filename: str) -> str:
    highest_code_to_target = relative_filename_within_dir(
        source_filename, self.highest_code_dir)
    bname = basename(source_filename)
    result = join(self.autodoc_rst_root_dir,
                  dirname(highest_code_to_target),
                  bname + EXT_RST)
    log.debug("Source {!r} -> RST {!r}", source_filename, result)
    return result
Gets the RST filename corresponding to a source filename. See the help for the constructor for more details. Args: source_filename: source filename within current project Returns: RST filename Note in particular: the way we structure the directories means that we won't get clashes between files with identical names in two different directories. However, we must also incorporate the original source filename, in particular for C++ where ``thing.h`` and ``thing.cpp`` must not generate the same RST filename. So we just add ``.rst``.
def _post(url, headers={}, data=None, files=None):
    try:
        response = requests.post(url, headers=headers, data=data, files=files,
                                 verify=VERIFY_SSL)
        return _process_response(response)
    except requests.exceptions.RequestException as e:
        _log_and_raise_exception('Error connecting with foursquare API', e)
Tries to POST data to an endpoint
def save(self, outfile, close_file=True, **kwargs):
    if isinstance(outfile, text_type) or isinstance(outfile, binary_type):
        fid = open(outfile, 'wb')
    else:
        fid = outfile

    root = self.get_root()
    html = root.render(**kwargs)
    fid.write(html.encode('utf8'))
    if close_file:
        fid.close()
Saves an Element into a file. Parameters ---------- outfile : str or file object The file (or filename) where you want to output the html. close_file : bool, default True Whether the file has to be closed after write.
def custom_prompt(msg, delims="", completer=lambda: None):
    try:
        orig_delims = readline.get_completer_delims()
        orig_completer = readline.get_completer()
        readline.set_completer_delims(delims)
        readline.set_completer(completer)
        try:
            ret = input(msg)
        finally:
            readline.set_completer_delims(orig_delims)
            readline.set_completer(orig_completer)
        return ret
    except EOFError:
        raise UserQuit()
Start up a prompt that with particular delims and completer
def is_cached(file_name):
    gml_file_path = join(join(expanduser('~'), OCTOGRID_DIRECTORY), file_name)
    return isfile(gml_file_path)
Check if a given file is available in the cache or not
def temp_connect(self, hardware: hc.API):
    old_hw = self._hw_manager.hardware
    try:
        self._hw_manager.set_hw(hardware)
        yield self
    finally:
        self._hw_manager.set_hw(old_hw)
Connect temporarily to the specified hardware controller. This should be used as a context manager: .. code-block :: python with ctx.temp_connect(hw): # do some tasks ctx.home() # after the with block, the context is connected to the same # hardware control API it was connected to before, even if # an error occurred in the code inside the with block
def init_app(self, app, datastore=None):
    datastore = datastore or self.datastore

    for key, value in default_config.items():
        app.config.setdefault(key, value)

    providers = dict()

    for key, config in app.config.items():
        if not key.startswith('SOCIAL_') or config is None or key in default_config:
            continue

        suffix = key.lower().replace('social_', '')
        default_module_name = 'flask_social.providers.%s' % suffix
        module_name = config.get('module', default_module_name)
        module = import_module(module_name)
        config = update_recursive(module.config, config)

        providers[config['id']] = OAuthRemoteApp(**config)
        providers[config['id']].tokengetter(_get_token)

    state = _get_state(app, datastore, providers)
    app.register_blueprint(create_blueprint(state, __name__))
    app.extensions['social'] = state
    return state
Initialize the application with the Social extension :param app: The Flask application :param datastore: Connection datastore instance
def short_full_symbol(self):
    if self._short_full_symbol is None:
        self._short_full_symbol = self._symbol_extract(cache.RE_FULL_CURSOR,
                                                       False, True)
    return self._short_full_symbol
Gets the full symbol excluding the character under the cursor.
def toPairTreePath(name):
    sName = sanitizeString(name)
    chunks = []
    for x in range(0, len(sName)):
        if x % 2:
            continue
        if (len(sName) - 1) == x:
            chunk = sName[x]
        else:
            chunk = sName[x: x + 2]
        chunks.append(chunk)
    return os.sep.join(chunks) + os.sep
Cleans a string, and then splits it into a pairtree path.
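A quick sketch of the expected output, assuming sanitizeString leaves a plain alphanumeric name unchanged and os.sep is "/":

toPairTreePath("abcde")
# -> "ab/cd/e/"  (two-character chunks; a trailing single character is kept as-is)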
async def update_object(obj, only=None):
    warnings.warn("update_object() is deprecated, Manager.update() "
                  "should be used instead",
                  DeprecationWarning)
    field_dict = dict(obj.__data__)
    pk_field = obj._meta.primary_key
    if only:
        field_dict = obj._prune_fields(field_dict, only)
    if not isinstance(pk_field, peewee.CompositeKey):
        field_dict.pop(pk_field.name, None)
    else:
        field_dict = obj._prune_fields(field_dict, obj.dirty_fields)
    rows = await update(obj.update(**field_dict).where(obj._pk_expr()))
    obj._dirty.clear()
    return rows
Update object asynchronously. :param obj: object to update :param only: list or tuple of fields to update; if `None` then all fields are updated This function does the same as `Model.save()`_ for an already saved object, but it doesn't invoke the ``save()`` method on the model class. That is important to know if you have overridden the save method for your model. .. _Model.save(): http://peewee.readthedocs.io/en/latest/peewee/api.html#Model.save
def _declarations_as_string(self, declarations):
    return ''.join('%s:%s%s;' % (
        d.name, d.value.as_css(),
        ' !' + d.priority if d.priority else '') for d in declarations)
Returns a list of declarations as a formatted CSS string :param declarations: The list of tinycss Declarations to format :type declarations: list of tinycss.css21.Declaration :returns: The CSS string for the declarations list :rtype: str
def to_excel(self, filename, recommended_only=False, include_io=True):
    df = self.to_df(recommended_only, include_io)
    if isinstance(filename, string_types):
        filename = os.path.expanduser(filename)
    df.to_excel(filename, index=False)
Return an Excel file for each model and dataset. Parameters ---------- filename : str or ExcelWriter object Either the file name (string) or an ExcelWriter object. recommended_only : bool, optional If True, only recommended models for each session are included. If no model is recommended, then a row with it's ID will be included, but all fields will be null. include_io : bool, optional If True, then the input/output files from BMDS will also be included, specifically the (d) input file and the out file. Returns ------- None
def get_athlete_stats(self, athlete_id=None):
    if athlete_id is None:
        athlete_id = self.get_athlete().id

    raw = self.protocol.get('/athletes/{id}/stats', id=athlete_id)
    return model.AthleteStats.deserialize(raw)
Returns Statistics for the athlete. athlete_id must be the id of the authenticated athlete or left blank. If it is left blank two requests will be made - first to get the authenticated athlete's id and second to get the Stats. http://strava.github.io/api/v3/athlete/#stats :return: A model containing the Stats :rtype: :py:class:`stravalib.model.AthleteStats`
def create_error(msg, cause=None):
    status_code = config.exc_to_code(cause)
    status_name = config.NAME_STATUS_CODES.get(status_code)
    if status_name == 'INVALID_ARGUMENT':
        return InvalidArgumentError(msg, cause=cause)
    else:
        return GaxError(msg, cause=cause)
Creates a ``GaxError`` or subclass. Attributes: msg (string): describes the error that occurred. cause (Exception, optional): the exception raised by a lower layer of the RPC stack (for example, gRPC) that caused this exception, or None if this exception originated in GAX. Returns: .GaxError: The exception that wraps ``cause``.
def _gen_param_code(name, doc, defaultValueStr):
    # NOTE: the original assigns a triple-quoted template string here (lost in
    # extraction) that defines the shared param class using the $name, $Name,
    # $doc and $defaultValueStr placeholders substituted below.
    template = ...

    Name = name[0].upper() + name[1:]
    return template \
        .replace("$name", name) \
        .replace("$Name", Name) \
        .replace("$doc", doc) \
        .replace("$defaultValueStr", str(defaultValueStr))
Generates Python code for a shared param class. :param name: param name :param doc: param doc :param defaultValueStr: string representation of the default value :return: code string
def writeDataTable(self, file, type):
    agents = self.experiment.agents
    numAgents = len(self.experiment.agents)

    colWidth = 8
    idxColWidth = 3

    sep = ("=" * idxColWidth) + " " + \
          ("=" * colWidth + " ") * numAgents + "\n"

    file.write(sep)
    file.write("..".rjust(idxColWidth) + " ")
    for agent in agents:
        file.write(agent.name[-colWidth:].center(colWidth) + " ")
    file.write("\n")
    file.write(sep)

    if agents:
        rows, _ = agents[0].history.getField(type).shape
    else:
        rows, _ = (0, 0)

    for sequence in range(min(rows, 999)):
        file.write(str(sequence + 1).rjust(idxColWidth) + " ")

        for agent in agents:
            field = agent.history.getField(type)
            file.write("%8.3f " % field[sequence, 0])

        file.write("\n")

    file.write(sep)
Writes agent data to a reST table. The 'type' argument may be 'state', 'action' or 'reward'.
def get_dependencies_from_json(ireq):
    if ireq.editable or not is_pinned_requirement(ireq):
        return

    if ireq.extras:
        return

    session = requests.session()
    atexit.register(session.close)
    version = str(ireq.req.specifier).lstrip("=")

    def gen(ireq):
        info = None
        try:
            info = session.get(
                "https://pypi.org/pypi/{0}/{1}/json".format(ireq.req.name, version)
            ).json()["info"]
        finally:
            session.close()

        requires_dist = info.get("requires_dist", info.get("requires"))
        if not requires_dist:
            return

        for requires in requires_dist:
            i = pip_shims.shims.InstallRequirement.from_line(requires)
            if not _marker_contains_extra(i):
                yield format_requirement(i)

    if ireq not in DEPENDENCY_CACHE:
        try:
            reqs = DEPENDENCY_CACHE[ireq] = list(gen(ireq))
        except JSONDecodeError:
            return
        req_iter = iter(reqs)
    else:
        req_iter = gen(ireq)

    return set(req_iter)
Retrieves dependencies for the given install requirement from the json api. :param ireq: A single InstallRequirement :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement` :return: A set of dependency lines for generating new InstallRequirements. :rtype: set(str) or None
def is_port_profile_created(self, vlan_id, device_id):
    entry = self.session.query(ucsm_model.PortProfile).filter_by(
        vlan_id=vlan_id, device_id=device_id).first()
    return entry and entry.created_on_ucs
Indicates if port profile has been created on UCS Manager.
def run(self):
    logging.debug("Querying server {0}".format(self.server['ip']))

    try:
        rsvr = dns.resolver.Resolver()
        rsvr.nameservers = [self.server['ip']]
        rsvr.lifetime = 5
        rsvr.timeout = 5
        qry = rsvr.query(self.domain, self.recType)
        results = sorted([r.to_text() for r in qry])
        success = True
    except dns.resolver.NXDOMAIN:
        success = False
        results = ['NXDOMAIN']
    except dns.resolver.NoNameservers:
        success = False
        results = ['No Nameservers']
    except dns.resolver.NoAnswer:
        success = False
        results = ['No Answer']
    except dns.resolver.Timeout:
        success = False
        results = ['Server Timeout']

    self.result = {
        'server': self.server,
        'results': results,
        'success': success
    }
Do a single DNS query against a server
def corr_flat_dir(a1, a2):
    n = len(a1)
    if len(a2) != n:
        raise BCTParamError("Cannot calculate flattened correlation on "
                            "matrices of different size")
    ix = np.logical_not(np.eye(n))
    return np.corrcoef(a1[ix].flat, a2[ix].flat)[0][1]
Returns the correlation coefficient between two flattened adjacency matrices. Similarity metric for weighted matrices. Parameters ---------- A1 : NxN np.ndarray directed matrix 1 A2 : NxN np.ndarray directed matrix 2 Returns ------- r : float Correlation coefficient describing edgewise similarity of a1 and a2
def unpack_flags(value, flags):
    try:
        return [flags[value]]
    except KeyError:
        return [flags[k] for k in sorted(flags.keys()) if k & value > 0]
Multiple flags might be packed in the same field.
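An illustrative call with a made-up flag table:

FLAGS = {1: 'READ', 2: 'WRITE', 4: 'EXEC'}

unpack_flags(2, FLAGS)  # exact key match          -> ['WRITE']
unpack_flags(5, FLAGS)  # bitwise unpack of 1 | 4  -> ['READ', 'EXEC']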
def base_object(self, data):
    obj = {'id': data.get(self.id)}
    if self.parent is not None:
        obj['$parent'] = data.get(self.parent.id)
    return obj
Make sure to return all the existing filter fields for query results.
def crosscorr(self, signal, lag=0):
    from scipy.linalg import norm

    s = asarray(signal)
    s = s - mean(s)
    s = s / norm(s)

    if size(s) != size(self.index):
        raise Exception('Size of signal to cross correlate with, %g, '
                        'does not match size of series' % size(s))

    if lag != 0:
        shifts = range(-lag, lag + 1)
        d = len(s)
        m = len(shifts)
        sshifted = zeros((m, d))
        for i in range(0, len(shifts)):
            tmp = roll(s, shifts[i])
            if shifts[i] < 0:
                tmp[(d + shifts[i]):] = 0
            if shifts[i] > 0:
                tmp[:shifts[i]] = 0
            sshifted[i, :] = tmp
        s = sshifted
    else:
        shifts = [0]

    def get(y, s):
        y = y - mean(y)
        n = norm(y)
        if n == 0:
            b = zeros((s.shape[0],))
        else:
            y /= n
            b = dot(s, y)
        return b

    return self.map(lambda x: get(x, s), index=shifts)
Cross correlate series data against another signal. Parameters ---------- signal : array Signal to correlate against (must be 1D). lag : int Range of lags to consider, will cover (-lag, +lag).
def parse(self, data):
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Entity ID already initialized')

    (self.flags, self.identifier, self.suffix) = struct.unpack_from(self.FMT, data, 0)

    self._initialized = True
Parse the passed in data into a UDF Entity ID. Parameters: data - The data to parse. Returns: Nothing.
def transform_audio(self, y):
    n_frames = self.n_frames(get_duration(y=y, sr=self.sr))

    D = stft(y, hop_length=self.hop_length, n_fft=self.n_fft)
    D = fix_length(D, n_frames)

    mag, phase = magphase(D)
    if self.log:
        mag = amplitude_to_db(mag, ref=np.max)

    return {'mag': mag.T[self.idx].astype(np.float32),
            'phase': np.angle(phase.T)[self.idx].astype(np.float32)}
Compute the STFT magnitude and phase. Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2) STFT magnitude data['phase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2) STFT phase
def namedb_get_namespace_at(cur, namespace_id, block_number, include_expired=False):
    if not include_expired:
        namespace_rec = namedb_get_namespace(cur, namespace_id, block_number,
                                             include_expired=False,
                                             include_history=False,
                                             only_revealed=False)
        if namespace_rec is None:
            return None

    history_rows = namedb_get_record_states_at(cur, namespace_id, block_number)
    if len(history_rows) == 0:
        return None
    else:
        return history_rows
Get the sequence of states that a namespace record was in at a particular block height. There can be more than one if the namespace changed during the block. Returns only unexpired namespaces by default. Can return expired namespaces with include_expired=True
def fetch_internal(item, request):
    from flask import make_response
    from werkzeug.test import EnvironBuilder
    from dpxdt.server import app

    environ_base = {
        'REMOTE_ADDR': '127.0.0.1',
    }

    data = request.get_data()
    if data and not isinstance(data, str):
        data = ''.join(list(data))

    builder = EnvironBuilder(
        path=request.get_selector(),
        base_url='%s://%s' % (request.get_type(), request.get_host()),
        method=request.get_method(),
        data=data,
        headers=request.header_items(),
        environ_base=environ_base)

    with app.request_context(builder.get_environ()):
        response = make_response(app.dispatch_request())

    LOGGER.info('"%s" %s via internal routing',
                request.get_selector(), response.status_code)

    item.status_code = response.status_code
    item.content_type = response.mimetype

    if item.result_path:
        with open(item.result_path, 'wb') as result_file:
            for piece in response.iter_encoded():
                result_file.write(piece)
    else:
        item.data = response.get_data()

    return item
Fetches the given request by using the local Flask context.
def this_boot(self, bootid=None):
    if bootid is None:
        bootid = _id128.get_boot().hex
    else:
        bootid = getattr(bootid, 'hex', bootid)
    self.add_match(_BOOT_ID=bootid)
Add match for _BOOT_ID for current boot or the specified boot ID. If specified, bootid should be either a UUID or a 32 digit hex number. Equivalent to add_match(_BOOT_ID='bootid').
def mkdir(dir_path, user=None, group=None, mode=None):
    dir_path = os.path.expanduser(dir_path)
    directory = os.path.normpath(dir_path)

    if not os.path.isdir(directory):
        makedirs_perms(directory, user, group, mode)

    return True
Ensure that a directory is available. CLI Example: .. code-block:: bash salt '*' file.mkdir /opt/jetty/context
def write(self, value):
    if self.capacity > 0 and self.strategy == 0:
        len_value = len(value)
        if len_value >= self.capacity:
            needs_new_strategy = True
        else:
            self.seek(0, 2)
            needs_new_strategy = (self.tell() + len_value) >= self.capacity

        if needs_new_strategy:
            self.makeTempFile()

    if not isinstance(value, six.binary_type):
        value = value.encode('utf-8')

    self._delegate.write(value)
If capacity != -1 and the length of the file exceeds capacity, it is time to switch.
def compat_serializer_attr(serializer, obj):
    if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:
        for i in serializer.instance:
            if i.id == obj.id:
                return i
    else:
        return obj
Required only for DRF 3.1, which does not make dynamically added attributes available in obj in the serializer. This is a quick solution but works without breaking anything.
def accept(self, data):
    self.raw_data = MultiDict(data)
    self.errors = {}
    for field in self.fields:
        if field.writable:
            self.python_data.update(field.accept())
        else:
            for name in field.field_names:
                subfield = self.get_field(name)
                value = self.python_data[subfield.name]
                subfield.set_raw_value(self.raw_data,
                                       subfield.from_python(value))
    return self.is_valid
Try to accept a MultiDict-like object and return whether it is valid.
def _make_eof_intr():
    global _EOF, _INTR
    if (_EOF is not None) and (_INTR is not None):
        return

    try:
        from termios import VEOF, VINTR
        fd = None
        for name in 'stdin', 'stdout':
            stream = getattr(sys, '__%s__' % name, None)
            if stream is None or not hasattr(stream, 'fileno'):
                continue
            try:
                fd = stream.fileno()
            except ValueError:
                continue
        if fd is None:
            raise ValueError("No stream has a fileno")
        intr = ord(termios.tcgetattr(fd)[6][VINTR])
        eof = ord(termios.tcgetattr(fd)[6][VEOF])
    except (ImportError, OSError, IOError, ValueError, termios.error):
        try:
            from termios import CEOF, CINTR
            (intr, eof) = (CINTR, CEOF)
        except ImportError:
            (intr, eof) = (3, 4)

    _INTR = _byte(intr)
    _EOF = _byte(eof)
Set constants _EOF and _INTR. This avoids doing potentially costly operations on module load.
def unique_reactions(df):
    reaction_list = []
    for idx, entry in enumerate(df['reactants']):
        reaction = []
        for x in entry:
            reaction.append(x)
        reaction.append('-->')
        for y in df['products'][idx]:
            reaction.append(y)
        reaction_list.append(reaction)
    string_list = [str(reaction) for reaction in reaction_list]
    string_list = sorted(list(set(string_list)))
    reaction_list = [ast.literal_eval(entry) for entry in string_list]
    return reaction_list
Identifies unique elementary reactions in data frame. Parameters ---------- df : Data frame. Returns ------- reaction_list : List of unique elementary reactions.
def is_normalized_address(value: Any) -> bool:
    if not is_address(value):
        return False
    else:
        return value == to_normalized_address(value)
Returns whether the provided value is an address in its normalized form.
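For example (addresses are illustrative), the normalized form is the all-lowercase hex representation, so a mixed-case checksummed address is rejected:

is_normalized_address("0xd3cda913deb6f67967b99d67acdfa1712c293601")  # True
is_normalized_address("0xd3CdA913deB6f67967B99D67aCDFa1712C293601")  # False (checksummed case)
is_normalized_address("not an address")                              # False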
def stop(self):
    self._done()
    if self._server:
        self._server.stop()
        self._server = None
    log.info('Stop!')
Stop all tasks, and the local proxy server if it's running.
def format_label(self, field, counter):
    return '<label for="id_formfield_%s" %s>%s</label>' % (
        counter,
        field.field.required and 'class="required"',
        field.label)
Format the label for each field
def dp020(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `dp020`'.format(value))

    self._dp020 = value
Corresponds to IDD Field `dp020` Dew-point temperature corresponding to 2.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `dp020` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def css_load_time(self):
    load_times = self.get_load_times('css')
    return round(mean(load_times), self.decimal_precision)
Returns aggregate css load time for all pages.
def register_proper_name(self, name):
    with self.proper_names_db_path.open("a") as f:
        f.write(u"{0}\n".format(name))
Registers a proper name to the database.
def html(self) -> str:
    if self.parentNode and self.parentNode._should_escape_text:
        return html.escape(self.data)
    return self.data
Return html-escaped string representation of this node.
def ftdetect(filename):
    _, extension = os.path.splitext(filename)
    md_exts = ['.md', '.markdown', '.mkd', '.mdown', '.mkdn', '.Rmd']
    nb_exts = ['.ipynb']
    if extension in md_exts:
        return 'markdown'
    elif extension in nb_exts:
        return 'notebook'
    else:
        return None
Determine if filename is markdown or notebook, based on the file extension.
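A few illustrative calls:

ftdetect("notes.Rmd")    # -> 'markdown'
ftdetect("demo.ipynb")   # -> 'notebook'
ftdetect("script.py")    # -> None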
def get_source_dnde(self, name):
    name = self.roi.get_source_by_name(name).name

    if self.roi[name]['SpectrumType'] != 'FileFunction':
        src = self.components[0].like.logLike.getSource(str(name))
        spectrum = src.spectrum()
        file_function = pyLike.FileFunction_cast(spectrum)
        loge = file_function.log_energy()
        logdnde = file_function.log_dnde()

        loge = np.log10(np.exp(loge))
        dnde = np.exp(logdnde)

        return loge, dnde
    else:
        ebinsz = (self.log_energies[-1] -
                  self.log_energies[0]) / self.enumbins
        loge = utils.extend_array(self.log_energies, ebinsz, 0.5, 6.5)

        dnde = np.array([self.like[name].spectrum()(pyLike.dArg(10 ** egy))
                         for egy in loge])

        return loge, dnde
Return differential flux distribution of a source. For sources with FileFunction spectral type this returns the internal differential flux array. Returns ------- loge : `~numpy.ndarray` Array of energies at which the differential flux is evaluated (log10(E/MeV)). dnde : `~numpy.ndarray` Array of differential flux values (cm^{-2} s^{-1} MeV^{-1}) evaluated at energies in ``loge``.
def _resolve_args(self, env, args):
    pos_args, kw_args = args

    def check_value(v):
        if isinstance(v, AstTypeRef):
            return self._resolve_type(env, v)
        else:
            return v

    new_pos_args = [check_value(pos_arg) for pos_arg in pos_args]
    new_kw_args = {k: check_value(v) for k, v in kw_args.items()}
    return new_pos_args, new_kw_args
Resolves type references in data type arguments to data types in the environment.
def _download_wrapper(self, url, *args, **kwargs):
    try:
        return url, self._file_downloader.download(url, *args, **kwargs)
    except Exception as e:
        logging.error("AbstractDownloader: %s", traceback.format_exc())
        return url, e
Actual download call. Calls the underlying file downloader, catches all exceptions and returns the result.
def finish(self, message: Optional[Message_T] = None, **kwargs) -> None:
    if message:
        asyncio.ensure_future(self.send(message, **kwargs))
    raise _FinishException
Finish the session.
def _update_project_watch(config, task_presenter, results,
                          long_description, tutorial):
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    path = os.getcwd()
    event_handler = PbsHandler(config, task_presenter, results,
                               long_description, tutorial)
    observer = Observer()
    observer.schedule(event_handler, path, recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Update a project in a loop.
def _load_data(data_file, data_type):
    raw_data = data_file.read()
    if data_type is None:
        data_type = data_file.name.split('.')[-1]

    data = []
    if data_type == 'json':
        data = json.loads(raw_data)
        return data
    elif data_type == 'csv':
        csv_data = StringIO(raw_data)
        reader = csv.DictReader(csv_data, delimiter=',')
        for line in reader:
            data.append(line)
        return data
    elif data_type in ['xlsx', 'xlsm', 'xltx', 'xltm']:
        excel_data = StringIO(raw_data)
        wb = openpyxl.load_workbook(excel_data)
        ws = wb.active

        headers = []
        for row in ws.iter_rows(max_row=1):
            for cell in row:
                tmp = '_'.join(cell.value.split(" ")).lower()
                headers.append(tmp)

        for row in ws.iter_rows(row_offset=1):
            values = []
            for cell in row:
                values.append(cell.value)
            tmp = dict(itertools.izip(headers, values))
            if len(values) == len(headers) and not row_empty(values):
                data.append(tmp)
        return data
    elif data_type == 'po':
        po = polib.pofile(raw_data)
        for entry in po.untranslated_entries():
            data.append(entry.__dict__)
        return data
    elif data_type == 'properties':
        lines = raw_data.split('\n')
        for l in lines:
            if l:
                var_id, string = l.split('=')
                tmp = dict(var_id=var_id, string=string)
                data.append(tmp)
        return data
    else:
        return data
Load data from CSV, JSON, Excel, ..., formats.
def remove_listener(self, registration_id):
    try:
        self.listeners.pop(registration_id)
        return True
    except KeyError:
        return False
Removes the specified membership listener. :param registration_id: (str), registration id of the listener to be deleted. :return: (bool), ``true`` if the registration is removed, ``false`` otherwise.
def delete_project_avatar(self, project, avatar):
    url = self._get_url('project/' + project + '/avatar/' + avatar)
    return self._session.delete(url)
Delete a project's avatar. :param project: ID or key of the project to delete the avatar from :param avatar: ID of the avatar to delete
def _backup_file(path):
    backup_base = '/var/local/woven-backup'
    backup_path = ''.join([backup_base, path])
    if not exists(backup_path):
        directory = ''.join([backup_base, os.path.split(path)[0]])
        sudo('mkdir -p %s' % directory)
        sudo('cp %s %s' % (path, backup_path))
Backup a file but never overwrite an existing backup file
def generate_random_type(valid):
    type = choice(['int', 'str'])
    r = lambda: randrange(-1000000000, 1000000000)
    if type == 'int':
        return int, (r() if valid else str(r()) for i in itertools.count())
    elif type == 'str':
        return str, (str(r()) if valid else r() for i in itertools.count())
    else:
        raise AssertionError('!')
Generate a random type and samples for it. :param valid: Generate valid samples? :type valid: bool :return: type, sample-generator :rtype: type, generator
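A small usage sketch; the sample generators are infinite, so only draw what you need:

typ, samples = generate_random_type(valid=True)
assert isinstance(next(samples), typ)          # valid samples match the reported type

typ, bad_samples = generate_random_type(valid=False)
assert not isinstance(next(bad_samples), typ)  # invalid samples deliberately mismatch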
def command(self, request_type, uri, payload):
    self.command_count += 1

    if payload is None:
        payload = {}

    message = {
        'id': "{}_{}".format(type, self.command_count),
        'type': request_type,
        'uri': "ssap://{}".format(uri),
        'payload': payload,
    }

    self.last_response = None
    try:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(
            asyncio.wait_for(self._command(message),
                             self.timeout_connect, loop=loop))
    finally:
        loop.close()
Build and send a command.
def _create_object(self, data, request):
    if request.method.upper() == 'POST' and self.post_factory:
        fac_func = self.post_factory.create
    else:
        fac_func = self.factory.create

    if isinstance(data, (list, tuple)):
        return map(fac_func, data)
    else:
        return fac_func(data)
Create a python object from the given data. This will use ``self.factory`` object's ``create()`` function to create the data. If no factory is defined, this will simply return the same data that was given.
def load(self):
    if self.is_persisted:
        with open(self.file_name, 'rb') as f:
            self.object_property = pickle.load(f)
a private method that loads an object from the filesystem
def inet_ntop(address_family, packed_ip):
    global __inet_ntop
    if __inet_ntop is None:
        if hasattr(socket, 'inet_ntop'):
            __inet_ntop = socket.inet_ntop
        else:
            from ospd import win_socket
            __inet_ntop = win_socket.inet_ntop
    return __inet_ntop(address_family, packed_ip)
A platform independent version of inet_ntop
def _create_inbound_thread(self):
    inbound_thread = threading.Thread(target=self._process_incoming_data,
                                      name=__name__)
    inbound_thread.daemon = True
    inbound_thread.start()
    return inbound_thread
Internal Thread that handles all incoming traffic. :rtype: threading.Thread
def apply(self, node):
    new_node = self.run(node)
    return self.update, new_node
Apply transformation and return if an update happened.
def set_dtreat_interp_indch(self, indch=None):
    lC = [indch is None,
          type(indch) in [np.ndarray, list],
          type(indch) is dict]
    assert any(lC)
    if lC[2]:
        lc = [type(k) is int and k < self._ddataRef['nt'] for k in indch.keys()]
        assert all(lc)
        for k in indch.keys():
            assert hasattr(indch[k], '__iter__')
            indch[k] = _format_ind(indch[k], n=self._ddataRef['nch'])
    elif lC[1]:
        indch = np.asarray(indch)
        assert indch.ndim == 1
        indch = _format_ind(indch, n=self._ddataRef['nch'])
    self._dtreat['interp-indch'] = indch
    self._ddata['uptodate'] = False
Set the indices of the channels for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices of channels => interpolate data at these channels for all times - A dict with: * keys = int indices of times * values = array of int indices of chan. for which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X']
def _get_record(self, record_type):
    if (not self.has_record_type(record_type) and
            record_type.get_identifier() not in self._record_type_data_sets):
        raise errors.Unsupported()
    if str(record_type) not in self._records:
        record_initialized = self._init_record(str(record_type))
        if record_initialized and str(record_type) not in self._my_map['recordTypeIds']:
            self._my_map['recordTypeIds'].append(str(record_type))
    return self._records[str(record_type)]
This overrides _get_record in osid.Extensible. Perhaps we should leverage it somehow?