Dataset columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k)
700
def predict_variant_effect_on_transcript_or_failure(variant, transcript):
    try:
        return predict_variant_effect_on_transcript(
            variant=variant,
            transcript=transcript)
    except (AssertionError, ValueError) as error:
        logger.warn(
            "Encountered error annotating %s for %s: %s",
            variant,
            transcript,
            error)
        return Failure(variant, transcript)
Try predicting the effect of a variant on a particular transcript but suppress raised exceptions by converting them into `Failure` effect values.
701
def generate_safemode_windows(): try: import pyblish import pyblish_qml import PyQt5 except ImportError: return sys.stderr.write( "Run this in a terminal with access to " "the Pyblish libraries and PyQt5.\n") template = r values = {} for lib in (pyblish, pyblish_qml, PyQt5): values[lib.__name__] = os.path.dirname(os.path.dirname(lib.__file__)) values["python"] = os.path.dirname(sys.executable) with open("run.bat", "w") as f: print("Writing %s" % template.format(**values)) f.write(template.format(**values))
Produce batch file to run QML in safe-mode Usage: $ python -c "import compat;compat.generate_safemode_windows()" $ run.bat
702
def connect(self, interface=None):
    if self._simulate:
        return True
    if not interface:
        match = self._autodiscover_usb()
        self._log.debug("Auto-discovered USB port: %s" % match)
    else:
        self.USB_PORT = interface
    try:
        self._conn = serial.Serial(self.USB_PORT,
                                   baudrate=self.BAUDRATE,
                                   bytesize=self.BYTE_SIZE,
                                   parity=self.PARITY,
                                   stopbits=self.STOPBITS,
                                   timeout=self.TIMEOUT)
    except serial.serialutil.SerialException as e:
        raise SerialConnectionError(str(e))
    self._log.debug("Serial connection set")
    if not self._conn.isOpen():
        self._conn.open()
        self._log.debug("Serial connection opened")
    return True
Connect to the USB for the hottop. Attempt to discover the USB port used for the Hottop and then form a connection using the serial library. :returns: bool :raises SerialConnectionError:
703
def hash_for_signing(self, msg):
    magic = self.msg_magic_for_netcode()
    fd = io.BytesIO()
    stream_satoshi_string(fd, magic.encode())
    stream_satoshi_string(fd, msg.encode())
    return from_bytes_32(double_sha256(fd.getvalue()))
Return a hash of msg, according to odd bitcoin method: double SHA256 over a bitcoin encoded stream of two strings: a fixed magic prefix and the actual message.
704
def add_args(parser, positional=False): group = parser.add_argument_group("read loading") group.add_argument("reads" if positional else "--reads", nargs="+", default=[], help="Paths to bam files. Any number of paths may be specified.") group.add_argument( "--read-source-name", nargs="+", help="Names for each read source. The number of names specified " "must match the number of bam files. If not specified, filenames are " "used for names.") group = parser.add_argument_group( "read filtering", "A number of read filters are available. See the pysam " "documentation (http://pysam.readthedocs.org/en/latest/api.html) " "for details on what these fields mean. When multiple filter " "options are specified, reads must match *all* filters.") for (name, (kind, message, function)) in READ_FILTERS.items(): extra = {} if kind is bool: extra["action"] = "store_true" extra["default"] = None elif kind is int: extra["type"] = int extra["metavar"] = "N" elif kind is str: extra["metavar"] = "STRING" group.add_argument("--" + name.replace("_", "-"), help=message, **extra)
Extends a commandline argument parser with arguments for specifying read sources.
705
def pkey(self):
    if self._pkey is None:
        self._pkey = self._get_pkey()
    return self._pkey
Returns the private key for quick authentication on the SSH server.
706
def rename(self, target):
    if self._closed:
        self._raise_closed()
    self._accessor.rename(self, target)
Rename this path to the given path.
707
def has_button(self, button): rc = self._libinput.libinput_device_pointer_has_button( self._handle, button) assert rc >= 0, return bool(rc)
Check if this device has a given button. Args: button (int): Button to check for, see ``input.h`` for button definitions. Returns: bool: :obj:`True` if the device has this button, :obj:`False` if it does not. Raises: AssertionError
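As a usage illustration (not from the source): button codes come from the Linux ``input.h`` header the docstring references; for example, BTN_LEFT is 0x110. The ``device`` object below is a hypothetical, already-initialised libinput pointer device.

BTN_LEFT = 0x110  # value from linux/input-event-codes.h

# `device` is assumed to be an existing pointer device wrapper.
if device.has_button(BTN_LEFT):
    print("pointer reports a left button")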
708
def feedkeys(self, keys, options=, escape_csi=True): return self.request(, keys, options, escape_csi)
Push `keys` to Nvim user input buffer. Options can be a string with the following character flags: - 'm': Remap keys. This is default. - 'n': Do not remap keys. - 't': Handle keys as if typed; otherwise they are handled as if coming from a mapping. This matters for undo, opening folds, etc.
709
def convert_tensor_to_label(scope, operator, container): TensorToLabelab model_type = operator.raw_operator.WhichOneof() if model_type == : model = operator.raw_operator.neuralNetworkClassifier if model.WhichOneof() == : labels = list(s.encode() for s in model.stringClassLabels.vector) label_type = onnx_proto.TensorProto.STRING elif model.WhichOneof() == : labels = list(int(i) for i in model.int64ClassLabels.vector) label_type = onnx_proto.TensorProto.INT64 else: raise ValueError() elif model_type == : model = operator.raw_operator.pipelineClassifier if model.WhichOneof() == : labels = list(s.encode() for s in model.pipelineClassifier.stringClassLabels.vector) label_type = onnx_proto.TensorProto.STRING elif model.WhichOneof() == : labels = list(int(i) for i in model.int64ClassLabels.vector) label_type = onnx_proto.TensorProto.INT64 else: raise ValueError() else: raise ValueError() label_loader_name = scope.get_unique_operator_name() label_buffer_name = scope.get_unique_variable_name() label_loader_value = helper.make_tensor(label_buffer_name, label_type, [len(labels)], labels) apply_constant(scope, [label_buffer_name], container, operator_name=label_loader_name, value=label_loader_value) label_id_extractor_name = scope.get_unique_operator_name() label_id_extractor_attrs = {: label_id_extractor_name} label_id_extractor_attrs[] = 1 label_id_extractor_attrs[] = 1 extracted_id_name = scope.get_unique_variable_name() container.add_node(, [operator.inputs[0].full_name], [extracted_id_name], **label_id_extractor_attrs) label_selector_name = scope.get_unique_operator_name() label_selector_attrs = {: label_selector_name} container.add_node(, [label_buffer_name, extracted_id_name], [operator.outputs[0].full_name], op_domain=, **label_selector_attrs)
This converter tries to convert a dummy operator 'TensorToLabel' into a sequence of some ONNX operators. Those operators are used to extract the label with the highest probability for doing a prediction. We assume that the elements in the given probability tensor are aligned with the class labels specified in the CoreML model. That is, if you have a class label vector ['a', 'b'] in our CoreML classifier, the first (and the only) input of this operator should be [probability_of_class_a, probability_of_class_b]. Assume that we have C classes with batch size N (N must be 1. If not, the output class probabilities need to be encoded as a sequence of dictionary, which is not allowed in ONNX). The ONNX computation graph of this operator may look like Probability tensor [1, C] (the variable defined at operator.inputs[0]) | v ArgMax LoadConstant (its attribute is extracted from | | operator.raw_operator, which is a | | CoreML classifier) | | v | best index [1] | | | v v ArrayFeatureExtractor <-------------------- a 1-D tensor of class labels [C] | v predicted label [1]
710
def set_break( self, filename, lineno=None, temporary=False, cond=None, funcname=None ): log.info( % (filename, lineno, temporary, cond, funcname) ) breakpoint = self.get_break( filename, lineno, temporary, cond, funcname ) self.breakpoints.add(breakpoint) log.info( % breakpoint) return breakpoint
Put a breakpoint for filename
711
def clean_translation(self): translation = self.cleaned_data[] if self.instance and self.instance.content_object: obj = self.instance.content_object field = obj._meta.get_field(self.instance.field) max_length = field.max_length if max_length and len(translation) > max_length: raise forms.ValidationError( _( ) % { : len(translation), : max_length, } ) else: raise forms.ValidationError( _( ) ) return translation
Do not allow translations longer than the max_length of the field being translated.
712
def update(self):
    self._attrs = self._session.refresh_attributes(self.name)
    self._attrs = assert_is_dict(self._attrs)
    if self.base_station:
        self.base_station.update()
Update object properties.
713
def changes(self): output = [] if self.status() is self.UNMODIFIED: output = [self.formatter % (, self.key, self.old_value)] elif self.status() is self.ADDED: output.append(self.formatter % (, self.key, self.new_value)) elif self.status() is self.REMOVED: output.append(self.formatter % (, self.key, self.old_value)) elif self.status() is self.MODIFIED: output.append(self.formatter % (, self.key, self.old_value)) output.append(self.formatter % (, self.key, self.new_value)) return output
Returns a list of changes to represent the diff between old and new value. Returns: list: [string] representation of the change (if any) between old and new value
714
def reload(self):
    self.rows = []
    for r in self.iterload():
        self.addRow(r)
Loads rows and/or columns. Override in subclass.
715
def lookup_hist(self, mh):
    result = mh.similar_blank_histogram()
    points = np.stack([mh.all_axis_bin_centers(i)
                       for i in range(mh.dimensions)]).reshape(mh.dimensions, -1)
    values = self.lookup(*points)
    result.histogram = values.reshape(result.histogram.shape)
    return result
Return histogram within binning of Histdd mh, with values looked up in this histogram. This is not rebinning: no interpolation /renormalization is performed. It's just a lookup.
716
def username(self): entry = self._proto.commandQueueEntry if entry.HasField(): return entry.username return None
The username of the issuer.
717
def calculate_file_distances(dicom_files, field_weights=None, dist_method_cls=None, **kwargs): if dist_method_cls is None: dist_method = LevenshteinDicomFileDistance(field_weights) else: try: dist_method = dist_method_cls(field_weights=field_weights, **kwargs) except: log.exception( .format(dist_method_cls, kwargs)) dist_dtype = np.float16 n_files = len(dicom_files) try: file_dists = np.zeros((n_files, n_files), dtype=dist_dtype) except MemoryError as mee: import scipy.sparse file_dists = scipy.sparse.lil_matrix((n_files, n_files), dtype=dist_dtype) for idxi in range(n_files): dist_method.set_dicom_file1(dicom_files[idxi]) for idxj in range(idxi+1, n_files): dist_method.set_dicom_file2(dicom_files[idxj]) if idxi != idxj: file_dists[idxi, idxj] = dist_method.transform() return file_dists
Calculates the DicomFileDistance between all files in dicom_files, using a weighted Levenshtein measure between all field names in field_weights and their corresponding weights. Parameters ---------- dicom_files: iterable of str Dicom file paths field_weights: dict of str to float A dict with header field names to float scalar values, that indicate a distance measure ratio for the Levenshtein distance averaging of all the header field names in it. e.g., {'PatientID': 1} dist_method_cls: DicomFileDistance class Distance method object to compare the files. If None, the default DicomFileDistance method using Levenshtein distance between the field_weights will be used. kwargs: DicomFileDistance instantiation named arguments Apart from the field_weights argument. Returns ------- file_dists: np.ndarray or scipy.sparse.lil_matrix of shape NxN Levenshtein distances between each of the N items in dicom_files.
718
def pcolormesh(x, y, z, ax, infer_intervals=None, **kwargs): x = np.asarray(x) if infer_intervals is None: if hasattr(ax, ): if len(x.shape) == 1: infer_intervals = True else: infer_intervals = False else: infer_intervals = True if (infer_intervals and ((np.shape(x)[0] == np.shape(z)[1]) or ((x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1])))): if len(x.shape) == 1: x = _infer_interval_breaks(x, check_monotonic=True) else: x = _infer_interval_breaks(x, axis=1) x = _infer_interval_breaks(x, axis=0) if (infer_intervals and (np.shape(y)[0] == np.shape(z)[0])): if len(y.shape) == 1: y = _infer_interval_breaks(y, check_monotonic=True) else: y = _infer_interval_breaks(y, axis=1) y = _infer_interval_breaks(y, axis=0) primitive = ax.pcolormesh(x, y, z, **kwargs) if not hasattr(ax, ) and x.ndim == 1 and y.ndim == 1: ax.set_xlim(x[0], x[-1]) ax.set_ylim(y[0], y[-1]) return primitive
Pseudocolor plot of 2d DataArray Wraps :func:`matplotlib:matplotlib.pyplot.pcolormesh`
719
def execute_script(self, script, g=None): if not g==None: self.extra_globals.update(g) if not _s.fun.is_iterable(script): if script is None: return None [expression, v] = self._parse_script(script) if v is None: print("ERROR: Could not parse ") return None g = self._globals() g.update(v) return eval(expression, g) output = [] for s in script: output.append(self.execute_script(s)) return output
Runs a script, returning the result. Parameters ---------- script String script to be evaluated (see below). g=None Optional dictionary of additional globals for the script evaluation. These will automatically be inserted into self.extra_globals. Usage ----- Scripts are of the form: "3.0 + x/y - d[0] where x=3.0*c('my_column')+h('setting'); y=d[1]" By default, "d" refers to the databox object itself, giving access to everything and enabling complete control over the universe. Meanwhile, c() and h() give quick reference to d.c() and d.h() to get columns and header lines. Additionally, these scripts can see all of the numpy functions like sin, cos, sqrt, etc. If you would like access to additional globals in a script, there are a few options in addition to specifying the g parameter. You can set self.extra_globals to the appropriate globals dictionary or add globals using self.insert_global(). Setting g=globals() will automatically insert all of your current globals into this databox instance. There are a few shorthand scripts available as well. You can simply type a column name such as 'my_column' or a column number like 2. However, I only added this functionality as a shortcut, and something like "2.0*a where a=my_column" will not work unless 'my_column' is otherwise defined. I figure since you're already writing a complicated script in that case, you don't want to accidentally shortcut your way into using a column instead of a constant! Use "2.0*a where a=c('my_column')" instead.
720
def from_euler312(self, roll, pitch, yaw):
    c3 = cos(pitch)
    s3 = sin(pitch)
    s2 = sin(roll)
    c2 = cos(roll)
    s1 = sin(yaw)
    c1 = cos(yaw)
    self.a.x = c1 * c3 - s1 * s2 * s3
    self.b.y = c1 * c2
    self.c.z = c3 * c2
    self.a.y = -c2 * s1
    self.a.z = s3 * c1 + c3 * s2 * s1
    self.b.x = c3 * s1 + s3 * s2 * c1
    self.b.z = s1 * s3 - s2 * c1 * c3
    self.c.x = -s3 * c2
    self.c.y = s2
fill the matrix from Euler angles in radians in 312 convention
721
def indexes_all(ol,value): aaaa length = ol.__len__() indexes =[] for i in range(0,length): if(value == ol[i]): indexes.append(i) else: pass return(indexes)
from elist.elist import * ol = [1,'a',3,'a',4,'a',5] indexes_all(ol,'a')
722
def put_file(self, in_path, out_path): try: st = os.stat(in_path) except OSError as e: self._throw_io_error(e, in_path) raise if not stat.S_ISREG(st.st_mode): raise IOError( % (in_path,)) if st.st_size <= self.SMALL_FILE_LIMIT: try: fp = open(in_path, ) try: s = fp.read(self.SMALL_FILE_LIMIT + 1) finally: fp.close() except OSError: self._throw_io_error(e, in_path) raise if len(s) == st.st_size: return self.put_data(out_path, s, mode=st.st_mode, utimes=(st.st_atime, st.st_mtime)) self._connect() self.parent.call_service( service_name=, method_name=, path=mitogen.utils.cast(in_path) ) self.get_chain().call( ansible_mitogen.target.transfer_file, context=self.parent, in_path=in_path, out_path=out_path )
Implement put_file() by streamily transferring the file via FileService. :param str in_path: Local filesystem path to read. :param str out_path: Remote filesystem path to write.
723
def append(self, observation, action, reward, terminal, training=True):
    super(SequentialMemory, self).append(observation, action, reward, terminal,
                                         training=training)
    if training:
        self.observations.append(observation)
        self.actions.append(action)
        self.rewards.append(reward)
        self.terminals.append(terminal)
Append an observation to the memory # Argument observation (dict): Observation returned by environment action (int): Action taken to obtain this observation reward (float): Reward obtained by taking this action terminal (boolean): Is the state terminal
724
def _get_fullname(obj):
    if not hasattr(obj, "__name__"):
        obj = obj.__class__
    if obj.__module__ in ("builtins", "__builtin__"):
        return obj.__name__
    return "{}.{}".format(obj.__module__, obj.__name__)
Get the full name of an object including the module. Args: obj: An object. Returns: The full class name of the object.
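A minimal usage sketch of _get_fullname (assumes only the function above): it shows the module-qualified name for ordinary objects and the bare name for builtins.

class Greeter(object):
    pass

print(_get_fullname(Greeter()))  # e.g. "__main__.Greeter" (an instance falls back to its class)
print(_get_fullname(len))        # "len" -- the "builtins" module prefix is dropped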
725
def get_param(self, number): logger.debug("retreiving param number %s" % number) type_ = snap7.snap7types.param_types[number] value = type_() code = self.library.Par_GetParam(self.pointer, ctypes.c_int(number), ctypes.byref(value)) check_error(code) return value.value
Reads an internal Partner object parameter.
726
def save(self): self._ensure_have_load_only() for fname, parser in self._modified_parsers: logger.info("Writing to %s", fname) ensure_dir(os.path.dirname(fname)) with open(fname, "w") as f: parser.write(f)
Save the current in-memory state.
727
def validate_token(refresh_url, exceptions=(), callback=None, access_key=, refresh_key=): def _validate_token(f): def wrapper(*args): self = args[0] try: return f(*args) except exceptions: try: self.log() self.source[access_key] = None token = self.source.get(refresh_key) data = dict(self.options[], **{refresh_key: token}) r = requests.post(refresh_url, data=data) self.source[access_key] = r.json()[access_key] changes = {access_key: self.source[access_key]} self.fire(, changes) if callback: if callable(callback): _callback = callback else: _callback = getattr(self, callback) _callback(self.source.get(access_key)) return f(*args) except Exception, e: self.log(t be revalidated. The user would have to re-authenticateaccess token could not be refreshed ({})' .format(str(e)), retryable=False) return wrapper return _validate_token
a decorator used to validate the access_token for oauth based data sources. This decorator should be used on every method in the data source that fetches data from the oauth controlled resource, and that relies on a valid access_token in order to operate properly. If the token is valid, the normal flow continues without any change. Otherwise, if any of `exceptions` tuple is raised, the normal flow will be preceded by the following steps: 1. `refresh_url` will be called in order to refresh the token 2. the newly refreshed token will be saved in the source 3. the `callback` function will be called If the refresh fails for any reason, the user would have to re-grant permission for the application Parameters ---------- refresh_url : str The URL to be called in order to refresh the access token. callback : str or callable A callback function to be called whenever the access_token is validated. The callback function would be called with the refreshed token as an argument. If the `callback` is not `callable`, but an `str` it will be called on `self` (i.e. call a method on your Data Source) Defaults to None exceptions : tuple A list of exceptions that should cause token revalidation Defaults to Exception, meaning that all errors will cause token refresh access_key : str The access token key as defined in the source and in the response from the refresh URL. Defaults to `access_token` refresh_key : str The refresh token key as defined in the source and in the request to the refresh URL. Defaults to `refresh_token`
728
def filter_service_by_host_bp_rule_label(label):
    def inner_filter(items):
        service = items["service"]
        host = items["hosts"][service.host]
        if service is None or host is None:
            return False
        return label in host.labels
    return inner_filter
Filter for service Filter on label :param label: label to filter :type label: str :return: Filter :rtype: bool
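A small, self-contained sketch of applying the returned closure; the stand-in objects below only mimic the service/host attributes the filter reads and are not part of the original framework.

from types import SimpleNamespace

host = SimpleNamespace(labels=["critical", "linux"])
service = SimpleNamespace(host="srv01")
items = {"service": service, "hosts": {"srv01": host}}

flt = filter_service_by_host_bp_rule_label("critical")
print(flt(items))  # True, because "critical" is one of the host's labels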
729
def add_virtual_columns_aitoff(self, alpha, delta, x, y, radians=True): transform = "" if radians else "*pi/180." aitoff_alpha = "__aitoff_alpha_%s_%s" % (alpha, delta) aitoff_alpha = re.sub("[^a-zA-Z_]", "_", aitoff_alpha) self.add_virtual_column(aitoff_alpha, "arccos(cos({delta}{transform})*cos({alpha}{transform}/2))".format(**locals())) self.add_virtual_column(x, "2*cos({delta}{transform})*sin({alpha}{transform}/2)/sinc({aitoff_alpha}/pi)/pi".format(**locals())) self.add_virtual_column(y, "sin({delta}{transform})/sinc({aitoff_alpha}/pi)/pi".format(**locals()))
Add aitoff (https://en.wikipedia.org/wiki/Aitoff_projection) projection :param alpha: azimuth angle :param delta: polar angle :param x: output name for x coordinate :param y: output name for y coordinate :param radians: input and output in radians (True), or degrees (False) :return:
730
def override_account_fields(self, settled_cash=not_overridden, accrued_interest=not_overridden, buying_power=not_overridden, equity_with_loan=not_overridden, total_positions_value=not_overridden, total_positions_exposure=not_overridden, regt_equity=not_overridden, regt_margin=not_overridden, initial_margin_requirement=not_overridden, maintenance_margin_requirement=not_overridden, available_funds=not_overridden, excess_liquidity=not_overridden, cushion=not_overridden, day_trades_remaining=not_overridden, leverage=not_overridden, net_leverage=not_overridden, net_liquidation=not_overridden): self._dirty_account = True self._account_overrides = kwargs = { k: v for k, v in locals().items() if v is not not_overridden } del kwargs[]
Override fields on ``self.account``.
731
def get_changes(self, changers, in_hierarchy=False, resources=None, task_handle=taskhandle.NullTaskHandle()): function_changer = _FunctionChangers(self.pyname.get_object(), self._definfo(), changers) return self._change_calls(function_changer, in_hierarchy, resources, task_handle)
Get changes caused by this refactoring. `changers` is a list of `_ArgumentChanger`\s. If `in_hierarchy` is `True` the changers are applied to all matching methods in the class hierarchy. `resources` can be a list of `rope.base.resource.File`\s that should be searched for occurrences; if `None` all python files in the project are searched.
732
def ltime(etobs, obs, direct, targ):
    etobs = ctypes.c_double(etobs)
    obs = ctypes.c_int(obs)
    direct = stypes.stringToCharP(direct)
    targ = ctypes.c_int(targ)
    ettarg = ctypes.c_double()
    elapsd = ctypes.c_double()
    libspice.ltime_c(etobs, obs, direct, targ,
                     ctypes.byref(ettarg), ctypes.byref(elapsd))
    return ettarg.value, elapsd.value
This routine computes the transmit (or receive) time of a signal at a specified target, given the receive (or transmit) time at a specified observer. The elapsed time between transmit and receive is also returned. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ltime_c.html :param etobs: Epoch of a signal at some observer :type etobs: float :param obs: NAIF ID of some observer :type obs: int :param direct: Direction the signal travels ( "->" or "<-" ) :type direct: str :param targ: NAIF ID of the target object :type targ: int :return: epoch and time :rtype: tuple
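A usage sketch with hypothetical values (assumes the required leapseconds/ephemeris kernels have already been furnished through the underlying SPICE toolkit): compute when a signal transmitted from Earth (NAIF ID 399) at ephemeris time 0.0 reaches the Moon (NAIF ID 301).

et_target, elapsed = ltime(etobs=0.0, obs=399, direct="->", targ=301)
print(et_target, elapsed)  # receive epoch at the target and the light time in seconds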
733
def _feed_calendar_span(gtfs, stats): n_feeds = _n_gtfs_sources(gtfs)[0] max_start = None min_end = None if n_feeds > 1: for i in range(n_feeds): feed_key = "feed_" + str(i) + "_" start_key = feed_key + "calendar_start" end_key = feed_key + "calendar_end" calendar_span = gtfs.conn.cursor().execute( , (feed_key + ,)).fetchone() stats[start_key] = calendar_span[0] stats[end_key] = calendar_span[1] if calendar_span[0] is not None and calendar_span[1] is not None: if not max_start and not min_end: max_start = calendar_span[0] min_end = calendar_span[1] else: if gtfs.get_day_start_ut(calendar_span[0]) > gtfs.get_day_start_ut(max_start): max_start = calendar_span[0] if gtfs.get_day_start_ut(calendar_span[1]) < gtfs.get_day_start_ut(min_end): min_end = calendar_span[1] stats["latest_feed_start_date"] = max_start stats["earliest_feed_end_date"] = min_end else: stats["latest_feed_start_date"] = stats["start_date"] stats["earliest_feed_end_date"] = stats["end_date"] return stats
Computes the temporal coverage of each source feed Parameters ---------- gtfs: gtfspy.GTFS object stats: dict where to append the stats Returns ------- stats: dict
734
def send(self, sender: PytgbotApiBot):
    return sender.send_voice(
        voice=self.voice,
        chat_id=self.receiver,
        reply_to_message_id=self.reply_id,
        caption=self.caption,
        parse_mode=self.parse_mode,
        duration=self.duration,
        disable_notification=self.disable_notification,
        reply_markup=self.reply_markup
    )
Send the message via pytgbot. :param sender: The bot instance to send with. :type sender: pytgbot.bot.Bot :rtype: PytgbotApiMessage
735
def initializerepo(self): try: os.mkdir(self.repopath) except OSError: pass cmd = self.repo.init(bare=self.bare, shared=self.shared) if not self.bare: self.write_testing_data([], []) self.write_training_data([], []) self.write_classifier(None) cmd = self.repo.add() cmd = self.repo.add() cmd = self.repo.add() cmd = self.repo.commit(m=) cmd = self.repo.tag() cmd = self.set_version()
Fill empty directory with products and make first commit
736
def binomial_coefficient(n, k):
    if not isinstance(k, int) or not isinstance(n, int):
        raise TypeError("Expecting positive integers")
    if k > n:
        raise ValueError("k must be lower or equal than n")
    if k < 0 or n < 0:
        raise ValueError("Expecting positive integers")
    return factorial(n) // (factorial(k) * factorial(n - k))
Calculate the binomial coefficient indexed by n and k. Args: n (int): positive integer k (int): positive integer Returns: The binomial coefficient indexed by n and k Raises: TypeError: If either n or k is not an integer ValueError: If either n or k is negative, or if k is strictly greater than n
737
def writeDetails(accept, readId, taxonomy, fp): fp.write( % ( if accept else , readId, .join(taxonomy) if taxonomy else ))
Write read and taxonomy details. @param accept: A C{bool} indicating whether the read was accepted, according to its taxonomy. @param readId: The C{str} id of the read. @taxonomy: A C{list} of taxonomy C{str} levels. @fp: An open file pointer to write to.
738
def user_warning(self, message, caption=): dlg = wx.MessageDialog(self, message, caption, wx.OK | wx.CANCEL | wx.ICON_WARNING) if self.show_dlg(dlg) == wx.ID_OK: continue_bool = True else: continue_bool = False dlg.Destroy() return continue_bool
Shows a dialog that warns the user about some action Parameters ---------- message : message to display to user caption : title for dialog (default: "Warning!") Returns ------- continue_bool : True or False
739
def get_partitions(self, persistence=None): if persistence is None: persistence = self.persistence partitions = {} for key, items in self.base_partitions.items(): new_key = key while ( self.merge_sequence[new_key][0] < persistence and self.merge_sequence[new_key][1] != new_key ): new_key = self.merge_sequence[new_key][1] if new_key not in partitions: partitions[new_key] = [] partitions[new_key].extend(items) for key in partitions: partitions[key] = sorted(list(set(partitions[key]))) return partitions
Returns the partitioned data based on a specified persistence level. @ In, persistence, a floating point value specifying the size of the smallest feature we want to track. Default = None means consider all features. @ Out, a dictionary lists where each key is a integer specifying the index of the maximum. Each entry will hold a list of indices specifying points that are associated to this maximum.
740
def _get_step_inout(step): inputs = [] outputs = [] prescatter = collections.defaultdict(list) remapped = {} assert step.outputs_record_schema["type"] == "record" output_names = set([]) for outp in step.outputs_record_schema["fields"]: outputs.append({"id": outp["name"]}) output_names.add(outp["name"]) assert step.inputs_record_schema["type"] == "record" for inp in step.inputs_record_schema["fields"]: source = inp["source"].split(" if inp["valueFrom"].find(attr_access) > 0: source += ".%s" % inp["name"] if isinstance(inp["type"], dict) and isinstance(inp["type"].get("items"), dict): if inp["type"]["items"].get("type") == "array" and "inputBinding" in inp["type"]: source, prescatter = _unpack_object_array(inp, source, prescatter) if inp["name"] in output_names: new_name = inp["name"] + "_input" remapped[inp["name"]] = new_name inp["name"] = new_name inputs.append({"id": inp["name"], "value": source}) return inputs, outputs, remapped, dict(prescatter)
Retrieve set of inputs and outputs connecting steps.
741
def set_name(address, name, anyway=False): success = idaapi.set_name(address, name, idaapi.SN_NOWARN | idaapi.SN_NOCHECK) if success: return if anyway: success = idaapi.do_name_anyway(address, name) if success: return raise exceptions.SarkSetNameFailed("Failed renaming 0x{:08X} to {!r}.".format(address, name)) raise exceptions.SarkErrorNameAlreadyExists( "Can't rename 0x{:08X}. Name {!r} already exists.".format(address, name))
Set the name of an address. Sets the name of an address in IDA. If the name already exists, check the `anyway` parameter: True - Add `_COUNTER` to the name (default IDA behaviour) False - Raise an `exceptions.SarkErrorNameAlreadyExists` exception. Args address: The address to rename. name: The desired name. anyway: Set anyway or not. Defualt ``False``.
742
def _convert_etree_element_to_rule(entry_element): http://www.w3.org/2005/Atomapplication/xmlXYZABC rule = Rule() rule_element = entry_element.find(, _etree_sb_feed_namespaces) if rule_element is not None: filter_element = rule_element.find(, _etree_sb_feed_namespaces) if filter_element is not None: rule.filter_type = filter_element.attrib.get( _make_etree_ns_attr_name(_etree_sb_feed_namespaces[], ), None) sql_exp_element = filter_element.find(, _etree_sb_feed_namespaces) if sql_exp_element is not None: rule.filter_expression = sql_exp_element.text action_element = rule_element.find(, _etree_sb_feed_namespaces) if action_element is not None: rule.action_type = action_element.attrib.get( _make_etree_ns_attr_name(_etree_sb_feed_namespaces[], ), None) sql_exp_element = action_element.find(, _etree_sb_feed_namespaces) if sql_exp_element is not None: rule.action_expression = sql_exp_element.text for name, value in _ETreeXmlToObject.get_entry_properties_from_element( entry_element, True, ).items(): setattr(rule, name, value) return rule
Converts entry element to rule object. The format of xml for rule: <entry xmlns='http://www.w3.org/2005/Atom'> <content type='application/xml'> <RuleDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"> <Filter i:type="SqlFilterExpression"> <SqlExpression>MyProperty='XYZ'</SqlExpression> </Filter> <Action i:type="SqlFilterAction"> <SqlExpression>set MyProperty2 = 'ABC'</SqlExpression> </Action> </RuleDescription> </content> </entry>
743
def write_reports(self, relative_path, suite_name, reports, package_name=None): dest_path = self.reserve_file(relative_path) with open(dest_path, ) as outf: outf.write(toxml(reports, suite_name, package_name=package_name)) return dest_path
write the collection of reports to the given path
744
def get_esri_extent(esriobj): extent = None srs = None if in esriobj._json_struct: extent = esriobj._json_struct[] if in esriobj._json_struct: extent = esriobj._json_struct[] try: srs = extent[][] except KeyError, err: LOGGER.error(err, exc_info=True) return [extent, srs]
Get the extent of an ESRI resource
745
def query_string(context, **kwargs):
    params = context["request"].GET.copy()
    for key, value in list(kwargs.items()):
        params[key] = value
    return "?" + params.urlencode()
Add parameters to the given query string.
746
def json_dumps(obj, **kwargs ): if six.PY2: kwargs[] = return json.dumps(convert_to_dict(obj), **kwargs)
Force use of unicode.
747
def to_dict(self): return { : self.content, : [r._asdict() for r in self.resources], : self.js_init_fn, : self.js_init_version, : self.json_init_args }
Returns the fragment in a dictionary representation.
748
def frame_paths(frame_type, start_time, end_time, server=None, url_type='file'):
    # default url_type taken from the docstring ("Default is 'file'"); the literal was missing in the source
    site = frame_type[0]
    connection = datafind_connection(server)
    connection.find_times(site, frame_type,
                          gpsstart=start_time, gpsend=end_time)
    cache = connection.find_frame_urls(site, frame_type, start_time, end_time,
                                       urltype=url_type)
    paths = [entry.path for entry in cache]
    return paths
Return the paths to a span of frame files Parameters ---------- frame_type : string The string representation of the frame type (ex. 'H1_ER_C00_L1') start_time : int The start time that we need the frames to span. end_time : int The end time that we need the frames to span. server : {None, SERVER:PORT string}, optional Optional string to specify the datafind server to use. By default an attempt is made to use a local datafind server. url_type : string Returns only frame URLs with a particular scheme or head such as "file" or "gsiftp". Default is "file", which queries locally stored frames. Option can be disabled if set to None. Returns ------- paths : list of paths The list of paths to the frame files. Examples -------- >>> paths = frame_paths('H1_LDAS_C02_L2', 968995968, 968995968+2048)
749
def get_valid_cwd(): try: cwd = _current_dir() except: warn("Your current directory is invalid. If you open a ticket at " + "https://github.com/milkbikis/powerline-shell/issues/new " + "we would love to help fix the issue.") sys.stdout.write("> ") sys.exit(1) parts = cwd.split(os.sep) up = cwd while parts and not os.path.exists(up): parts.pop() up = os.sep.join(parts) if cwd != up: warn("Your current directory is invalid. Lowest valid directory: " + up) return cwd
Determine and check the current working directory for validity. Typically, an invalid directory arises when you check out a different branch on git that doesn't have this directory. When an invalid directory is found, a warning is printed to the screen, but the directory is still returned as-is, since this is what the shell considers to be the cwd.
750
def remove_foreign_key(self, name):
    name = self._normalize_identifier(name)
    if not self.has_foreign_key(name):
        raise ForeignKeyDoesNotExist(name, self._name)
    del self._fk_constraints[name]
Removes the foreign key constraint with the given name. :param name: The constraint name :type name: str
751
def store_zonefiles( self, zonefile_names, zonefiles, zonefile_txids, zonefile_block_heights, peer_zonefile_hashes, peer_hostport, path, con=None ): ret = [] with AtlasDBOpen(con=con, path=path) as dbcon: for fetched_zfhash, zonefile_txt in zonefiles.items(): if fetched_zfhash not in peer_zonefile_hashes or fetched_zfhash not in zonefile_block_heights: log.warn("%s: Unsolicited zonefile %s" % (self.hostport, fetched_zfhash)) continue rc = self.store_zonefile_data( fetched_zfhash, zonefile_txt, min(zonefile_block_heights[fetched_zfhash]), peer_hostport, dbcon, path ) if rc: ret.append( fetched_zfhash ) return ret
Store a list of RPC-fetched zonefiles (but only ones in peer_zonefile_hashes) from the given peer_hostport Return the list of zonefile hashes stored.
752
def _adjust_rowcol(self, insertion_point, no_to_insert, axis, tab=None): if axis == 2: self._shift_rowcol(insertion_point, no_to_insert) return assert axis in (0, 1) cell_sizes = self.col_widths if axis else self.row_heights set_cell_size = self.set_col_width if axis else self.set_row_height new_sizes = {} del_sizes = [] for pos, table in cell_sizes: if pos > insertion_point and (tab is None or tab == table): if 0 <= pos + no_to_insert < self.shape[axis]: new_sizes[(pos + no_to_insert, table)] = \ cell_sizes[(pos, table)] del_sizes.append((pos, table)) for pos, table in new_sizes: set_cell_size(pos, table, new_sizes[(pos, table)]) for pos, table in del_sizes: if (pos, table) not in new_sizes: set_cell_size(pos, table, None)
Adjusts row and column sizes on insertion/deletion
753
def stream_bloom_filters(dataset, keys, schema):
    tokenizers = [tokenizer.get_tokenizer(field.hashing_properties)
                  for field in schema.fields]
    return (crypto_bloom_filter(s, tokenizers, schema, keys)
            for s in dataset)
Compute composite Bloom filters (CLKs) for every record in an iterable dataset. :param dataset: An iterable of indexable records. :param schema: An instantiated Schema instance :param keys: A tuple of two lists of secret keys used in the HMAC. :return: Generator yielding bloom filters as 3-tuples
754
def when(self, *bools):
    self.passes = self.passes and all(bools)
    return self
:type bools: bool :param bools: Boolean arguments All boolean arguments passed to this method must evaluate to `True` for printing to be enabled. So for example, the following code would print ``x: 1`` .. code-block:: python for x in range(10): Behold().when(x == 1).show('x')
755
def lp_to_simple_rdd(lp_rdd, categorical=False, nb_classes=None): if categorical: if not nb_classes: labels = np.asarray(lp_rdd.map( lambda lp: lp.label).collect(), dtype=) nb_classes = np.max(labels) + 1 rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), encode_label(lp.label, nb_classes))) else: rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), lp.label)) return rdd
Convert a LabeledPoint RDD into an RDD of feature-label pairs :param lp_rdd: LabeledPoint RDD of features and labels :param categorical: boolean, whether labels should be one-hot encoded when returned :param nb_classes: int, number of total classes :return: Spark RDD with feature-label pairs
756
def tradeBreaksSSE(symbols=None, on_data=None, token=, version=): return _runSSE(, symbols, on_data, token, version)
Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill. https://iexcloud.io/docs/api/#deep-trades Args: symbols (string); Tickers to request on_data (function): Callback on data token (string); Access token version (string); API version
757
def get_assessments_taken_by_search(self, assessment_taken_query, assessment_taken_search): if not self._can(): raise PermissionDenied() return self._provider_session.get_assessments_taken_by_search(assessment_taken_query, assessment_taken_search)
Pass through to provider AssessmentTakenSearchSession.get_assessments_taken_by_search
758
def pre_save(self, model_instance, add):
    value = super().pre_save(model_instance, add)
    if isinstance(value, LocalizedValue):
        for file in value.__dict__.values():
            if file and not file._committed:
                file.save(file.name, file, save=False)
    return value
Returns field's value just before saving.
759
def patch_stackless(): global _application_set_schedule_callback _application_set_schedule_callback = stackless.set_schedule_callback(_schedule_callback) def set_schedule_callback(callable): global _application_set_schedule_callback old = _application_set_schedule_callback _application_set_schedule_callback = callable return old def get_schedule_callback(): global _application_set_schedule_callback return _application_set_schedule_callback set_schedule_callback.__doc__ = stackless.set_schedule_callback.__doc__ if hasattr(stackless, "get_schedule_callback"): get_schedule_callback.__doc__ = stackless.get_schedule_callback.__doc__ stackless.set_schedule_callback = set_schedule_callback stackless.get_schedule_callback = get_schedule_callback if not hasattr(stackless.tasklet, "trace_function"): __call__.__doc__ = stackless.tasklet.__call__.__doc__ stackless.tasklet.__call__ = __call__ setup.__doc__ = stackless.tasklet.setup.__doc__ stackless.tasklet.setup = setup run.__doc__ = stackless.run.__doc__ stackless.run = run
This function should be called to patch the stackless module so that new tasklets are properly tracked in the debugger.
760
def to_api_repr(self):
    configuration = self._configuration.to_api_repr()
    resource = {
        "jobReference": self._properties["jobReference"],
        "configuration": configuration,
    }
    configuration["query"]["query"] = self.query
    return resource
Generate a resource for :meth:`_begin`.
761
def file_compile(self, path): log.info(+path) cmd = % path res = self.__exchange(cmd) log.info(res) return res
Compiles a file specified by path on the device
762
def dim_range_key(eldim): if isinstance(eldim, dim): dim_name = repr(eldim) if dim_name.startswith(""): dim_name = dim_name[1:-1] else: dim_name = eldim.name return dim_name
Returns the key to look up a dimension range.
763
def hysteresis_magic2(path_to_file=, hyst_file="rmag_hysteresis.txt", save=False, save_folder=, fmt="svg", plots=True): user, meas_file, rmag_out, rmag_file = "", "agm_measurements.txt", "rmag_hysteresis.txt", "" pltspec = "" dir_path = save_folder verbose = pmagplotlib.verbose version_num = pmag.get_version() rmag_out = save_folder + + rmag_out meas_file = path_to_file + + hyst_file rmag_rem = save_folder + "/rmag_remanence.txt" meas_data, file_type = pmag.magic_read(meas_file) if file_type != : print(hysteresis_magic.__doc__) print() return HystRecs, RemRecs = [], [] HDD = {} HDD[], HDD[], HDD[] = 1, 2, 3 experiment_names, sids = [], [] for rec in meas_data: meths = rec[].split() methods = [] for meth in meths: methods.append(meth.strip()) if in methods: if in list(rec.keys()) and rec[] != "": rec[] = rec[] if rec[] not in experiment_names: experiment_names.append(rec[]) if rec[] not in sids: sids.append(rec[]) fignum = 1 sample_num = 0 first_dcd_rec, first_rec, first_imag_rec = 1, 1, 1 while sample_num < len(sids): sample = sids[sample_num] print(sample, sample_num + 1, , len(sids)) B, M, Bdcd, Mdcd = [], [], [], [] Bimag, Mimag = [], [] for rec in meas_data: methcodes = rec[].split() meths = [] for meth in methcodes: meths.append(meth.strip()) if rec[] == sample and "LP-HYS" in meths: B.append(float(rec[])) M.append(float(rec[])) if first_rec == 1: e = rec[] HystRec = {} first_rec = 0 if "er_location_name" in list(rec.keys()): HystRec["er_location_name"] = rec["er_location_name"] locname = rec[].replace(, ) if "er_sample_name" in list(rec.keys()): HystRec["er_sample_name"] = rec["er_sample_name"] if "er_site_name" in list(rec.keys()): HystRec["er_site_name"] = rec["er_site_name"] if "er_synthetic_name" in list(rec.keys()) and rec[] != "": HystRec["er_synthetic_name"] = rec["er_synthetic_name"] else: HystRec["er_specimen_name"] = rec["er_specimen_name"] if rec[] == sample and "LP-IRM-DCD" in meths: Bdcd.append(float(rec[])) Mdcd.append(float(rec[])) if first_dcd_rec == 1: RemRec = {} irm_exp = rec[] first_dcd_rec = 0 if "er_location_name" in list(rec.keys()): RemRec["er_location_name"] = rec["er_location_name"] if "er_sample_name" in list(rec.keys()): RemRec["er_sample_name"] = rec["er_sample_name"] if "er_site_name" in list(rec.keys()): RemRec["er_site_name"] = rec["er_site_name"] if "er_synthetic_name" in list(rec.keys()) and rec[] != "": RemRec["er_synthetic_name"] = rec["er_synthetic_name"] else: RemRec["er_specimen_name"] = rec["er_specimen_name"] if rec[] == sample and "LP-IMAG" in meths: if first_imag_rec == 1: imag_exp = rec[] first_imag_rec = 0 Bimag.append(float(rec[])) Mimag.append(float(rec[])) if len(B) > 0: hmeths = [] for meth in meths: hmeths.append(meth) fig = plt.figure(figsize=(8, 8)) hpars, deltaM, Bdm, B, Mnorm, MadjN = iplot_hys(1, B, M, sample) ax1 = fig.add_subplot(2, 2, 1) ax1.axhline(0, color=) ax1.axvline(0, color=) ax1.plot(B, Mnorm, ) ax1.plot(B, MadjN, ) ax1.set_xlabel() ax1.set_ylabel("M/Msat") ax1.set_xlim(-1, 1) ax1.set_ylim(-1, 1) bounds = ax1.axis() n4 = + \ % (float(hpars[])) + ax1.text(bounds[1] - .9 * bounds[1], -.9, n4, fontsize=9) n1 = + \ % (float(hpars[])) + ax1.text(bounds[1] - .9 * bounds[1], -.7, n1, fontsize=9) n2 = + % (float(hpars[])) + ax1.text(bounds[1] - .9 * bounds[1], -.5, n2, fontsize=9) if in list(hpars.keys()): n3 = r + \ % (float(hpars[])) + ax1.text(bounds[1] - .9 * bounds[1], -.3, n3, fontsize=9) DdeltaM = [] Mhalf = "" for k in range(2, len(Bdm)): DdeltaM.append( old_div(abs(deltaM[k] - deltaM[k - 2]), (Bdm[k] - Bdm[k - 2]))) 
for k in range(len(deltaM)): if old_div(deltaM[k], deltaM[0]) < 0.5: Mhalf = k break try: Bhf = Bdm[Mhalf - 1:Mhalf + 1] Mhf = deltaM[Mhalf - 1:Mhalf + 1] poly = polyfit(Bhf, Mhf, 1) Bcr = old_div((.5 * deltaM[0] - poly[1]), poly[0]) hpars[] = % (Bcr) hpars[] = "LP-BCR-HDM" if HDD[] != 0: ax2 = fig.add_subplot(2, 2, 2) ax2.plot(Bdm, deltaM, ) ax2.set_xlabel() ax2.set_ylabel() linex = [0, Bcr, Bcr] liney = [old_div(deltaM[0], 2.), old_div(deltaM[0], 2.), 0] ax2.plot(linex, liney, ) ax3 = fig.add_subplot(2, 2, 3) ax3.plot(Bdm[(len(Bdm) - len(DdeltaM)):], DdeltaM, ) ax3.set_xlabel() ax3.set_ylabel() ax4 = fig.add_subplot(2, 2, 4) ax4.plot(Bdcd, Mdcd) ax4.yaxis.set_major_formatter(mtick.FormatStrFormatter()) ax4.axhline(0, color=) ax4.axvline(0, color=) ax4.set_xlabel() ax4.set_ylabel() except: print("not doing it") hpars[] = hpars[] = "" plt.gcf() plt.gca() plt.tight_layout() if save: plt.savefig(save_folder + + sample + + fmt) plt.show() sample_num += 1
Calculates hysteresis parameters and saves them in an rmag_hysteresis-format file. If selected, this function also plots hysteresis loops, delta M curves, d(Delta M)/dB curves, and IRM backfield curves. Parameters (defaults are used if not specified) ---------- path_to_file : path to directory that contains files (default is current directory, '.') hyst_file : hysteresis file (default is 'rmag_hysteresis.txt') save : boolean argument to save plots (default is False) save_folder : relative directory where plots will be saved (default is current directory, '.') fmt : format of saved figures (default is 'svg', matching the function signature) plots : whether or not to display the plots (default is True)
764
def is_secret_registered( self, secrethash: SecretHash, block_identifier: BlockSpecification, ) -> bool: if not self.client.can_query_state_for_block(block_identifier): raise NoStateForBlockIdentifier() block = self.get_secret_registration_block_by_secrethash( secrethash=secrethash, block_identifier=block_identifier, ) return block is not None
True if the secret for `secrethash` is registered at `block_identifier`. Throws NoStateForBlockIdentifier if the given block_identifier is older than the pruning limit
765
def import_surf_mesh(file_name): raw_content = read_file(file_name) raw_content = raw_content.split("\n") content = [] for rc in raw_content: temp = rc.strip().split() content.append(temp) if int(content[0][0]) != 3: raise TypeError("Input mesh must be 3-dimensional") surf = shortcuts.generate_surface(rational=True) surf.degree_u = int(content[1][0]) surf.degree_v = int(content[1][1]) dim_u = int(content[2][0]) dim_v = int(content[2][1]) ctrlpts_end = 5 + (dim_u * dim_v) ctrlpts_mesh = content[5:ctrlpts_end] ctrlpts = compatibility.flip_ctrlpts_u(ctrlpts_mesh, dim_u, dim_v) ctrlptsw = compatibility.generate_ctrlptsw(ctrlpts) surf.set_ctrlpts(ctrlptsw, dim_u, dim_v) surf.knotvector_u = [float(u) for u in content[3]] surf.knotvector_v = [float(v) for v in content[4]] return surf
Generates a NURBS surface object from a mesh file. :param file_name: input mesh file :type file_name: str :return: a NURBS surface :rtype: NURBS.Surface
766
def clean_whitespace(string, compact=False): for a, b in ((, ), (, ), (, ), (, ), (, )): string = string.replace(a, b) if compact: for a, b in ((, ), (, ), (, ), (, ), (, )): string = string.replace(a, b) return string.strip()
Return string with compressed whitespace.
767
def _value_and_batch_jacobian(f, x):
    if tf.executing_eagerly():
        with tf.GradientTape() as tape:
            tape.watch(x)
            value = f(x)
        batch_jacobian = tape.batch_jacobian(value, x)
    else:
        value = f(x)
        batch_jacobian = gradients.batch_jacobian(value, x)
    return value, batch_jacobian
Enables uniform interface to value and batch jacobian calculation. Works in both eager and graph modes. Arguments: f: The scalar function to evaluate. x: The value at which to compute the value and the batch jacobian. Returns: A tuple (f(x), J(x)), where J(x) is the batch jacobian.
768
def create_table(table, data): fields = data[] query = indexed_fields = for key, value in fields.items(): non_case_field = value[0][0:value[0].find()] if non_case_field == : sign = value[0][value[0].find() + 1:-1:].strip() if sign == : field_type = else: field_type = bits = re.findall(, value[0])[0] field = key + + field_type + bits query += field + elif non_case_field == : field_type = field = key + + field_type query += field + elif non_case_field == : field_type = bits = re.findall(, value[0])[0] field = key + + field_type + bits query += field + if value[1] == : indexed_fields += key + query = query[:-1:] + f",date Date) ENGINE = MergeTree(date, ({indexed_fields} date), 8192)" client.execute(f"CREATE TABLE {table} {query}")
Create table with defined name and fields :return: None
769
def authenticate_request(self, method, bucket=, key=, headers=None): path = self.conn.calling_format.build_path_base(bucket, key) auth_path = self.conn.calling_format.build_auth_path(bucket, key) http_request = boto.connection.AWSAuthConnection.build_base_http_request( self.conn, method, path, auth_path, {}, headers ) http_request.authorize(connection=self.conn) return http_request
Authenticate a HTTP request by filling in Authorization field header. :param method: HTTP method (e.g. GET, PUT, POST) :param bucket: name of the bucket. :param key: name of key within bucket. :param headers: dictionary of additional HTTP headers. :return: boto.connection.HTTPRequest object with Authorization header filled (NB: will also have a Date field if none before and a User-Agent field will be set to Boto).
770
def RegisterSourceType(cls, source_type_class): if source_type_class.TYPE_INDICATOR in cls._source_type_classes: raise KeyError( .format( source_type_class.TYPE_INDICATOR)) cls._source_type_classes[source_type_class.TYPE_INDICATOR] = ( source_type_class)
Registers a source type. Source types are identified based on their type indicator. Args: source_type_class (type): source type. Raises: KeyError: if source types is already set for the corresponding type indicator.
771
def _find_usages_vpn_gateways(self): vpngws = self.conn.describe_vpn_gateways(Filters=[ { : , : [ , ] } ])[] self.limits[]._add_current_usage( len(vpngws), aws_type= )
find usage of vpn gateways
772
def post(self, *args, **kwargs): json_data = request.get_json() or {} relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data() if not in json_data: raise BadRequest(, source={: }) if isinstance(json_data[], dict): if not in json_data[]: raise BadRequest(, source={: }) if not in json_data[]: raise BadRequest(, source={: }) if json_data[][] != related_type_: raise InvalidType(, source={: }) if isinstance(json_data[], list): for obj in json_data[]: if not in obj: raise BadRequest(, source={: }) if not in obj: raise BadRequest(, source={: }) if obj[] != related_type_: raise InvalidType(, source={: }) self.before_post(args, kwargs, json_data=json_data) obj_, updated = self._data_layer.create_relationship(json_data, model_relationship_field, related_id_field, kwargs) status_code = 200 result = {: {: }} if updated is False: result = status_code = 204 final_result = self.after_post(result, status_code) return final_result
Add / create relationship(s)
773
def AIMAFile(components, mode='r'):
    # default mode literal was missing in the source; 'r' assumed
    "Open a file based at the AIMA root directory."
    import utils
    dir = os.path.dirname(utils.__file__)
    return open(apply(os.path.join, [dir] + components), mode)
Open a file based at the AIMA root directory.
774
def _in_header(self, col): if not self._has_header: icol_ex = pexdoc.exh.addex(RuntimeError, "Invalid column specification") hnf_ex = pexdoc.exh.addex(ValueError, "Column *[column_identifier]* not found") col_list = [col] if isinstance(col, (str, int)) else col for col in col_list: edata = {"field": "column_identifier", "value": col} if not self._has_header: icol_ex(not isinstance(col, int)) hnf_ex((col < 0) or (col > len(self._header) - 1), edata) else: hnf_ex( (isinstance(col, int) and ((col < 0) or (col > self._data_cols))) or ( isinstance(col, str) and (col.upper() not in self._header_upper) ), edata, ) return col_list
Validate column identifier(s).
775
def create_issue(self, data, params=None): return self._post(self.API_URL + , data=data, params=params)
Creates an issue or a sub-task from a JSON representation. You can provide two parameters in request's body: update or fields. The fields, that can be set on an issue create operation, can be determined using the /rest/api/2/issue/createmeta resource. If a particular field is not configured to appear on the issue's Create screen, then it will not be returned in the createmeta response. A field validation error will occur if such field is submitted in request. Creating a sub-task is similar to creating an issue with the following differences: issueType field must be set to a sub-task issue type (use /issue/createmeta to find sub-task issue types), and You must provide a parent field with the ID or key of the parent issue. Args: data: params: Returns:
776
def send (self, command, *args, **kwargs): status = False cmdobj = self._cmddict.create(command, *args, **kwargs) messages = [] if not cmdobj.validate(messages): for msg in messages: log.error(msg) else: encoded = cmdobj.encode() if self._verbose: size = len(cmdobj.name) pad = (size - len(cmdobj.name) + 1) * gds.hexdump(encoded, preamble=cmdobj.name + + pad) try: values = (self._host, self._port, str(cmdobj)) log.command( % values) self._socket.sendto(encoded, (self._host, self._port)) status = True with pcap.open(self.CMD_HIST_FILE, ) as output: output.write(str(cmdobj)) except socket.error as e: log.error(e.message) except IOError as e: log.error(e.message) return status
Creates, validates, and sends the given command as a UDP packet to the destination (host, port) specified when this CmdAPI was created. Returns True if the command was created, valid, and sent, False otherwise.
777
def _all_arcs(self):
    arcs = set()
    for bp in self.child_parsers():
        arcs.update(bp._arcs())
    return arcs
Get the set of all arcs in this code object and its children. See `_arcs` for details.
778
def add_observee_with_credentials(self, user_id, access_token=None, observee_password=None, observee_unique_id=None): path = {} data = {} params = {} path["user_id"] = user_id if observee_unique_id is not None: data["observee[unique_id]"] = observee_unique_id if observee_password is not None: data["observee[password]"] = observee_password if access_token is not None: data["access_token"] = access_token self.logger.debug("POST /api/v1/users/{user_id}/observees with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/users/{user_id}/observees".format(**path), data=data, params=params, single_item=True)
Add an observee with credentials. Register the given user to observe another user, given the observee's credentials. *Note:* all users are allowed to add their own observees, given the observee's credentials or access token are provided. Administrators can add observees given credentials, access token or the {api:UserObserveesController#update observee's id}.
779
def _check_input_files(nspc, parser): if not len(nspc.filenames) == 3: parser.print_help() msg = .format(len(nspc.filenames), .join(nspc.filenames)) raise Exception(msg) for fin in nspc.filenames: if not os.path.exists(fin): return "*{}* does not exist".format(fin) return False
Check filename arguments; otherwise, if one of the 3 filenames is bad, it's hard to tell which one.
780
def actnorm_center(name, x, reverse=False, init=False): shape = common_layers.shape_list(x) with tf.variable_scope(name, reuse=tf.AUTO_REUSE): assert len(shape) == 2 or len(shape) == 4 if len(shape) == 2: x_mean = tf.reduce_mean(x, [0], keepdims=True) b = get_variable_ddi("b", (1, shape[1]), initial_value=-x_mean, init=init) elif len(shape) == 4: x_mean = tf.reduce_mean(x, [0, 1, 2], keepdims=True) b = get_variable_ddi( "b", (1, 1, 1, shape[3]), initial_value=-x_mean, init=init) if not reverse: x += b else: x -= b return x
Add a bias to x. Initialize such that the output of the first minibatch is zero centered per channel. Args: name: scope x: 2-D or 4-D Tensor. reverse: Forward or backward operation. init: data-dependent initialization. Returns: x_center: (x + b), if reverse is True and (x - b) otherwise.
781
def reset(db_name): conn = psycopg2.connect(database=) db = Database(db_name) conn.autocommit = True with conn.cursor() as cursor: cursor.execute(db.drop_statement()) cursor.execute(db.create_statement()) conn.close()
Reset database.
782
def mavlink_packet(self, m): if m.get_type() == : if not self.packet_is_for_me(m): self.dropped += 1 return if self.sender is None and m.seqno == 0: if self.log_settings.verbose: print("DFLogger: Received data packet - starting new log") self.start_new_log() self.sender = (m.get_srcSystem(), m.get_srcComponent()) if self.sender is None: return if self.stopped: self.tell_sender_to_stop(m) return if self.sender is not None: size = len(m.data) data = .join(str(chr(x)) for x in m.data[:size]) ofs = size*(m.seqno) self.logfile.seek(ofs) self.logfile.write(data) if m.seqno in self.missing_blocks: if self.log_settings.verbose: print("DFLogger: Got missing block: %d" % (m.seqno,)) del self.missing_blocks[m.seqno] self.missing_found += 1 self.blocks_to_ack_and_nack.append( [self.master, m.seqno, 1, time.time(), None] ) self.acking_blocks[m.seqno] = 1 else: self.do_ack_block(m.seqno) if self.last_seqno < m.seqno: self.last_seqno = m.seqno self.download += size
handle mavlink packets
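The interesting part of the handler is the block reassembly: each fixed-size data block is written at offset size * seqno, so out-of-order blocks still land in the right place. A self-contained sketch of just that arithmetic (with made-up blocks, not MAVLink data):

import io

size = 4
logfile = io.BytesIO()
for seqno, block in [(1, b"BBBB"), (0, b"AAAA"), (2, b"CCCC")]:  # arrives out of order
    logfile.seek(size * seqno)
    logfile.write(block)
assert logfile.getvalue() == b"AAAABBBBCCCC"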
783
def _dmi_cast(key, val, clean=True):
    if clean and not _dmi_isclean(key, val):
        return
    elif not re.match(r, key, flags=re.IGNORECASE):
        if in val:
            val = [el.strip() for el in val.split()]
        else:
            try:
                val = int(val)
            except Exception:
                pass
    return val
Simple caster thingy for trying to fish out at least ints & lists from strings
784
def _write_plan(self, stream):
    if self.plan is not None:
        if not self._plan_written:
            print("1..{0}".format(self.plan), file=stream)
            self._plan_written = True
Write the plan line to the stream. If we have a plan and have not yet written it out, write it to the given stream.
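For reference, the TAP plan line is just "1..N" on a line of its own; a minimal standalone check of the format written above:

import io

stream = io.StringIO()
print("1..{0}".format(3), file=stream)   # what _write_plan emits for a plan of 3
assert stream.getvalue() == "1..3\n"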
785
def _theorem5p4(adj, ub):
    new_edges = set()
    for u, v in itertools.combinations(adj, 2):
        if u in adj[v]:
            continue
        if len(adj[u].intersection(adj[v])) > ub:
            new_edges.add((u, v))

    while new_edges:
        for u, v in new_edges:
            adj[u].add(v)
            adj[v].add(u)

        new_edges = set()
        for u, v in itertools.combinations(adj, 2):
            if u in adj[v]:
                continue
            if len(adj[u].intersection(adj[v])) > ub:
                new_edges.add((u, v))
By Theorem 5.4, if any two vertices have ub + 1 common neighbors then we can add an edge between them.
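A minimal usage sketch, assuming the _theorem5p4 definition above (and its itertools import) is in scope: vertices 0 and 1 share three common neighbours, so with ub=2 the edge (0, 1) is added in place.

adj = {
    0: {2, 3, 4},
    1: {2, 3, 4},
    2: {0, 1},
    3: {0, 1},
    4: {0, 1},
}
_theorem5p4(adj, ub=2)              # mutates adj in place
assert 1 in adj[0] and 0 in adj[1]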
786
def ancestors(self, cl=None, noduplicates=True):
    if not cl:
        cl = self
    if cl.parents():
        bag = []
        for x in cl.parents():
            if x.uri != cl.uri:
                bag += [x] + self.ancestors(x, noduplicates)
            else:
                bag += [x]
        if noduplicates:
            return remove_duplicates(bag)
        else:
            return bag
    else:
        return []
returns all ancestors in the taxonomy
787
def screenshot(path=None):
    if not _rootinitialized:
        raise TDLError()
    if isinstance(path, str):
        _lib.TCOD_sys_save_screenshot(_encodeString(path))
    elif path is None:
        # pick the next free auto-numbered name (see docstring)
        filelist = _os.listdir()
        n = 1
        filename = 'screenshot%.3i.png' % n
        while filename in filelist:
            n += 1
            filename = 'screenshot%.3i.png' % n
        _lib.TCOD_sys_save_screenshot(_encodeString(filename))
    else:
        # assume a file-like object: save to a temp file, then copy its bytes
        tmpname = _os.tempnam()
        _lib.TCOD_sys_save_screenshot(_encodeString(tmpname))
        with open(tmpname, 'rb') as tmpfile:
            path.write(tmpfile.read())
        _os.remove(tmpname)
Capture the screen and save it as a png file. If path is None then the image will be placed in the current folder with the names: ``screenshot001.png, screenshot002.png, ...`` Args: path (Optional[Text]): The file path to save the screenshot.
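Nothing above can run without an initialised console, so here is a standalone check of just the auto-numbering fallback, using the screenshotNNN.png pattern the docstring implies:

filelist = ["screenshot001.png", "screenshot002.png"]  # pretend these already exist
n = 1
filename = 'screenshot%.3i.png' % n
while filename in filelist:
    n += 1
    filename = 'screenshot%.3i.png' % n
assert filename == "screenshot003.png"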
788
def _clear_pattern(self):
    self.__interval = None
    self.__days_of_week = set()
    self.__first_day_of_week = None
    self.__day_of_month = None
    self.__month = None
    self.__index = 
    self.__start_date = None
    self.__end_date = None
    self.__occurrences = None
Clears this event recurrence
789
def _is_valid_ins(self, ins_ir):
    invalid_instrs = [
        ReilMnemonic.JCC,
        ReilMnemonic.UNDEF,
        ReilMnemonic.UNKN,
    ]
    return not any([i.mnemonic in invalid_instrs for i in ins_ir])
Check for instruction validity as a gadget.
790
def register(self, classes=[]):
    if not isinstance(classes, list):
        raise AttributeError("plugins must be a list, not %s." % type(classes))

    plugin_registered = []
    for plugin_class in classes:
        plugin_name = plugin_class.__name__
        self.register_class(plugin_class, plugin_name)
        self._log.debug("Plugin %s registered" % plugin_name)
        plugin_registered.append(plugin_name)

    self._log.info("Plugins registered: %s" % ", ".join(plugin_registered))
Registers new plugins. The registration only creates a new entry for a plugin inside the _classes dictionary. It does not activate or even initialise the plugin. A plugin must be a class, which inherits directly or indirectly from GwBasePattern. :param classes: List of plugin classes :type classes: list
791
def delete(self):
    method = 
    _dopost(method, auth=True, photoset_id=self.id)
    return True
Deletes the photoset.
792
def publish_avatar_set(self, avatar_set):
    id_ = avatar_set.png_id

    done = False
    with (yield from self._publish_lock):
        if (yield from self._pep.available()):
            yield from self._pep.publish(
                namespaces.xep0084_data,
                avatar_xso.Data(avatar_set.image_bytes),
                id_=id_
            )
            yield from self._pep.publish(
                namespaces.xep0084_metadata,
                avatar_set.metadata,
                id_=id_
            )
            done = True

        if self._synchronize_vcard:
            my_vcard = yield from self._vcard.get_vcard()
            my_vcard.set_photo_data("image/png", avatar_set.image_bytes)
            self._vcard_id = avatar_set.png_id
            yield from self._vcard.set_vcard(my_vcard)
            self._presence_server.resend_presence()
            done = True

        if not done:
            raise RuntimeError(
                "failed to publish avatar: no protocol available"
            )
Make `avatar_set` the current avatar of the jid associated with this connection. If :attr:`synchronize_vcard` is true and PEP is available the vCard is only synchronized if the PEP update is successful. This means publishing the ``image/png`` avatar data and the avatar metadata set in pubsub. The `avatar_set` must be an instance of :class:`AvatarSet`. If :attr:`synchronize_vcard` is true the avatar is additionally published in the user vCard.
793
def flush(self, stats, cs_status=None):
    self.erase()
    self.display(stats, cs_status=cs_status)
Clear and update the screen. stats: Stats database to display cs_status: "None": standalone or server mode "Connected": Client is connected to the server "Disconnected": Client is disconnected from the server
794
def write(self, fptr):
    self._validate(writing=True)
    num_components = len(self.association)

    fptr.write(struct.pack(, 8 + 2 + num_components * 6, b))
    fptr.write(struct.pack(, num_components))
    for j in range(num_components):
        fptr.write(struct.pack( + * 3,
                                self.index[j],
                                self.channel_type[j],
                                self.association[j]))
Write a channel definition box to file.
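The struct format strings are stripped in the snippet above, but the sizes it computes (8-byte box header, 2-byte count, 6 bytes per channel) match the standard JP2 'cdef' box. A self-contained sketch under that assumption, with one made-up channel entry (Cn=0, Typ=0, Asoc=1):

import struct

num_components = 1
box = struct.pack(">I4s", 8 + 2 + num_components * 6, b"cdef")  # LBox + TBox
box += struct.pack(">H", num_components)                        # N
box += struct.pack(">" + "H" * 3, 0, 0, 1)                      # Cn, Typ, Asoc
assert len(box) == 8 + 2 + 6 * num_components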
795
def find_argname(self, argname, rec=False):
    if self.args:
        return _find_arg(argname, self.args, rec)
    return None, None
Get the index and :class:`AssignName` node for given name. :param argname: The name of the argument to search for. :type argname: str :param rec: Whether or not to include arguments in unpacked tuples in the search. :type rec: bool :returns: The index and node for the argument. :rtype: tuple(str or None, AssignName or None)
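A short usage sketch, assuming astroid is installed (this method lives on astroid's Arguments node, reached here through a parsed function's .args):

import astroid

func = astroid.extract_node("def f(a, b): pass")
index, node = func.args.find_argname("b")
assert index == 1 and node.name == "b"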
796
async def delTrigger(self, iden):
    trig = self.cell.triggers.get(iden)
    self._trig_auth_check(trig.get())
    self.cell.triggers.delete(iden)
Deletes a trigger from the cortex
797
def updateDynamics(self):
    history_vars_string = 
    arg_names = list(getArgNames(self.calcDynamics))
    if in arg_names:
        arg_names.remove()
    for name in arg_names:
        history_vars_string += + name + 
    update_dict = eval( + history_vars_string + )
    dynamics = self.calcDynamics(**update_dict)
    for var_name in self.dyn_vars:
        this_obj = getattr(dynamics, var_name)
        for this_type in self.agents:
            setattr(this_type, var_name, this_obj)
    return dynamics
Calculates a new "aggregate dynamic rule" using the history of variables named in track_vars, and distributes this rule to AgentTypes in agents. Parameters ---------- none Returns ------- dynamics : instance The new "aggregate dynamic rule" that agents believe in and act on. Should have attributes named in dyn_vars.
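The distribution step at the end is the part worth seeing in isolation: every attribute named in dyn_vars is copied from the returned dynamics object onto each agent type. A self-contained sketch with stand-in classes (not the HARK types themselves):

class Dynamics:
    pass

class AgentType:
    pass

dynamics = Dynamics()
dynamics.AFunc = lambda a: 1.05 * a        # the new aggregate rule
agents = [AgentType(), AgentType()]
for var_name in ["AFunc"]:                 # plays the role of self.dyn_vars
    this_obj = getattr(dynamics, var_name)
    for this_type in agents:
        setattr(this_type, var_name, this_obj)
assert agents[0].AFunc is dynamics.AFunc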
798
def from_pycbc(cls, fs, copy=True):
    return cls(fs.data, f0=0, df=fs.delta_f, epoch=fs.epoch, copy=copy)
Convert a `pycbc.types.frequencyseries.FrequencySeries` into a `FrequencySeries` Parameters ---------- fs : `pycbc.types.frequencyseries.FrequencySeries` the input PyCBC `~pycbc.types.frequencyseries.FrequencySeries` array copy : `bool`, optional, default: `True` if `True`, copy these data to a new array Returns ------- spectrum : `FrequencySeries` a GWpy version of the input frequency series
799
def _create_datadict(cls, internal_name):
    if internal_name == "LOCATION":
        return Location()
    if internal_name == "DESIGN CONDITIONS":
        return DesignConditions()
    if internal_name == "TYPICAL/EXTREME PERIODS":
        return TypicalOrExtremePeriods()
    if internal_name == "GROUND TEMPERATURES":
        return GroundTemperatures()
    if internal_name == "HOLIDAYS/DAYLIGHT SAVINGS":
        return HolidaysOrDaylightSavings()
    if internal_name == "COMMENTS 1":
        return Comments1()
    if internal_name == "COMMENTS 2":
        return Comments2()
    if internal_name == "DATA PERIODS":
        return DataPeriods()
    raise ValueError(
        "No DataDictionary known for {}".format(internal_name))
Creates an object depending on `internal_name` Args: internal_name (str): IDD name Raises: ValueError: if `internal_name` cannot be matched to a data dictionary object