Columns: code (string, 59 to 3.37k characters), docstring (string, 8 to 15.5k characters)
def get_bodies(self, obj): return reverse( 'electionnight_api_body-election-list', request=self.context['request'], kwargs={'date': obj.date} )
Bodies with offices up for election on election day.
def get_node_text(self, node, depth, sep, params=False,): indent = sep * depth s = '' tag = node.tag if tag == 'Indicator': node_text = self.get_i_text(node) elif tag == 'IndicatorItem': node_text = self.get_ii_text(node) else: raise IOCParseError('Invalid node encountered: {}'.format(tag)) s += '{}{}\n'.format(indent, node_text) if params: param_text = self.get_param_text(node.attrib.get('id')) for pt in param_text: s += '{}{}\n'.format(indent+sep, pt) if node.tag == 'Indicator': for child in node.getchildren(): s += self.get_node_text(node=child, depth=depth+1, sep=sep, params=params) return s
Get the text for a given Indicator or IndicatorItem node. This also walks an Indicator node to get the text of its children. :param node: Node to get the text for. :param depth: Tracks the number of recursions that have occurred; modifies the indentation. :param sep: Separator used for formatting the text. Multiplied by the depth to get the indentation. :param params: Boolean, set to True in order to display node parameters. :return: The formatted text as a string.
def cftime_to_nptime(times): times = np.asarray(times) new = np.empty(times.shape, dtype='M8[ns]') for i, t in np.ndenumerate(times): try: dt = pd.Timestamp(t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond) except ValueError as e: raise ValueError('Cannot convert date {} to a date in the ' 'standard calendar. Reason: {}.'.format(t, e)) new[i] = np.datetime64(dt) return new
Given an array of cftime.datetime objects, return an array of numpy.datetime64 objects of the same size
def callback(self, timestamp, event_type, payload): try: data = (event_type, payload) LOG.debug('RX NOTIFICATION ==>\nevent_type: %(event)s, ' 'payload: %(payload)s\n', ( {'event': event_type, 'payload': payload})) if 'create' in event_type: pri = self._create_pri elif 'delete' in event_type: pri = self._delete_pri elif 'update' in event_type: pri = self._update_pri else: pri = self._delete_pri self._pq.put((pri, timestamp, data)) except Exception as exc: LOG.exception('Error: %(err)s for event %(event)s', {'err': str(exc), 'event': event_type})
Callback method for processing events in notification queue. :param timestamp: time the message is received. :param event_type: event type in the notification queue such as identity.project.created, identity.project.deleted. :param payload: Contains information of an event
def add(self, event, subscriber, append=True): subs = self._subscribers if event not in subs: subs[event] = deque([subscriber]) else: sq = subs[event] if append: sq.append(subscriber) else: sq.appendleft(subscriber)
Add a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be added (and called when the event is published). :param append: Whether to append or prepend the subscriber to an existing subscriber list for the event.
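A minimal, self-contained sketch of the deque-based subscriber registry that this `add` method maintains; the `EventDispatcher` class name and the `publish` method are illustrative additions, not part of the original API.

from collections import deque

class EventDispatcher:
    """Illustrative registry mirroring the `add` logic above."""

    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        # New events get a fresh deque; existing ones append or prepend.
        subs = self._subscribers
        if event not in subs:
            subs[event] = deque([subscriber])
        elif append:
            subs[event].append(subscriber)
        else:
            subs[event].appendleft(subscriber)

    def publish(self, event, *args):
        # Call subscribers in registration order (prepended ones first).
        for subscriber in self._subscribers.get(event, ()):
            subscriber(*args)

dispatcher = EventDispatcher()
dispatcher.add('saved', lambda name: print('audit:', name))
dispatcher.add('saved', lambda name: print('first:', name), append=False)
dispatcher.publish('saved', 'report.txt')  # prints "first: report.txt" then "audit: report.txt"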
def image_set_aspect(aspect=1.0, axes="gca"): if axes == "gca": axes = _pylab.gca() e = axes.get_images()[0].get_extent() axes.set_aspect(abs((e[1]-e[0])/(e[3]-e[2]))/aspect)
sets the aspect ratio of the current zoom level of the imshow image
def get_peak_mem(): import resource rusage_denom = 1024. if sys.platform == 'darwin': rusage_denom = rusage_denom * rusage_denom mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom return mem
Returns peak memory use in megabytes from process start until the moment it is called.
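The platform check exists because `ru_maxrss` is reported in kilobytes on Linux but in bytes on macOS. A standalone sketch of the same normalisation (the `peak_memory_mb` name is illustrative; the `resource` module is POSIX-only, so this will not run on Windows):

import sys
import resource

def peak_memory_mb():
    # ru_maxrss is kilobytes on Linux, bytes on macOS -- normalise to megabytes.
    peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    if sys.platform == 'darwin':
        return peak / (1024.0 * 1024.0)
    return peak / 1024.0

if __name__ == '__main__':
    print('peak RSS: %.1f MB' % peak_memory_mb())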
def add_f90_to_env(env): try: F90Suffixes = env['F90FILESUFFIXES'] except KeyError: F90Suffixes = ['.f90'] try: F90PPSuffixes = env['F90PPFILESUFFIXES'] except KeyError: F90PPSuffixes = [] DialectAddToEnv(env, "F90", F90Suffixes, F90PPSuffixes, support_module = 1)
Add Builders and construction variables for f90 to an Environment.
async def check_response(response, valid_response_codes): if response.status == 204: return True if response.status in valid_response_codes: _js = await response.json() return _js else: raise PvApiResponseStatusError(response.status)
Check the response for correctness.
def is_imap(self, model): from pgmpy.models import BayesianModel if not isinstance(model, BayesianModel): raise TypeError("model must be an instance of BayesianModel") factors = [cpd.to_factor() for cpd in model.get_cpds()] factor_prod = six.moves.reduce(mul, factors) JPD_fact = DiscreteFactor(self.variables, self.cardinality, self.values) if JPD_fact == factor_prod: return True else: return False
Checks whether the given BayesianModel is an Imap of this JointProbabilityDistribution Parameters ----------- model : An instance of the BayesianModel class for which you want to check the Imap Returns -------- boolean : True if the given Bayesian model is an Imap of the joint probability distribution, False otherwise Examples -------- >>> from pgmpy.models import BayesianModel >>> from pgmpy.factors.discrete import TabularCPD >>> from pgmpy.factors.discrete import JointProbabilityDistribution >>> bm = BayesianModel([('diff', 'grade'), ('intel', 'grade')]) >>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]]) >>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]]) >>> grade_cpd = TabularCPD('grade', 3, ... [[0.1,0.1,0.1,0.1,0.1,0.1], ... [0.1,0.1,0.1,0.1,0.1,0.1], ... [0.8,0.8,0.8,0.8,0.8,0.8]], ... evidence=['diff', 'intel'], ... evidence_card=[2, 3]) >>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd) >>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032, 0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128] >>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val) >>> JPD.is_imap(bm) True
def get_system_config_directory(): if platform.system().lower() == 'windows': _cfg_directory = Path(os.getenv('APPDATA') or '~') elif platform.system().lower() == 'darwin': _cfg_directory = Path('~', 'Library', 'Preferences') else: _cfg_directory = Path(os.getenv('XDG_CONFIG_HOME') or '~/.config') logger.debug('Fetching config directory for {}.' .format(platform.system())) return _cfg_directory.joinpath(Path('mayalauncher/.config'))
Return platform specific config directory.
def sun(self, date=None, local=True, use_elevation=True): if local and self.timezone is None: raise ValueError("Local time requested but Location has no timezone set.") if self.astral is None: self.astral = Astral() if date is None: date = datetime.date.today() elevation = self.elevation if use_elevation else 0 sun = self.astral.sun_utc(date, self.latitude, self.longitude, observer_elevation=elevation) if local: for key, dt in sun.items(): sun[key] = dt.astimezone(self.tz) return sun
Returns dawn, sunrise, noon, sunset and dusk as a dictionary. :param date: The date for which to calculate the times. If no date is specified then the current date will be used. :type date: :class:`~datetime.date` :param local: True = Time to be returned in location's time zone; False = Time to be returned in UTC. If not specified then the time will be returned in local time :type local: bool :param use_elevation: True = Return times that allow for the location's elevation; False = Return times that don't use elevation. If not specified then times will take elevation into account. :type use_elevation: bool :returns: Dictionary with keys ``dawn``, ``sunrise``, ``noon``, ``sunset`` and ``dusk`` whose values are the results of the corresponding methods. :rtype: dict
def all_dims(self): return [ _get_dims(arr) if not isinstance(arr, ArrayList) else arr.all_dims for arr in self]
The dimensions for each of the arrays in this list
def fromkeys(cls, iterable, value=None): if not callable(value): return cls(dict.fromkeys(iterable, value)) return cls((key, value(key)) for key in iterable)
Create a new d from an iterable of keys. Args: iterable: Iterable containing keys value: value to associate with each key. If callable, each key is mapped to value(key) Returns: new DictWrapper Example: >>> from ww import d >>> sorted(d.fromkeys('123', value=4).items()) [('1', 4), ('2', 4), ('3', 4)] >>> sorted(d.fromkeys(range(3), value=lambda e:e**2).items()) [(0, 0), (1, 1), (2, 4)]
def datastore(self, domain, data_type, mapping=None): from .tcex_datastore import TcExDataStore return TcExDataStore(self, domain, data_type, mapping)
Get instance of the DataStore module. Args: domain (str): The domain can be either "system", "organization", or "local". When using "organization" the data store can be accessed by any Application in the entire org, while "local" access is restricted to the App writing the data. The "system" option should not be used in almost all cases. data_type (str): The data type descriptor (e.g., tc:whois:cache). Returns: object: An instance of the DataStore Class.
def idle_connections(self): return [c for c in self.connections.values() if not c.busy and not c.closed]
Return a list of idle connections :rtype: list
async def get_speaker_settings(self) -> List[Setting]: speaker_settings = await self.services["audio"]["getSpeakerSettings"]({}) return [Setting.make(**x) for x in speaker_settings]
Return speaker settings.
def serialise(self, default_endianness=None): endianness = (default_endianness or DEFAULT_ENDIANNESS) if hasattr(self, '_Meta'): endianness = self._Meta.get('endianness', endianness) inferred_fields = set() for k, v in iteritems(self._type_mapping): inferred_fields |= {x._name for x in v.dependent_fields()} for field in inferred_fields: setattr(self, field, None) for k, v in iteritems(self._type_mapping): v.prepare(self, getattr(self, k)) message = b'' for k, v in iteritems(self._type_mapping): message += v.value_to_bytes(self, getattr(self, k), default_endianness=endianness) return message
Serialise a message, without including any framing. :param default_endianness: The default endianness, unless overridden by the fields or class metadata. Should usually be left at ``None``. Otherwise, use ``'<'`` for little endian and ``'>'`` for big endian. :type default_endianness: str :return: The serialised message. :rtype: bytes
def run_std_server(self): config = tf.estimator.RunConfig() server = tf.train.Server( config.cluster_spec, job_name=config.task_type, task_index=config.task_id, protocol=config.protocol) server.join()
Starts a TensorFlow server and joins the serving thread. Typically used for parameter servers. Raises: ValueError: if not enough information is available in the estimator's config to create a server.
def _value_ref(self, column, value, *, dumped=False, inner=False): ref = ":v{}".format(self.next_index) if not dumped: typedef = column.typedef for segment in path_of(column): typedef = typedef[segment] if inner: typedef = typedef.inner_typedef value = self.engine._dump(typedef, value) self.attr_values[ref] = value self.counts[ref] += 1 return ref, value
inner=True uses column.typedef.inner_typedef instead of column.typedef
def get_report_overview(self, month, file_path): api = self._get_api(billing.DefaultApi) month = self._month_converter(month) response = api.get_billing_report(month=month) if file_path and response: content = api.api_client.sanitize_for_serialization(response.to_dict()) with open(file_path, 'w') as fh: fh.write( json.dumps( content, sort_keys=True, indent=2, ) ) return response
Downloads a report overview :param month: month as datetime instance, or string in YYYY-MM format :type month: str or datetime :param str file_path: location to store output file :return: outcome :rtype: True or None
def binned_entropy(x, max_bins): if not isinstance(x, (np.ndarray, pd.Series)): x = np.asarray(x) hist, bin_edges = np.histogram(x, bins=max_bins) probs = hist / x.size return - np.sum([p * np.log(p) for p in probs if p != 0])
First bins the values of x into max_bins equidistant bins. Then calculates the value of .. math:: - \\sum_{k=0}^{min(max\\_bins, len(x))} p_k log(p_k) \\cdot \\mathbf{1}_{(p_k > 0)} where :math:`p_k` is the percentage of samples in bin :math:`k`. :param x: the time series to calculate the feature of :type x: numpy.ndarray :param max_bins: the maximal number of bins :type max_bins: int :return: the value of this feature :return type: float
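A standalone sketch of the same computation, summing -p*log(p) over the non-empty bins with a vectorised mask; the free-standing `binned_entropy` here is illustrative and assumes only numpy.

import numpy as np

def binned_entropy(x, max_bins):
    # Histogram the values, convert counts to probabilities, and sum -p*log(p)
    # over the non-empty bins only.
    x = np.asarray(x)
    hist, _ = np.histogram(x, bins=max_bins)
    probs = hist / x.size
    nonzero = probs[probs > 0]
    return -np.sum(nonzero * np.log(nonzero))

rng = np.random.default_rng(0)
print(binned_entropy(rng.uniform(size=1000), max_bins=10))  # close to log(10) ~= 2.30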
def _collect_paths(element): output = [] path = vectors.el_to_path_vector(element) root = path[0] params = element.params if element.params else None match = root.find(element.getTagName(), params) if len(match) == 1: output.append( PathCall("find", 0, [element.getTagName(), params]) ) output.extend(path_patterns.neighbours_pattern(element)) output.extend(path_patterns.predecesors_pattern(element, root)) index_backtrack = [] last_index_backtrack = [] params_backtrack = [] last_params_backtrack = [] for el in reversed(path): if not el.parent: continue tag_name = el.getTagName() match = el.parent.wfind(tag_name).childs index = match.index(el) index_backtrack.append( PathCall("wfind", index, [tag_name]) ) last_index_backtrack.append( PathCall("wfind", index - len(match), [tag_name]) ) if el.params: match = el.parent.wfind(tag_name, el.params).childs index = match.index(el) params_backtrack.append( PathCall("wfind", index, [tag_name, el.params]) ) last_params_backtrack.append( PathCall("wfind", index - len(match), [tag_name, el.params]) ) else: params_backtrack.append( PathCall("wfind", index, [tag_name]) ) last_params_backtrack.append( PathCall("wfind", index - len(match), [tag_name]) ) output.extend([ Chained(reversed(params_backtrack)), Chained(reversed(last_params_backtrack)), Chained(reversed(index_backtrack)), Chained(reversed(last_index_backtrack)), ]) return output
Collect all possible paths which lead to `element`. Function returns the standard path from the root element to this one, a reverse path which uses negative indexes, and also some pattern matches, like "this is an element which has a neighbour with id 7" and so on. Args: element (obj): HTMLElement instance. Returns: list: List of :class:`.PathCall` and :class:`.Chained` objects.
def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]: if "number" in answer and answer["number"]: return tuple([str(answer["number"])]), "number" elif "spans" in answer and answer["spans"]: return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans" elif "date" in answer: return tuple(["{0} {1} {2}".format(answer["date"]["day"], answer["date"]["month"], answer["date"]["year"])]), "date" else: raise ValueError(f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}")
Takes an answer JSON blob from the DROP data release and converts it into strings used for evaluation.
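A hedged usage example, assuming the function above (and its typing/json imports) is in scope; the answer blobs are hypothetical but shaped like the fields the function reads (`number`, `spans`, `date`).

# Hypothetical DROP-style answer blobs.
number_answer = {"number": "3", "spans": [], "date": {}}
span_answer = {"number": "", "spans": ["Seahawks", "Broncos"], "date": {}}
date_answer = {"number": "", "spans": [], "date": {"day": "7", "month": "July", "year": "1990"}}

print(answer_json_to_strings(number_answer))  # (('3',), 'number')
print(answer_json_to_strings(span_answer))    # (('Seahawks', 'Broncos'), 'spans')
print(answer_json_to_strings(date_answer))    # (('7 July 1990',), 'date')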
def process_messages(tag, retries=0): from furious.batcher import bump_batch from furious.batcher import MESSAGE_DEFAULT_QUEUE from furious.batcher import MessageIterator from furious.batcher import MessageProcessor from google.appengine.api import memcache if retries > 5: logging.info("Process messages hit max retry and is exiting") return message_iterator = MessageIterator(tag, MESSAGE_DEFAULT_QUEUE, 500) client = memcache.Client() stats = client.gets(tag) stats = json.loads(stats) if stats else get_default_stats() work_processed = False for message in message_iterator: work_processed = True value = int(message.get("value", 0)) color = message.get("color").lower() set_stats(stats["totals"], value) set_stats(stats["colors"][color], value) json_stats = json.dumps(stats) if not client.add(tag, json_stats): if not client.cas(tag, json_stats): raise Exception("Transaction Collision.") bump_batch(tag) if work_processed: retries = 0 else: retries += 1 processor = MessageProcessor( target=process_messages, args=("colors",), kwargs={'retries': retries}, tag="colors") processor.start()
Processes the messages pulled from a queue based off the tag passed in. Will insert another processor if any work was processed or the retry count is under the max retry count. Will update an aggregated stats object with the data in the payload of the messages processed. :param tag: :class: `str` Tag to query the queue on :param retries: :class: `int` Number of retries the job has processed
def RunJob(self, job): if not job.leased_until: raise LockError("CronJob must be leased for Run() to be called.") if job.leased_until < rdfvalue.RDFDatetime.Now(): raise LockError("CronJob lease expired for %s." % job.cron_job_id) logging.info("Starting cron job: %s", job.cron_job_id) if job.args.action_type == job.args.ActionType.SYSTEM_CRON_ACTION: cls_name = job.args.system_cron_action.job_class_name job_cls = registry.SystemCronJobRegistry.CronJobClassByName(cls_name) name = "%s runner" % cls_name elif job.args.action_type == job.args.ActionType.HUNT_CRON_ACTION: job_cls = registry.CronJobRegistry.CronJobClassByName("RunHunt") name = "Hunt runner" else: raise ValueError( "CronJob %s doesn't have a valid args type set." % job.cron_job_id) run_state = rdf_cronjobs.CronJobRun( cron_job_id=job.cron_job_id, status="RUNNING") run_state.GenerateRunId() run_obj = job_cls(run_state, job) wait_for_start_event, signal_event, wait_for_write_event = ( threading.Event(), threading.Event(), threading.Event()) try: self._GetThreadPool().AddTask( target=run_obj.StartRun, args=(wait_for_start_event, signal_event, wait_for_write_event), name=name, blocking=False, inline=False) if not wait_for_start_event.wait(TASK_STARTUP_WAIT): logging.error("Cron job run task for %s is too slow to start.", job.cron_job_id) return False signal_event.set() wait_for_write_event.wait(TASK_STARTUP_WAIT) return True except threadpool.Full: return False
Does the actual work of the Cron, if the job is due to run. Args: job: The cronjob rdfvalue that should be run. Must be leased. Returns: A boolean indicating if this cron job was started or not. False may be returned when the threadpool is already full. Raises: LockError: if the object is not locked. ValueError: If the job argument is invalid.
def _write(self, context, report_dir, report_name, assets_dir=None, template=None): if template is None: template = self._get_template() report = template.render(context) output_file = os.path.join(report_dir, report_name) with open(output_file, 'w', encoding='utf-8') as fh: fh.write(report) if assets_dir: self._copy_static_assets(assets_dir)
Writes the data in `context` in the report's template to `report_name` in `report_dir`. If `assets_dir` is supplied, copies all assets for this report to the specified directory. If `template` is supplied, uses that template instead of automatically finding it. This is useful if a single report generates multiple files using the same template. :param context: context data to render within the template :type context: `dict` :param report_dir: directory to write the report to :type report_dir: `str` :param report_name: name of file to write the report to :type report_name: `str` :param assets_dir: optional directory to output report assets to :type assets_dir: `str` :param template: template to render and output :type template: `jinja2.Template`
def _depth_image_callback(self, image_msg): encoding = image_msg.encoding try: depth_arr = self._bridge.imgmsg_to_cv2(image_msg, encoding) except CvBridgeError as e: rospy.logerr(e) return depth = np.array(depth_arr*MM_TO_METERS, np.float32) self._cur_depth_im = DepthImage(depth, self._frame)
Subscribe to the depth image topic and keep the current depth image up to date.
def _check_for_default_values(fname, arg_val_dict, compat_args): for key in arg_val_dict: try: v1 = arg_val_dict[key] v2 = compat_args[key] if (v1 is not None and v2 is None) or \ (v1 is None and v2 is not None): match = False else: match = (v1 == v2) if not is_bool(match): raise ValueError("'match' is not a boolean") except ValueError: match = (arg_val_dict[key] is compat_args[key]) if not match: raise ValueError(("the '{arg}' parameter is not " "supported in the pandas " "implementation of {fname}()". format(fname=fname, arg=key)))
Check that the keys in `arg_val_dict` are mapped to their default values as specified in `compat_args`. Note that this function is to be called only when it has been checked that arg_val_dict.keys() is a subset of compat_args
def _webdav_move_copy(self, remote_path_source, remote_path_target, operation): if operation != "MOVE" and operation != "COPY": return False if remote_path_target[-1] == '/': remote_path_target += os.path.basename(remote_path_source) if not (remote_path_target[0] == '/'): remote_path_target = '/' + remote_path_target remote_path_source = self._normalize_path(remote_path_source) headers = { 'Destination': self._webdav_url + parse.quote( self._encode_string(remote_path_target)) } return self._make_dav_request( operation, remote_path_source, headers=headers )
Copies or moves a remote file or directory :param remote_path_source: source file or folder to copy / move :param remote_path_target: target file to which to copy / move :param operation: MOVE or COPY :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
def cookie_length(self, domain): cookies = self.cookie_jar._cookies if domain not in cookies: return 0 length = 0 for path in cookies[domain]: for name in cookies[domain][path]: cookie = cookies[domain][path][name] length += len(path) + len(name) + len(cookie.value or '') return length
Return approximate length of all cookie key-values for a domain.
def _drop_gracefully(self): mr_id = self.request.get("mapreduce_id") logging.error("Failed to kick off job %s", mr_id) state = model.MapreduceState.get_by_job_id(mr_id) if not self._check_mr_state(state, mr_id): return config = util.create_datastore_write_config(state.mapreduce_spec) model.MapreduceControl.abort(mr_id, config=config) state.active = False state.result_status = model.MapreduceState.RESULT_FAILED ControllerCallbackHandler._finalize_job(state.mapreduce_spec, state)
See parent.
def enumerate(vendor_id=0, product_id=0): info = hidapi.hid_enumerate(vendor_id, product_id) cursor = info while cursor: yield DeviceInfo(cursor) cursor = cursor.next hidapi.hid_free_enumeration(info)
Enumerate the HID Devices. Returns a generator that yields all of the HID devices attached to the system. :param vendor_id: Only return devices which match this vendor id :type vendor_id: int :param product_id: Only return devices which match this product id :type product_id: int :return: Generator that yields information about attached HID devices :rtype: generator(DeviceInfo)
def inv(self): if self.det == 0: raise ValueError("SquareTensor is non-invertible") return SquareTensor(np.linalg.inv(self))
Shorthand for the matrix inverse of a SquareTensor.
def new_approve_transaction(self, asset: str, b58_send_address: str, b58_recv_address: str, amount: int, b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction: if not isinstance(b58_send_address, str) or not isinstance(b58_recv_address, str): raise SDKException(ErrorCode.param_err('the data type of base58 encode address should be the string.')) if len(b58_send_address) != 34 or len(b58_recv_address) != 34: raise SDKException(ErrorCode.param_err('the length of base58 encode address should be 34 bytes.')) if amount <= 0: raise SDKException(ErrorCode.other_error('the amount should be greater than zero.')) if gas_price < 0: raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.')) if gas_limit < 0: raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.')) contract_address = self.get_asset_address(asset) raw_send = Address.b58decode(b58_send_address).to_bytes() raw_recv = Address.b58decode(b58_recv_address).to_bytes() raw_payer = Address.b58decode(b58_payer_address).to_bytes() args = {"from": raw_send, "to": raw_recv, "amount": amount} invoke_code = build_native_invoke_code(contract_address, b'\x00', 'approve', args) return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, raw_payer, invoke_code, bytearray(), list())
This interface is used to generate a Transaction object for approve. :param asset: a string which is used to indicate which asset we want to approve. :param b58_send_address: a base58 encode address which indicate where the approve from. :param b58_recv_address: a base58 encode address which indicate where the approve to. :param amount: the amount of asset that will be approved. :param b58_payer_address: a base58 encode address which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: a Transaction object which can be used for approve.
def _DecodeUnknownMessages(message, encoded_message, pair_type): field_type = pair_type.value.type new_values = [] all_field_names = [x.name for x in message.all_fields()] for name, value_dict in six.iteritems(encoded_message): if name in all_field_names: continue value = PyValueToMessage(field_type, value_dict) if pair_type.value.repeated: value = _AsMessageList(value) new_pair = pair_type(key=name, value=value) new_values.append(new_pair) return new_values
Process unknown fields in encoded_message of a message type.
def coinc(self, s0, s1, slide, step): loglr = - s0 - s1 threshes = [self.fits_by_tid[i]['thresh'] for i in self.ifos] loglr += sum([t**2. / 2. for t in threshes]) return (2. * loglr) ** 0.5
Calculate the final coinc ranking statistic
def register_service(self, name: str, service: Any): if not isinstance(service, type): if hasattr(service, '__class__'): _ensure_service_name(service.__class__, name) self.services[name] = service return if self._services_initialized: from warnings import warn warn('Services have already been initialized. Please register ' f'{name} sooner.') return self._services_registry[_ensure_service_name(service, name)] = service
Method to register a service.
def filters(self): results = [] filters = self.qs.get('filter') if filters is not None: try: results.extend(json.loads(filters)) except (ValueError, TypeError): raise InvalidFilters("Parse error") if self._get_key_values('filter['): results.extend(self._simple_filters(self._get_key_values('filter['))) return results
Return filters from query string. :return list: filter information
def getLogger(cls): if cls.logger is None: with cls.lock: if cls.logger is None: cls.logger = logging.getLogger('toil-rt') try: level = os.environ[cls.envPrefix + 'LEVEL'] except KeyError: cls.logger.setLevel(logging.CRITICAL) else: toil.lib.bioio.setLogLevel(level, cls.logger) try: address = os.environ[cls.envPrefix + 'ADDRESS'] except KeyError: pass else: host, port = address.split(':') cls.logger.addHandler(JSONDatagramHandler(host, int(port))) return cls.logger
Get the logger that logs real-time to the leader. Note that if the returned logger is used on the leader, you will see the message twice, since it still goes to the normal log handlers, too.
def list(self, allJobs=False): jobs = self.server.call('get', '/job') return [Job(job['id'], self.server) for job in jobs if allJobs or self._job_owned(job)]
Return list of jobs at this endpoint. Call list(allJobs=True) to see all jobs, not just the ones managed by this Client
def nla_for_each_attr(head, len_, rem): pos = head rem.value = len_ while nla_ok(pos, rem): yield pos pos = nla_next(pos, rem)
Iterate over a stream of attributes. https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/attr.h#L262 Positional arguments: head -- first nlattr with more in its bytearray payload (nlattr class instance). len_ -- length of attribute stream (integer). rem -- initialized to len, holds bytes currently remaining in stream (c_int). Returns: Generator yielding nlattr instances.
def _op(self, line, op=None, offset=0): if op is None: op = self.op_count[line] return "line{}_gate{}".format(line, op + offset)
Returns the gate name for placing a gate on a line. :param int line: Line number. :param int op: Operation number or, by default, uses the current op count. :return: Gate name. :rtype: string
def approxQuantile(self, col, probabilities, relativeError): if not isinstance(col, (basestring, list, tuple)): raise ValueError("col should be a string, list or tuple, but got %r" % type(col)) isStr = isinstance(col, basestring) if isinstance(col, tuple): col = list(col) elif isStr: col = [col] for c in col: if not isinstance(c, basestring): raise ValueError("columns should be strings, but got %r" % type(c)) col = _to_list(self._sc, col) if not isinstance(probabilities, (list, tuple)): raise ValueError("probabilities should be a list or tuple") if isinstance(probabilities, tuple): probabilities = list(probabilities) for p in probabilities: if not isinstance(p, (float, int, long)) or p < 0 or p > 1: raise ValueError("probabilities should be numerical (float, int, long) in [0,1].") probabilities = _to_list(self._sc, probabilities) if not isinstance(relativeError, (float, int, long)) or relativeError < 0: raise ValueError("relativeError should be numerical (float, int, long) >= 0.") relativeError = float(relativeError) jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError) jaq_list = [list(j) for j in jaq] return jaq_list[0] if isStr else jaq_list
Calculates the approximate quantiles of numerical columns of a DataFrame. The result of this algorithm has the following deterministic bound: If the DataFrame has N elements and if we request the quantile at probability `p` up to error `err`, then the algorithm will return a sample `x` from the DataFrame so that the *exact* rank of `x` is close to (p * N). More precisely, floor((p - err) * N) <= rank(x) <= ceil((p + err) * N). This method implements a variation of the Greenwald-Khanna algorithm (with some speed optimizations). The algorithm was first present in [[https://doi.org/10.1145/375663.375670 Space-efficient Online Computation of Quantile Summaries]] by Greenwald and Khanna. Note that null values will be ignored in numerical columns before calculation. For columns only containing null values, an empty list is returned. :param col: str, list. Can be a single column name, or a list of names for multiple columns. :param probabilities: a list of quantile probabilities Each number must belong to [0, 1]. For example 0 is the minimum, 0.5 is the median, 1 is the maximum. :param relativeError: The relative target precision to achieve (>= 0). If set to zero, the exact quantiles are computed, which could be very expensive. Note that values greater than 1 are accepted but give the same result as 1. :return: the approximate quantiles at the given probabilities. If the input `col` is a string, the output is a list of floats. If the input `col` is a list or tuple of strings, the output is also a list, but each element in it is a list of floats, i.e., the output is a list of list of floats. .. versionchanged:: 2.2 Added support for multiple columns.
def base_domain_matches(d1, d2): r1 = tldextract.extract(d1) r2 = tldextract.extract(d2) return r1.domain == r2.domain and r1.suffix == r2.suffix
Check if two domains have the same base domain, using the Public Suffix List. >>> base_domain_matches('https://hasjob.co', 'hasjob.co') True >>> base_domain_matches('hasgeek.hasjob.co', 'hasjob.co') True >>> base_domain_matches('hasgeek.com', 'hasjob.co') False >>> base_domain_matches('static.hasgeek.co.in', 'hasgeek.com') False >>> base_domain_matches('static.hasgeek.co.in', 'hasgeek.co.in') True >>> base_domain_matches('[email protected]', 'example.com') True
def put_many(self, items): for key, value in items: self.put(key, value)
Put many key-value pairs. This method may take advantage of performance or atomicity features of the underlying store. It does not guarantee that all items will be set in the same transaction, only that transactions may be used for performance. :param items: An iterable producing (key, value) tuples.
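A sketch of what "taking advantage of performance features" can look like in a concrete store: batching the whole iterable into one transaction. The `SQLiteStore` class is illustrative, not the library's actual backend.

import sqlite3

class SQLiteStore:
    """Illustrative key-value store where put_many batches writes in one transaction."""

    def __init__(self, path=':memory:'):
        self._db = sqlite3.connect(path)
        self._db.execute('CREATE TABLE IF NOT EXISTS kv (key TEXT PRIMARY KEY, value TEXT)')

    def put(self, key, value):
        with self._db:
            self._db.execute('INSERT OR REPLACE INTO kv VALUES (?, ?)', (key, value))

    def put_many(self, items):
        # One transaction for the whole batch -- faster than repeated put() calls,
        # though (as the docstring notes) atomicity is not part of the contract.
        with self._db:
            self._db.executemany('INSERT OR REPLACE INTO kv VALUES (?, ?)', list(items))

    def get(self, key):
        row = self._db.execute('SELECT value FROM kv WHERE key = ?', (key,)).fetchone()
        return row[0] if row else None

store = SQLiteStore()
store.put_many([('a', '1'), ('b', '2')])
print(store.get('b'))  # '2'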
def set_version_state(name,object=None,delete=False): if env.project_fullname: state_name = '-'.join([env.project_fullname,name]) else: state_name = name with fab_settings(warn_only=True): if not exists('/var/local/woven', use_sudo=True): sudo('mkdir /var/local/woven') if not delete: sudo('touch /var/local/woven/%s'% state_name) if object is not None: fd, file_path = tempfile.mkstemp() f = os.fdopen(fd,'w') f.write(json.dumps(object)) f.close() put(file_path,'/tmp/%s'% state_name) os.remove(file_path) sudo('cp /tmp/%s /var/local/woven/%s'% (state_name,state_name)) else: sudo('rm -f /var/local/woven/%s'% state_name) return state_name
Sets a simple 'state' on the server by creating a file with the desired state's name + version, storing ``object`` as a json string if supplied. Returns the filename used to store state.
def build_swagger(graph, ns, operations): base_path = graph.build_route_path(ns.path, ns.prefix) schema = swagger.Swagger( swagger="2.0", info=swagger.Info( title=graph.metadata.name, version=ns.version, ), consumes=swagger.MediaTypeList([ swagger.MimeType("application/json"), ]), produces=swagger.MediaTypeList([ swagger.MimeType("application/json"), ]), basePath=base_path, paths=swagger.Paths(), definitions=swagger.Definitions(), ) add_paths(schema.paths, base_path, operations) add_definitions(schema.definitions, operations) try: schema.validate() except Exception: logger.exception("Swagger definition did not validate against swagger schema") raise return schema
Build out the top-level swagger definition.
def get_subgraph_by_annotation_value(graph, annotation, values): if isinstance(values, str): values = {values} return get_subgraph_by_annotations(graph, {annotation: values})
Induce a sub-graph over all edges whose annotations match the given key and value. :param pybel.BELGraph graph: A BEL graph :param str annotation: The annotation to group by :param values: The value(s) for the annotation :type values: str or iter[str] :return: A subgraph of the original BEL graph :rtype: pybel.BELGraph
def release(ctx, version): invoke.run("git tag -s {0} -m '{0} release'".format(version)) invoke.run("git push --tags") invoke.run("python setup.py sdist") invoke.run("twine upload -s dist/PyNaCl-{0}* ".format(version)) session = requests.Session() token = getpass.getpass("Input the Jenkins token: ") response = session.post( "{0}/build".format(JENKINS_URL), params={ "cause": "Building wheels for {0}".format(version), "token": token } ) response.raise_for_status() wait_for_build_completed(session) paths = download_artifacts(session) invoke.run("twine upload {0}".format(" ".join(paths)))
``version`` should be a string like '0.4' or '1.0'.
def fmt_to_datatype_v4(fmt, shape, array=False): size = fmt.itemsize * 8 if not array and shape[1:] and fmt.itemsize == 1 and fmt.kind == "u": data_type = v4c.DATA_TYPE_BYTEARRAY for dim in shape[1:]: size *= dim else: if fmt.kind == "u": if fmt.byteorder in "=<|": data_type = v4c.DATA_TYPE_UNSIGNED_INTEL else: data_type = v4c.DATA_TYPE_UNSIGNED_MOTOROLA elif fmt.kind == "i": if fmt.byteorder in "=<|": data_type = v4c.DATA_TYPE_SIGNED_INTEL else: data_type = v4c.DATA_TYPE_SIGNED_MOTOROLA elif fmt.kind == "f": if fmt.byteorder in "=<": data_type = v4c.DATA_TYPE_REAL_INTEL else: data_type = v4c.DATA_TYPE_REAL_MOTOROLA elif fmt.kind in "SV": data_type = v4c.DATA_TYPE_STRING_LATIN_1 elif fmt.kind == "b": data_type = v4c.DATA_TYPE_UNSIGNED_INTEL size = 1 else: message = f"Unknown type: dtype={fmt}, shape={shape}" logger.exception(message) raise MdfException(message) return data_type, size
convert numpy dtype format string to mdf version 4 channel data type and size Parameters ---------- fmt : numpy.dtype numpy data type shape : tuple numpy array shape array : bool disambiguate between bytearray and channel array Returns ------- data_type, size : int, int integer data type as defined by ASAM MDF and bit size
def execute(self, input_data): if (input_data['meta']['type_tag'] != 'zip'): return {'error': self.__class__.__name__+': called on '+input_data['meta']['type_tag']} view = {} view['payload_md5s'] = input_data['unzip']['payload_md5s'] view['yara_sigs'] = input_data['yara_sigs']['matches'].keys() view.update(input_data['meta']) view['payload_meta'] = [self.workbench.work_request('meta', md5) for md5 in input_data['unzip']['payload_md5s']] return view
Execute the ViewZip worker
def add_flag(var, flag): if var: var = flag + " " + str(var) else: var = "" return var
For use when calling command-line scripts from within a program. If a variable is present, add its proper command-line flag. Return a string.
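A usage sketch, assuming `add_flag` as defined above; the command and flag names are hypothetical. Flags for unset variables collapse to empty strings, so joining the non-empty pieces yields the final command line.

threads = 4
verbose = None
parts = ['sort', 'file.txt', add_flag(threads, '--parallel'), add_flag(verbose, '--verbose')]
cmd = ' '.join(p for p in parts if p)
print(cmd)  # sort file.txt --parallel 4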
def set_deployment_run_name(self): log = logging.getLogger(self.cls_logger + '.set_deployment_run_name') self.deployment_run_name = self.get_value('cons3rt.deploymentRun.name') log.info('Found deployment run name: {n}'.format(n=self.deployment_run_name))
Sets the deployment run name from deployment properties :return: None
def write_elements(elements, mass_erase_used, progress=None): mem_layout = get_memory_layout(__dev) for elem in elements: addr = elem['addr'] size = elem['size'] data = elem['data'] elem_size = size elem_addr = addr if progress: progress(elem_addr, 0, elem_size) while size > 0: write_size = size if not mass_erase_used: for segment in mem_layout: if addr >= segment['addr'] and \ addr <= segment['last_addr']: page_size = segment['page_size'] page_addr = addr & ~(page_size - 1) if addr + write_size > page_addr + page_size: write_size = page_addr + page_size - addr page_erase(page_addr) break write_memory(addr, data[:write_size], progress, elem_addr, elem_size) data = data[write_size:] addr += write_size size -= write_size if progress: progress(elem_addr, addr - elem_addr, elem_size)
Writes the indicated elements into the target memory, erasing as needed.
def path_to_pattern(path, metadata=None): if not isinstance(path, str): return pattern = path if metadata: cache = metadata.get('cache') if cache: regex = next(c.get('regex') for c in cache if c.get('argkey') == 'urlpath') pattern = pattern.split(regex)[-1] return pattern
Remove source information from path when using caching Returns None if path is not str Parameters ---------- path : str Path to data optionally containing format_strings metadata : dict, optional Extra arguments to the class, contains any cache information Returns ------- pattern : str Pattern style path stripped of everything to the left of cache regex.
def load_module(self, module): m_ref = self._modules_map.get(module) if m_ref is None: raise LoaderError('Module "{0}" was not found'.format(module)) mod = importlib.import_module('ansible.modules{0}'.format( '.'.join([elm.split('.')[0] for elm in m_ref.split(os.path.sep)]))) return mod
Introspect Ansible module. :param module: :return:
def open_file(link, session=None, stream=True): if not isinstance(link, six.string_types): try: link = link.url_without_fragment except AttributeError: raise ValueError("Cannot parse url from unknown type: {0!r}".format(link)) if not is_valid_url(link) and os.path.exists(link): link = path_to_url(link) if is_file_url(link): local_path = url_to_path(link) if os.path.isdir(local_path): raise ValueError("Cannot open directory for read: {}".format(link)) else: with io.open(local_path, "rb") as local_file: yield local_file else: headers = {"Accept-Encoding": "identity"} if not session: from requests import Session session = Session() with session.get(link, headers=headers, stream=stream) as resp: try: raw = getattr(resp, "raw", None) result = raw if raw else resp yield result finally: if raw: conn = getattr(raw, "_connection") if conn is not None: conn.close() result.close()
Open local or remote file for reading. :type link: pip._internal.index.Link or str :type session: requests.Session :param bool stream: Try to stream if remote, default True :raises ValueError: If link points to a local directory. :return: a context manager to the opened file-like object
def solar_azimuth_analytical(latitude, hourangle, declination, zenith): numer = (np.cos(zenith) * np.sin(latitude) - np.sin(declination)) denom = (np.sin(zenith) * np.cos(latitude)) with np.errstate(invalid='ignore', divide='ignore'): cos_azi = numer / denom cos_azi = \ np.where(np.isclose(denom, 0.0, rtol=0.0, atol=1e-8), 1.0, cos_azi) cos_azi = \ np.where(np.isclose(cos_azi, 1.0, rtol=0.0, atol=1e-8), 1.0, cos_azi) cos_azi = \ np.where(np.isclose(cos_azi, -1.0, rtol=0.0, atol=1e-8), -1.0, cos_azi) with np.errstate(invalid='ignore'): sign_ha = np.sign(hourangle) return sign_ha * np.arccos(cos_azi) + np.pi
Analytical expression of solar azimuth angle based on spherical trigonometry. Parameters ---------- latitude : numeric Latitude of location in radians. hourangle : numeric Hour angle in the local solar time in radians. declination : numeric Declination of the sun in radians. zenith : numeric Solar zenith angle in radians. Returns ------- azimuth : numeric Solar azimuth angle in radians. References ---------- [1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal Processes, 3rd Edition" pp. 14, J. Wiley and Sons, New York (2006) [2] J. H. Seinfeld and S. N. Pandis, "Atmospheric Chemistry and Physics" p. 132, J. Wiley (1998) [3] `Wikipedia: Solar Azimuth Angle <https://en.wikipedia.org/wiki/Solar_azimuth_angle>`_ [4] `PVCDROM: Azimuth Angle <http://www.pveducation.org/pvcdrom/2- properties-sunlight/azimuth-angle>`_ See Also -------- declination_spencer71 declination_cooper69 hour_angle solar_zenith_analytical
def unmount_path(path, force=False): r = util.subp(['umount', path]) if not force: if r.return_code != 0: raise ValueError(r.stderr)
Unmounts the directory specified by path.
def print_help(self): seen_aliases = set() print('-'*80) for cmd in sorted(self.cmds): if cmd not in self.builtin_cmds: if cmd not in seen_aliases: if cmd in self.aliases: seen_aliases.update(self.aliases[cmd]) disp = '/'.join(self.aliases[cmd]) else: disp = cmd _, parser = self.cmds[cmd] usage = parser.format_usage() print('%s: %s' % (disp, ' '.join(usage.split()[2:]))) print('External CLIs: %s' % ', '.join(sorted(self.clis)))
Prints usage of all registered commands, collapsing aliases into one record
def random_variant(variants, weights): total = 0 accumulator = [] for w in weights: total += w accumulator.append(total) r = randint(0, total - 1) yield variants[bisect(accumulator, r)]
A generator that, given a list of variants and a corresponding list of weights, returns one random weighted selection.
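The same cumulative-weights-plus-bisect technique as a standalone function (the `weighted_choice` name is illustrative): the running totals partition [0, total) so each variant owns a slice proportional to its weight.

from bisect import bisect
from random import randint

def weighted_choice(variants, weights):
    # Build a running total of the weights, draw a uniform integer in [0, total),
    # and map it back to a variant with binary search.
    cumulative, total = [], 0
    for w in weights:
        total += w
        cumulative.append(total)
    r = randint(0, total - 1)
    return variants[bisect(cumulative, r)]

counts = {'a': 0, 'b': 0}
for _ in range(10000):
    counts[weighted_choice(['a', 'b'], [1, 3])] += 1
print(counts)  # roughly 2500 / 7500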
def include(self, target): if self._clean.isDict(): return self._wrap(target in self.obj.values()) else: return self._wrap(target in self.obj)
Determine if a given value is included in the array or object using `is`.
def mesh(self): triangles = np.empty((self.f.shape[0], 4)) triangles[:, -3:] = self.f triangles[:, 0] = 3 return vtki.PolyData(self.v, triangles, deep=False)
Return the surface mesh
def tagAttributes(fdef_master_list,node,depth=0): if type(node)==list: for i in node: depth+=1 tagAttributes(fdef_master_list,i,depth) if type(node)==dict: for x in fdef_master_list: if jsName(x.path,x.name)==node['name']: node['path']=x.path node['depth']=depth if "children" not in node: node["size"]=x.weight for i in node.values(): depth+=1 tagAttributes(fdef_master_list,i,depth) return node
recursively tag objects with sizes, depths and path names
def _ensure_exists(wrapped): @functools.wraps(wrapped) def check_exists(name, *args, **kwargs): if not exists(name): raise CommandExecutionError( 'Container \'{0}\' does not exist'.format(name) ) return wrapped(name, *args, **salt.utils.args.clean_kwargs(**kwargs)) return check_exists
Decorator to ensure that the named container exists.
def max_pool(x_input, pool_size): return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1], strides=[1, pool_size, pool_size, 1], padding='SAME')
max_pool downsamples a feature map by pool_size in each spatial dimension.
def parse_task_runtime(self, runtime_subAST): runtime_attributes = OrderedDict() if isinstance(runtime_subAST, wdl_parser.Terminal): raise NotImplementedError elif isinstance(runtime_subAST, wdl_parser.Ast): raise NotImplementedError elif isinstance(runtime_subAST, wdl_parser.AstList): for ast in runtime_subAST: key = self.parse_task_runtime_key(ast.attr('key')) value = self.parse_declaration_expressn(ast.attr('value'), es='') if value.startswith('"'): value = self.translate_wdl_string_to_python_string(value[1:-1]) runtime_attributes[key] = value return runtime_attributes
Parses the runtime section of the WDL task AST subtree. The task "runtime" section currently supports context fields for a docker container, CPU resources, RAM resources, and disk resources. :param runtime_subAST: A subAST representing runtime parameters. :return: An OrderedDict of runtime attributes, for example: runtime_attributes = OrderedDict([('docker','quay.io/encode-dcc/map:v1.0'), ('cpu','2'), ('memory','17.1 GB'), ('disks','local-disk 420 HDD')])
def generate_tensor_filename(self, field_name, file_num, compressed=True): file_ext = TENSOR_EXT if compressed: file_ext = COMPRESSED_TENSOR_EXT filename = os.path.join(self.filename, 'tensors', '%s_%05d%s' %(field_name, file_num, file_ext)) return filename
Generate a filename for a tensor.
def validate_request_certificate(headers, data): if 'SignatureCertChainUrl' not in headers or \ 'Signature' not in headers: log.error('invalid request headers') return False cert_url = headers['SignatureCertChainUrl'] sig = base64.b64decode(headers['Signature']) cert = _get_certificate(cert_url) if not cert: return False try: crypto.verify(cert, sig, data, 'sha1') return True except: log.error('invalid request signature') return False
Ensure that the certificate and signature specified in the request headers are truly from Amazon and correctly verify. Returns True if certificate verification succeeds, False otherwise. :param headers: Dictionary (or sufficiently dictionary-like) map of request headers. :param data: Raw POST data attached to this request.
def addApplication(self, name, version=None, path=None, disk_num=0, soft=-1): fapp = Features() fapp.features.append(Feature("name", "=", name)) if version: fapp.features.append(Feature("version", "=", version)) if path: fapp.features.append(Feature("path", "=", path)) self.features.append(Feature("disk.%d.applications" % disk_num, "contains", fapp, soft > 0))
Add a new application in some disk.
def _delete_entry_file(self, entry_name=None, entry=None): if entry_name is None and entry is None: raise RuntimeError("Either `entry_name` or `entry` must be given.") elif entry_name is not None and entry is not None: raise RuntimeError("Cannot use both `entry_name` and `entry`.") if entry_name is not None: entry = self.entries[entry_name] else: entry_name = entry[ENTRY.NAME] entry_filename = self.entry_filename(entry_name) if self.args.write_entries: self.log.info("Deleting entry file '{}' of entry '{}'".format( entry_filename, entry_name)) if not os.path.exists(entry_filename): self.log.error( "Filename '{}' does not exist".format(entry_filename)) os.remove(entry_filename) else: self.log.debug("Not deleting '{}' because `write_entries`" " is False".format(entry_filename)) return
Delete the file associated with the given entry.
def build_attributes(cls, attributes, namespace): config_path = attributes.get('config_path') tokens = {} def build_config_key(value_def, config_key): key = value_def.config_key or config_key return '%s.%s' % (config_path, key) if config_path else key def build_token(name, value_def): config_key = build_config_key(value_def, name) value_token = ValueToken.from_definition( value_def, namespace, config_key) getters.register_value_proxy(namespace, value_token, value_def.help) tokens[name] = value_token return name, build_property(value_token) def build_attr(name, attribute): if not isinstance(attribute, ValueTypeDefinition): return name, attribute return build_token(name, attribute) attributes = dict(build_attr(*item) for item in six.iteritems(attributes)) attributes['_tokens'] = tokens return attributes
Return an attributes dictionary with ValueTokens replaced by a property which returns the config value.
def interpolate(self, factor, minInfo, maxInfo, round=True, suppressError=True): factor = normalizers.normalizeInterpolationFactor(factor) if not isinstance(minInfo, BaseInfo): raise TypeError(("Interpolation to an instance of %r can not be " "performed from an instance of %r.") % (self.__class__.__name__, minInfo.__class__.__name__)) if not isinstance(maxInfo, BaseInfo): raise TypeError(("Interpolation to an instance of %r can not be " "performed from an instance of %r.") % (self.__class__.__name__, maxInfo.__class__.__name__)) round = normalizers.normalizeBoolean(round) suppressError = normalizers.normalizeBoolean(suppressError) self._interpolate(factor, minInfo, maxInfo, round=round, suppressError=suppressError)
Interpolate all pairs between minInfo and maxInfo. The interpolation occurs on a 0 to 1.0 range where minInfo is located at 0 and maxInfo is located at 1.0. factor is the interpolation value. It may be less than 0 and greater than 1.0. It may be a number (integer, float) or a tuple of two numbers. If it is a tuple, the first number indicates the x factor and the second number indicates the y factor. round indicates if the result should be rounded to integers. suppressError indicates if incompatible data should be ignored or if an error should be raised when such incompatibilities are found.
def _next_id(self): id_str_lst = self.xpath('./p:sldId/@id') return max([255]+[int(id_str) for id_str in id_str_lst])+1
Return the next available slide ID as an int. Valid slide IDs start at 256. The next integer value greater than the max value in use is chosen, which minimizes the chance of reusing the id of a deleted slide.
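The allocation rule restated as a standalone helper, assuming only the list of ids already in use (the `next_slide_id` name is illustrative): seeding the max with 255 guarantees the first id handed out is 256.

def next_slide_id(existing_ids):
    # Valid slide IDs start at 256; choose one past the current maximum in use.
    return max([255] + [int(id_str) for id_str in existing_ids]) + 1

print(next_slide_id([]))              # 256
print(next_slide_id(['256', '300']))  # 301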
def start(self, timeout=None): self.thread.start() start_time = time.time() if not timeout: timeout = self.timeout while start_time + timeout > time.time(): self.thread.join(1) if self.started: return True if self.error: return False return False
Start running the command
def _aix_get_machine_id(): grains = {} cmd = salt.utils.path.which('lsattr') if cmd: data = __salt__['cmd.run']('{0} -El sys0'.format(cmd)) + os.linesep uuid_regexes = [re.compile(r'(?im)^\s*os_uuid\s+(\S+)\s+(.*)')] for regex in uuid_regexes: res = regex.search(data) if res and len(res.groups()) >= 1: grains['machine_id'] = res.group(1).strip() break else: log.error('The \'lsattr\' binary was not found in $PATH.') return grains
Parse the output of lsattr -El sys0 for os_uuid
def _fill(self, direction, limit=None): if limit is None: limit = -1 return self._get_cythonized_result('group_fillna_indexer', self.grouper, needs_mask=True, cython_dtype=np.int64, result_is_index=True, direction=direction, limit=limit)
Shared function for `pad` and `backfill` to call Cython method. Parameters ---------- direction : {'ffill', 'bfill'} Direction passed to underlying Cython function. `bfill` will cause values to be filled backwards. `ffill` and any other values will default to a forward fill limit : int, default None Maximum number of consecutive values to fill. If `None`, this method will convert to -1 prior to passing to Cython Returns ------- `Series` or `DataFrame` with filled values See Also -------- pad backfill
def getWorkflowDir(workflowID, configWorkDir=None): workDir = configWorkDir or os.getenv('TOIL_WORKDIR') or tempfile.gettempdir() if not os.path.exists(workDir): raise RuntimeError("The directory specified by --workDir or TOIL_WORKDIR (%s) does not " "exist." % workDir) workflowDir = os.path.join(workDir, 'toil-%s-%s' % (workflowID, getNodeID())) try: os.mkdir(workflowDir) except OSError as err: if err.errno != 17: raise else: logger.debug('Created the workflow directory at %s' % workflowDir) return workflowDir
Returns a path to the directory where worker directories and the cache will be located for this workflow. :param str workflowID: Unique identifier for the workflow :param str configWorkDir: Value passed to the program using the --workDir flag :return: Path to the workflow directory :rtype: str
def pad(x, p=3): return tf.pad(x, [[0, 0], [0, 0], [p, p], [p, p]])
Pad tensor in H, W Remarks: TensorFlow uses "ceil(input_spatial_shape[i] / strides[i])" rather than explicit padding like Caffe, pyTorch does. Hence, we need to pad here beforehand. Args: x (tf.tensor): incoming tensor p (int, optional): padding for H, W Returns: tf.tensor: padded tensor
def Update(self, data): m = len(data) self.params[:m] += data
Updates a Dirichlet distribution. data: sequence of observations, in order corresponding to params
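A minimal numpy sketch of the conjugate update this method performs: observed counts are simply added to the concentration parameters. The `Dirichlet` class here is illustrative, not the original implementation.

import numpy as np

class Dirichlet:
    """Minimal Dirichlet over k categories (illustrative)."""

    def __init__(self, k):
        self.params = np.ones(k)         # uniform prior: all concentration parameters 1

    def update(self, data):
        # Conjugate update: add the observed counts to the concentration parameters.
        m = len(data)
        self.params[:m] += data

    def mean(self):
        return self.params / self.params.sum()

d = Dirichlet(3)
d.update([2, 5, 1])                      # observed counts per category
print(d.mean())                          # approx. [0.27, 0.55, 0.18]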
def build_time(start_time): diff_time = round(time.time() - start_time, 2) if diff_time <= 59.99: sum_time = str(diff_time) + " Sec" elif diff_time > 59.99 and diff_time <= 3599.99: sum_time = round(diff_time / 60, 2) sum_time_list = re.findall(r"\d+", str(sum_time)) sum_time = ("{0} Min {1} Sec".format(sum_time_list[0], sum_time_list[1])) elif diff_time > 3599.99: sum_time = round(diff_time / 3600, 2) sum_time_list = re.findall(r"\d+", str(sum_time)) sum_time = ("{0} Hours {1} Min".format(sum_time_list[0], sum_time_list[1])) return sum_time
Calculate build time per package
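A usage sketch, assuming `build_time` and its `time`/`re` imports are in scope; the sleep stands in for an actual package build.

import time

start_time = time.time()
time.sleep(1.2)                  # stand-in for a package build
print(build_time(start_time))    # e.g. "1.2 Sec"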
def tag_and_push(context): tag_option = '--annotate' if probe.has_signing_key(context): tag_option = '--sign' shell.dry_run( TAG_TEMPLATE % (tag_option, context.new_version, context.new_version), context.dry_run, ) shell.dry_run('git push --tags', context.dry_run)
Tags your git repo with the new version number
def _openapi_redoc(self): return render_template('openapi/redoc.html', title=self.app.config.API_TITLE or self.app.name, redoc_url=self.app.config.API_REDOC_SOURCE_URL)
Expose OpenAPI spec with ReDoc The ReDoc script URL can be specified as ``API_REDOC_SOURCE_URL``
def get_data_for_sensors(macs=[], search_duratio_sec=5, bt_device=''): log.info('Get latest data for sensors. Stop with Ctrl+C.') log.info('Stops automatically in %ss', search_duratio_sec) log.info('MACs: %s', macs) datas = dict() for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, search_duratio_sec, bt_device=bt_device): datas[new_data[0]] = new_data[1] return datas
Get latest data for sensors in the MACs list. Args: macs (array): MAC addresses search_duratio_sec (int): Search duration in seconds. Default 5 bt_device (string): Bluetooth device id Returns: dict: MAC and state of found sensors
def _get_last_dirs(path, num=1): head, tail = os.path.split(path) last_path = str(tail) for ii in range(num): head, tail = os.path.split(head) last_path = os.path.join(tail, last_path) last_path = "..." + last_path return last_path
Get a path including only the trailing `num` directories. Returns ------- last_path : str
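A quick usage example, assuming `_get_last_dirs` and `os` are in scope; the path is hypothetical.

print(_get_last_dirs('/home/user/projects/demo/data/run_01', num=2))
# -> "...demo/data/run_01" on POSIX paths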
def predictions(self, setup, n_jobs=-1): stimuli, inhibitors, readouts = setup.stimuli, setup.inhibitors, setup.readouts nc = len(setup.cues()) predictions = np.zeros((len(self), 2**nc, len(setup))) predictions[:, :, :] = Parallel(n_jobs=n_jobs)(delayed(__parallel_predictions__)(n, list(setup.clampings_iter(setup.cues())), readouts, stimuli, inhibitors) for n in self) avg = np.average(predictions[:, :, nc:], axis=0, weights=self.__networks) var = np.average((predictions[:, :, nc:]-avg)**2, axis=0, weights=self.__networks) rcues = ["TR:%s" % c for c in setup.cues(True)] cols = np.concatenate([rcues, ["AVG:%s" % r for r in readouts], ["VAR:%s" % r for r in readouts]]) df = pd.DataFrame(np.concatenate([predictions[0, :, :nc], avg, var], axis=1), columns=cols) df[rcues] = df[rcues].astype(int) return df
Returns a `pandas.DataFrame`_ with the weighted average predictions and variance of all readouts for each possible clampings in the given experimental setup. For each logical network the weight corresponds to the number of networks having the same behavior. Parameters ---------- setup : :class:`caspo.core.setup.Setup` Experimental setup n_jobs : int Number of jobs to run in parallel. Default to -1 (all cores available) Returns ------- `pandas.DataFrame`_ DataFrame with the weighted average predictions and variance of all readouts for each possible clamping .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe .. seealso:: `Wikipedia: Weighted sample variance <https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance>`_
def attributes(self): attr = { 'name': self.name, 'id': self.sync_id, 'network_id': self.network_id, 'serial': self.serial, 'status': self.status, 'region': self.region, 'region_id': self.region_id, } return attr
Return sync attributes.
def _do_dispatch(self, listeners, event_type, details): possible_calls = len(listeners) call_failures = 0 for listener in listeners: try: listener(event_type, details.copy()) except Exception: self._logger.warn( "Failure calling listener %s to notify about event" " %s, details: %s", listener, event_type, details, exc_info=True) call_failures += 1 return _Notified(possible_calls, possible_calls - call_failures, call_failures)
Calls into listeners, handling failures and logging as needed.
def remove_device(self, device, id_override=None, type_override=None): object_id = id_override or device.object_id() object_type = type_override or device.object_type() url_string = "{}/{}s/{}".format(self.BASE_URL, object_type, object_id) try: arequest = requests.delete(url_string, headers=API_HEADERS) if arequest.status_code == 204: return True _LOGGER.error("Failed to remove device. Status code: %s", arequest.status_code) return False except requests.exceptions.RequestException: _LOGGER.error("Failed to remove device.") return False
Remove a device.

Args:
    device (WinkDevice): The device the change is being requested for.
    id_override (String, optional): A device ID used to override the passed in
        device's ID. Used to make changes on sub-devices.
        i.e. Outlet in a Powerstrip. The Parent device's ID.
    type_override (String, optional): Used to override the device type when a
        device inherits from a device other than WinkDevice.

Returns:
    (boolean): True if the device was removed.
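An illustrative call, assuming `api` is an instance of the client class above and `outlet` a previously fetched device; the override values are hypothetical:

# Remove an outlet by addressing its parent powerstrip (IDs are made up)
removed = api.remove_device(outlet, id_override='123456', type_override='powerstrip')
if not removed:
    print('removal failed; see the error log')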
def behaviors_distribution(df, filepath=None):
    cols = ["networks", "index"]
    rcols = ["Logical networks", "Input-Output behaviors"]
    sort_cols = ["networks"]

    if "mse" in df.columns:
        cols.append("mse")
        rcols.append("MSE")
        sort_cols = ["mse"] + sort_cols
        df.mse = df.mse.map(lambda f: "%.4f" % f)

    df = df.sort_values(sort_cols).reset_index(drop=True).reset_index(level=0)[cols]
    df.columns = rcols

    if "MSE" in df.columns:
        g = sns.factorplot(x='Input-Output behaviors', y='Logical networks', hue='MSE',
                           data=df, aspect=3, kind='bar', legend_out=False)
    else:
        g = sns.factorplot(x='Input-Output behaviors', y='Logical networks',
                           data=df, aspect=3, kind='bar', legend_out=False)

    g.ax.set_xticks([])

    if filepath:
        g.savefig(os.path.join(filepath, 'behaviors-distribution.pdf'))

    return g
Plots the distribution of logical networks across input-output behaviors.
Optionally, input-output behaviors can be grouped by MSE.

Parameters
----------
df: `pandas.DataFrame`_
    DataFrame with columns `networks` and optionally `mse`

filepath: str
    Absolute path to a folder where to write the plot

Returns
-------
plot
    Generated plot

.. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
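A minimal sketch with a hand-made frame (the numbers are invented); real frames come from caspo's behavior analysis, and seaborn's factorplot is assumed to be available (later seaborn releases renamed it catplot):

import pandas as pd

df = pd.DataFrame({'networks': [12, 5, 3], 'mse': [0.021, 0.021, 0.034]})
g = behaviors_distribution(df, filepath='.')  # also writes behaviors-distribution.pdf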
def open_organisation_logo_path(self):
    file_name, __ = QFileDialog.getOpenFileName(
        self,
        self.tr('Set organisation logo file'),
        self.organisation_logo_path_line_edit.text(),
        self.tr(
            'Portable Network Graphics files (*.png *.PNG);;'
            'JPEG Images (*.jpg *.jpeg);;'
            'GIF Images (*.gif *.GIF);;'
            'SVG Images (*.svg *.SVG);;'))
    if file_name:
        self.organisation_logo_path_line_edit.setText(file_name)
Open File dialog to choose the organisation logo path.
def ctrl_x(self, x, to=None):
    seq = [Keys.CONTROL, x, Keys.CONTROL]
    if (self.firefox and self.windows) or (self.linux and self.chrome):
        seq.append(Keys.PAUSE)
    if to is None:
        ActionChains(self.driver) \
            .send_keys(seq) \
            .perform()
    else:
        self.send_keys(to, seq)
Sends a character to the currently active element with Ctrl pressed. This method takes care of pressing and releasing Ctrl.
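A hedged usage sketch; `page` stands for whatever driver wrapper defines this method and `field` for an element reference, both hypothetical names:

page.ctrl_x('a')            # Ctrl+A on the currently active element (select all)
page.ctrl_x('c', to=field)  # Ctrl+C sent to a specific element instead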
def setup_water_budget_obs(self):
    if self.mflist_waterbudget:
        org_listfile = os.path.join(self.org_model_ws, self.m.lst.file_name[0])
        if os.path.exists(org_listfile):
            shutil.copy2(org_listfile, os.path.join(self.m.model_ws,
                                                    self.m.lst.file_name[0]))
        else:
            self.logger.warn("can't find existing list file:{0}...skipping".
                             format(org_listfile))
            return
        list_file = os.path.join(self.m.model_ws, self.m.lst.file_name[0])
        flx_file = os.path.join(self.m.model_ws, "flux.dat")
        vol_file = os.path.join(self.m.model_ws, "vol.dat")
        df = pyemu.gw_utils.setup_mflist_budget_obs(list_file,
                                                    flx_filename=flx_file,
                                                    vol_filename=vol_file,
                                                    start_datetime=self.m.start_datetime)
        if df is not None:
            self.obs_dfs["wb"] = df
        self.tmp_files.append(os.path.split(list_file)[-1])
        line = "pyemu.gw_utils.apply_mflist_budget_obs('{0}',flx_filename='{1}',vol_filename='{2}',start_datetime='{3}')".\
            format(os.path.split(list_file)[-1],
                   os.path.split(flx_file)[-1],
                   os.path.split(vol_file)[-1],
                   self.m.start_datetime)
        self.logger.statement("forward_run line:{0}".format(line))
        self.frun_post_lines.append(line)
Setup observations from the MODFLOW list file for volume and flux water budget information
def refresh(self):
    self._screen.force_update()
    self._screen.refresh()
    self._update(1)
Refresh the list and the screen
def gamma(self, gamma=1.0):
    if isinstance(gamma, (list, tuple)):
        gamma = self.xrify_tuples(gamma)
    elif gamma == 1.0:
        return

    logger.debug("Applying gamma %s", str(gamma))
    attrs = self.data.attrs
    self.data = self.data.clip(min=0)
    self.data **= 1.0 / gamma
    self.data.attrs = attrs
Apply gamma correction to the channels of the image.

If *gamma* is a tuple, then it should have as many elements as the channels of the
image, and the gamma correction is applied elementwise. If *gamma* is a number, the
same gamma correction is applied on every channel, if there are several channels in
the image. The behaviour of :func:`gamma` is undefined outside the normal [0,1]
range of the channels.
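The correction itself is just an elementwise power; a small numpy sketch of the same operation on invented sample values, independent of the image class above:

import numpy as np

channel = np.array([0.0, 0.25, 0.5, 1.0])
gamma = 2.0
# clip negatives, then raise to 1/gamma: gamma > 1 brightens the mid-tones
corrected = np.clip(channel, 0, None) ** (1.0 / gamma)
print(corrected)  # approximately [0.  0.5  0.707  1. ]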
def Proxy(f):
    def Wrapped(self, *args):
        return getattr(self, f)(*args)
    return Wrapped
A helper to create a proxy method in a class.
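A small, self-contained illustration of how the helper might be used; the class and method names are invented:

class Stack(object):
    def __init__(self):
        self._items = []

    def _push(self, item):
        self._items.append(item)

    # `push` simply forwards to `self._push(...)`
    push = Proxy('_push')

s = Stack()
s.push(42)
print(s._items)  # [42]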
def task_factory(loop, coro):
    task = asyncio.Task(coro, loop=loop)
    if task._source_traceback:
        del task._source_traceback[-1]

    current_task = asyncio.Task.current_task(loop=loop)
    if current_task is not None and hasattr(current_task, 'context'):
        setattr(task, 'context', current_task.context)

    return task
Task factory function.

The function closely mirrors the logic inside asyncio.BaseEventLoop.create_task.
If there is a current task and that task has a context, share the context with
the new task.
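A hedged usage sketch on an older asyncio (pre-3.9, where Task.current_task still exists, matching the code above); the `context` payload is hypothetical:

import asyncio

async def child():
    # the factory copied the parent's ``context`` attribute onto this task
    print(getattr(asyncio.Task.current_task(), 'context', None))

async def parent():
    asyncio.Task.current_task().context = {'request_id': 'abc123'}
    await asyncio.ensure_future(child())

loop = asyncio.get_event_loop()
loop.set_task_factory(task_factory)
loop.run_until_complete(parent())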
def set_model(self, m):
    self._model = m
    self.new_root.emit(QtCore.QModelIndex())
    self.model_changed(m)
Set the model for the level

:param m: the model that the level should use
:type m: QtCore.QAbstractItemModel
:returns: None
:rtype: None
:raises: None
def partition_chem_env(self, n_sphere=4, use_lookup=None):
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']

    def get_chem_env(self, i, n_sphere):
        env_index = self.get_coordination_sphere(
            i, n_sphere=n_sphere, only_surface=False,
            give_only_index=True, use_lookup=use_lookup)
        env_index.remove(i)
        atoms = self.loc[env_index, 'atom']
        environment = frozenset(collections.Counter(atoms).most_common())
        return (self.loc[i, 'atom'], environment)

    chemical_environments = collections.defaultdict(set)
    for i in self.index:
        chemical_environments[get_chem_env(self, i, n_sphere)].add(i)
    return dict(chemical_environments)
This function partitions the molecule into subsets of the same chemical environment.

A chemical environment is specified by the number of surrounding atoms of a certain
kind around an atom with a certain atomic number, represented by a tuple of a string
and a frozenset of tuples. The ``n_sphere`` option determines how many branches the
algorithm follows to determine the chemical environment.

Example:
    A carbon atom in ethane has bonds with three hydrogen (atomic number 1) and one
    carbon atom (atomic number 6). If ``n_sphere=1`` these are the only atoms we are
    interested in and the chemical environment is::

        ('C', frozenset([('H', 3), ('C', 1)]))

    If ``n_sphere=2`` we follow every atom in the chemical environment of
    ``n_sphere=1`` to their direct neighbours. In the case of ethane this gives::

        ('C', frozenset([('H', 6), ('C', 1)]))

    In the special case of ethane this is the whole molecule; in other cases you can
    apply this operation recursively and stop after ``n_sphere`` or after reaching
    the end of branches.

Args:
    n_sphere (int):
    use_lookup (bool): Use a lookup variable for
        :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in
        ``settings['defaults']['use_lookup']``

Returns:
    dict: The output will look like this::

        { (element_symbol, frozenset([tuples])) : set([indices]) }

    A dictionary mapping from a chemical environment to the set of indices of
    atoms in this environment.
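A short usage sketch assuming the chemcoord package and an xyz file on disk; the filename is hypothetical:

import chemcoord as cc

molecule = cc.Cartesian.read_xyz('ethane.xyz')  # hypothetical input file
environments = molecule.partition_chem_env(n_sphere=1)
for (element, neighbours), indices in environments.items():
    print(element, sorted(neighbours), sorted(indices))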