def Stop(self):
    self._shutdown = True
    self._new_updates.set()
    if self._main_thread is not None:
        self._main_thread.join()
        self._main_thread = None
    if self._transmission_thread is not None:
        self._transmission_thread.join()
        self._transmission_thread = None
Signals the worker threads to shut down and waits until they exit.
def _approx_eq_(self, other: Any, atol: float) -> bool:
    if not isinstance(other, LinearDict):
        return NotImplemented
    all_vs = set(self.keys()) | set(other.keys())
    return all(abs(self[v] - other[v]) < atol for v in all_vs)
Checks whether two linear combinations are approximately equal.
def _extract_cookies(self, response: Response):
    self._cookie_jar.extract_cookies(
        response, response.request, self._get_cookie_referrer_host()
    )
Load the cookie headers from the Response.
def gitrepo(cwd):
    repo = Repository(cwd)
    if not repo.valid():
        return {}
    return {
        'head': {
            'id': repo.gitlog('%H'),
            'author_name': repo.gitlog('%aN'),
            'author_email': repo.gitlog('%ae'),
            'committer_name': repo.gitlog('%cN'),
            'committer_email': repo.gitlog('%ce'),
            'message': repo.gitlog('%s')
        },
        'branch': os.environ.get(
            'TRAVIS_BRANCH',
            os.environ.get('APPVEYOR_REPO_BRANCH',
                           repo.git('rev-parse', '--abbrev-ref', 'HEAD')[1].strip())),
        'remotes': [
            {'name': line.split()[0], 'url': line.split()[1]}
            for line in repo.git('remote', '-v')[1] if '(fetch)' in line
        ]
    }
Return hash of Git data that can be used to display more information to users.

Example:
    "git": {
        "head": {
            "id": "5e837ce92220be64821128a70f6093f836dd2c05",
            "author_name": "Wil Gieseler",
            "author_email": "[email protected]",
            "committer_name": "Wil Gieseler",
            "committer_email": "[email protected]",
            "message": "depend on simplecov >= 0.7"
        },
        "branch": "master",
        "remotes": [{
            "name": "origin",
            "url": "https://github.com/lemurheavy/coveralls-ruby.git"
        }]
    }

From https://github.com/coagulant/coveralls-python (with MIT license).
def get_instance(self, payload):
    return EnvironmentInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
    )
Build an instance of EnvironmentInstance

:param dict payload: Payload response from the API

:returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
:rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
def begin(self):
    if self.start:
        self.at_beginning = True
        self.pos = 0
    else:
        self.at_beginning = False
        self._new_song()
    return self._get_song()
Start over and get a track.
async def delete_pool_ledger_config(config_name: str) -> None:
    logger = logging.getLogger(__name__)
    logger.debug("delete_pool_ledger_config: >>> config_name: %r", config_name)

    if not hasattr(delete_pool_ledger_config, "cb"):
        logger.debug("delete_pool_ledger_config: Creating callback")
        delete_pool_ledger_config.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))

    c_config_name = c_char_p(config_name.encode('utf-8'))

    res = await do_call('indy_delete_pool_ledger_config',
                        c_config_name,
                        delete_pool_ledger_config.cb)

    logger.debug("delete_pool_ledger_config: <<< res: %r", res)
    return res
Deletes created pool ledger configuration.

:param config_name: Name of the pool ledger configuration to delete.
:return: Error code
def find_distinct(self, collection, key):
    obj = getattr(self.db, collection)
    result = obj.distinct(key)
    return result
Search a collection for the distinct key values provided.

Args:
    collection: The db collection. See main class documentation.
    key: The name of the key to find distinct values. For example with
        the indicators collection, the key could be "type".

Returns:
    List of distinct values.
def new_closure(vals):
    # Build a lambda whose inner lambda closes over one variable per value,
    # then extract the resulting closure cells.
    args = ','.join('x%i' % i for i in range(len(vals)))
    f = eval("lambda %s:lambda:(%s)" % (args, args))
    if sys.version_info[0] >= 3:
        return f(*vals).__closure__
    return f(*vals).func_closure
Build a new closure
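A quick usage sketch (hypothetical values; `sys` must be imported alongside the function):

    import sys

    # Capture the values 1, 2, 3 in a fresh tuple of closure cells.
    cells = new_closure((1, 2, 3))
    print([c.cell_contents for c in cells])  # [1, 2, 3] on Python 3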
def percent(self, value: float) -> 'Size':
    raise_not_number(value)
    self.maximum = '{}%'.format(value)
    return self
Set the percentage of free space to use.
def python(self, func, *args, **kwargs):
    self.ops.append(lambda: func(*args, **kwargs))
Run python code.
def existing_users(context):
    members = IWorkspace(context).members
    info = []
    for userid, details in members.items():
        user = api.user.get(userid)
        if user is None:
            continue
        user = user.getUser()
        title = user.getProperty('fullname') or user.getId() or userid
        description = _(u'Here we could have a nice status of this person')
        classes = description and 'has-description' or 'has-no-description'
        portal = api.portal.get()
        portrait = '%s/portal_memberdata/portraits/%s' % \
            (portal.absolute_url(), userid)
        info.append(
            dict(
                id=userid,
                title=title,
                description=description,
                portrait=portrait,
                cls=classes,
                member=True,
                admin='Admins' in details['groups'],
            )
        )
    return info
Look up the full user details for current workspace members
def clean_url(self):
    raw_url = self.request['url']
    parsed_url = urlparse(raw_url)
    qsl = parse_qsl(parsed_url.query)
    for qs in qsl:
        new_url = self._join_url(parsed_url, [i for i in qsl if i is not qs])
        new_request = deepcopy(self.request)
        new_request['url'] = new_url
        self._add_task('qsl', qs, new_request)
    return self
Only clean the url params and return self.
def get_session(credentials, config):
    session = requests.Session()
    session.verify = False

    auth_url = config.get("auth_url")
    if auth_url:
        cookie = session.post(
            auth_url,
            data={
                "j_username": credentials[0],
                "j_password": credentials[1],
                "submit": "Log In",
                "rememberme": "true",
            },
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        if not cookie:
            raise Dump2PolarionException("Cookie was not retrieved from {}.".format(auth_url))
    else:
        session.auth = credentials
    return session
Gets requests session.
def get_scoreboard(year, month, day):
    try:
        data = urlopen(BASE_URL.format(year, month, day) + 'scoreboard.xml')
    except HTTPError:
        data = os.path.join(PWD, 'default.xml')
    return data
Return the game file for a certain day matching certain criteria.
def limit(self, limit: int) -> "QuerySet":
    queryset = self._clone()
    queryset._limit = limit
    return queryset
Limits QuerySet to given length.
def footprints_from_place(place, footprint_type='building', retain_invalid=False):
    city = gdf_from_place(place)
    polygon = city['geometry'].iloc[0]
    return create_footprints_gdf(polygon, retain_invalid=retain_invalid,
                                 footprint_type=footprint_type)
Get footprints within the boundaries of some place.

The query must be geocodable and OSM must have polygon boundaries for the
geocode result. If OSM does not have a polygon for this place, you can
instead get its footprints using the footprints_from_address function,
which geocodes the place name to a point and gets the footprints within
some distance of that point.

Parameters
----------
place : string
    the query to geocode to get geojson boundary polygon
footprint_type : string
    type of footprint to be downloaded. OSM tag key e.g. 'building',
    'landuse', 'place', etc.
retain_invalid : bool
    if False discard any footprints with an invalid geometry

Returns
-------
GeoDataFrame
def to_array(self):
    array = super(InlineQueryResultAudio, self).to_array()
    array['audio_url'] = u(self.audio_url)
    array['title'] = u(self.title)
    if self.caption is not None:
        array['caption'] = u(self.caption)
    if self.parse_mode is not None:
        array['parse_mode'] = u(self.parse_mode)
    if self.performer is not None:
        array['performer'] = u(self.performer)
    if self.audio_duration is not None:
        array['audio_duration'] = int(self.audio_duration)
    if self.reply_markup is not None:
        array['reply_markup'] = self.reply_markup.to_array()
    if self.input_message_content is not None:
        array['input_message_content'] = self.input_message_content.to_array()
    return array
Serializes this InlineQueryResultAudio to a dictionary.

:return: dictionary representation of this object.
:rtype: dict
def dict(self):
    post_dict = {
        'id': self.id,
        'link': self.link,
        'permalink': self.permalink,
        'content_type': self.content_type,
        'slug': self.slug,
        'updated': self.updated,
        'published': self.published,
        'title': self.title,
        'description': self.description,
        'author': self.author,
        'categories': self.categories[1:-1].split(',') if self.categories else None,
        'summary': self.summary,
    }
    if self.attributes:
        attributes = simplejson.loads(self.attributes)
        post_dict.update(attributes)
    return post_dict
Returns dictionary of post fields and attributes
def get_all(self):
    if not self.vars:
        return self.parent
    if not self.parent:
        return self.vars
    return dict(self.parent, **self.vars)
Return the complete context as dict including the exported variables.

For optimization reasons this might not return an actual copy so be
careful with using it.
def get_level(self):
    if not bool(self._my_map['levelId']):
        raise errors.IllegalState('this Assessment has no level')
    mgr = self._get_provider_manager('GRADING')
    if not mgr.supports_grade_lookup():
        raise errors.OperationFailed('Grading does not support Grade lookup')
    lookup_session = mgr.get_grade_lookup_session(proxy=getattr(self, "_proxy", None))
    lookup_session.use_federated_gradebook_view()
    osid_object = lookup_session.get_grade(self.get_level_id())
    return osid_object
Gets the ``Grade`` corresponding to the assessment difficulty.

return: (osid.grading.Grade) - the level
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
def decrease_priority(self, infohash_list):
    data = self._process_infohash_list(infohash_list)
    return self._post('command/decreasePrio', data=data)
Decrease priority of torrents.

:param infohash_list: Single or list() of infohashes.
def wrap_json(cls, json):
    u = User(usertype=json['type'],
             name=json['name'],
             logo=json['logo'],
             twitchid=json['_id'],
             displayname=json['display_name'],
             bio=json['bio'])
    return u
Create a User instance for the given json

:param json: the dict with the information of the user
:type json: :class:`dict` | None
:returns: the new user instance
:rtype: :class:`User`
:raises: None
def add_index(self, model, *columns, **kwargs):
    unique = kwargs.pop('unique', False)
    model._meta.indexes.append((columns, unique))
    columns_ = []
    for col in columns:
        field = model._meta.fields.get(col)
        if len(columns) == 1:
            field.unique = unique
            field.index = not unique
        if isinstance(field, pw.ForeignKeyField):
            col = col + '_id'
        columns_.append(col)
    self.ops.append(self.migrator.add_index(model._meta.table_name, columns_,
                                            unique=unique))
    return model
Create indexes.
def _query(self, urls):
    urls = list(set(urls))
    for i in range(0, len(urls), self.max_urls_per_request):
        chunk = urls[i:i + self.max_urls_per_request]
        response = self._query_once(chunk)
        if response.status_code == 200:
            yield chunk, response
Test URLs for being listed by the service.

:param urls: a sequence of URLs to be tested
:returns: a tuple containing chunk of URLs and a response pertaining to
    them if the code of response was 200, which means at least one of
    the queried URLs is matched in either the phishing, malware, or
    unwanted software lists.
def get_attr(obj, attr, default=None):
    if '.' not in attr:
        return getattr(obj, attr, default)
    else:
        L = attr.split('.')
        return get_attr(getattr(obj, L[0], default), '.'.join(L[1:]), default)
Recursive get object's attribute. May use dot notation.

>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> get_attr(a, 'b.c')
4
>>> get_attr(a, 'b.c.y', None)
>>> get_attr(a, 'b.c.y', 1)
1
def _get_keys(self, read, input_records):
    for i in range(read.value):
        ir = input_records[i]
        if ir.EventType in EventTypes:
            ev = getattr(ir.Event, EventTypes[ir.EventType])
            if type(ev) == KEY_EVENT_RECORD and ev.KeyDown:
                for key_press in self._event_to_key_presses(ev):
                    yield key_press
            elif type(ev) == MOUSE_EVENT_RECORD:
                for key_press in self._handle_mouse(ev):
                    yield key_press
Generator that yields `KeyPress` objects from the input records.
def _init_c2ps(self, go_sources, traverse_child):
    if not traverse_child:
        return {}
    c2ps = defaultdict(set)
    goids_seen = set()
    go2obj = self.go2obj
    for goid_src in go_sources:
        goobj_src = go2obj[goid_src]
        if goid_src not in goids_seen:
            self._traverse_child_objs(c2ps, goobj_src, goids_seen)
    return c2ps
Traverse up children.
def _process_req_body(self, body):
    try:
        return json.loads(body)
    except ValueError:
        return urlparse.parse_qs(body, keep_blank_values=True)
Process the body of the HTTP request.

If the body is valid JSON, return the JSON as a dict. Else, convert
the key=value format to a dict and return that.

Args:
    body: The body of the HTTP request.
def _get_pci_devices(self):
    system = self._get_host_details()
    if ('links' in system['Oem']['Hp'] and
            'PCIDevices' in system['Oem']['Hp']['links']):
        pci_uri = system['Oem']['Hp']['links']['PCIDevices']['href']
        status, headers, pci_device_list = self._rest_get(pci_uri)
        if status >= 300:
            msg = self._get_extended_error(pci_device_list)
            raise exception.IloError(msg)
        return pci_device_list
    else:
        msg = ('links/PCIDevices section in ComputerSystem/Oem/Hp'
               ' does not exist')
        raise exception.IloCommandNotSupportedError(msg)
Gets the PCI devices.

:returns: PCI devices list if the pci resource exist.
:raises: IloCommandNotSupportedError if the PCI resource doesn't exist.
:raises: IloError, on an error from iLO.
def get_conversation(self, conversation, **kwargs):
    from canvasapi.conversation import Conversation
    conversation_id = obj_or_id(conversation, "conversation", (Conversation,))
    response = self.__requester.request(
        'GET',
        'conversations/{}'.format(conversation_id),
        _kwargs=combine_kwargs(**kwargs)
    )
    return Conversation(self.__requester, response.json())
Return single Conversation

:calls: `GET /api/v1/conversations/:id \
    <https://canvas.instructure.com/doc/api/conversations.html#method.conversations.show>`_

:param conversation: The object or ID of the conversation.
:type conversation: :class:`canvasapi.conversation.Conversation` or int

:rtype: :class:`canvasapi.conversation.Conversation`
def build(X_df=None, y_df=None):
    if X_df is None:
        X_df, _ = load_data()
    if y_df is None:
        _, y_df = load_data()
    features = get_contrib_features()
    mapper_X = ballet.feature.make_mapper(features)
    X = mapper_X.fit_transform(X_df)
    encoder_y = get_target_encoder()
    y = encoder_y.fit_transform(y_df)
    return {
        'X_df': X_df,
        'features': features,
        'mapper_X': mapper_X,
        'X': X,
        'y_df': y_df,
        'encoder_y': encoder_y,
        'y': y,
    }
Build features and target

Args:
    X_df (DataFrame): raw variables
    y_df (DataFrame): raw target

Returns:
    dict with keys X_df, features, mapper_X, X, y_df, encoder_y, y
def android_example():
    env = holodeck.make("AndroidPlayground")
    command = np.ones(94) * 10
    for i in range(10):
        env.reset()
        for j in range(1000):
            if j % 50 == 0:
                command *= -1
            state, reward, terminal, _ = env.step(command)
            pixels = state[Sensors.PIXEL_CAMERA]
            orientation = state[Sensors.ORIENTATION_SENSOR]
A basic example of how to use the android agent.
def stop(self, signal=None):
    signal = signal or self.int_signal
    self.out.log("Cleaning up local Heroku process...")
    if self._process is None:
        self.out.log("No local Heroku process was running.")
        return
    try:
        os.killpg(os.getpgid(self._process.pid), signal)
        self.out.log("Local Heroku process terminated.")
    except OSError:
        self.out.log("Local Heroku was already terminated.")
        self.out.log(traceback.format_exc())
    finally:
        self._process = None
Stop the heroku local subprocess and all of its children.
def all_coarse_grains_for_blackbox(blackbox):
    for partition in all_partitions(blackbox.output_indices):
        for grouping in all_groupings(partition):
            coarse_grain = CoarseGrain(partition, grouping)
            try:
                validate.blackbox_and_coarse_grain(blackbox, coarse_grain)
            except ValueError:
                continue
            yield coarse_grain
Generator over all |CoarseGrains| for the given blackbox. If a box has multiple outputs, those outputs are partitioned into the same coarse-grain macro-element.
def canonical_name(sgf_name):
    sgf_name = os.path.normpath(sgf_name)
    assert sgf_name.endswith('.sgf'), sgf_name
    sgf_name = sgf_name[:-4]
    with_folder = re.search(r'/([^/]*/eval/.*)', sgf_name)
    if with_folder:
        return with_folder.group(1)
    return os.path.basename(sgf_name)
Keep filename and some date folders
def registerJavaUDAF(self, name, javaClassName):
    self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
Register a Java user-defined aggregate function as a SQL function.

:param name: name of the user-defined aggregate function
:param javaClassName: fully qualified name of java class

>>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg")
>>> df = spark.createDataFrame([(1, "a"), (2, "b"), (3, "a")], ["id", "name"])
>>> df.createOrReplaceTempView("df")
>>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect()
[Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)]
def cftime_to_nptime(times):
    times = np.asarray(times)
    new = np.empty(times.shape, dtype='M8[ns]')
    for i, t in np.ndenumerate(times):
        try:
            dt = pd.Timestamp(t.year, t.month, t.day, t.hour, t.minute,
                              t.second, t.microsecond)
        except ValueError as e:
            raise ValueError('Cannot convert date {} to a date in the '
                             'standard calendar. Reason: {}.'.format(t, e))
        new[i] = np.datetime64(dt)
    return new
Given an array of cftime.datetime objects, return an array of numpy.datetime64 objects of the same size
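A minimal usage sketch, assuming the `cftime` package is installed and the function's `np`/`pd` imports are in scope:

    import numpy as np
    import cftime

    # Standard-calendar cftime dates convert cleanly to datetime64[ns].
    times = np.array([cftime.DatetimeGregorian(2000, 1, 1),
                      cftime.DatetimeGregorian(2000, 1, 2)])
    print(cftime_to_nptime(times))
    # ['2000-01-01T00:00:00.000000000' '2000-01-02T00:00:00.000000000']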
def reportTimes(self):
    self.end = _ptime()
    total_time = 0
    print(ProcSteps.__report_header)
    for step in self.order:
        if 'elapsed' in self.steps[step]:
            _time = self.steps[step]['elapsed']
        else:
            _time = 0.0
        total_time += _time
        print(' %20s %0.4f sec.' % (step, _time))
    print(' %20s %s' % ('=' * 20, '=' * 20))
    print(' %20s %0.4f sec.' % ('Total', total_time))
Print out a formatted summary of the elapsed times for all the performed steps.
def as_table(self, name=None):
    if name is None:
        name = self._id
    return alias(self.subquery(), name=name)
Return an alias to a table
def update_status(self, helper, status):
    if status:
        self.status(status[0])
        if status[0] == 0:
            self.add_long_output(status[1])
        else:
            self.add_summary(status[1])
update the helper
def version_already_uploaded(project_name, version_str, index_url,
                             requests_verify=True):
    all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
    return version_str in all_versions
Check to see if the version specified has already been uploaded to the configured index
def min(self):
    if len(self._data) == 0:
        return 10
    return next(iter(sorted(self._data.keys())))
Return the minimum value in this histogram.

If there are no values in the histogram at all, return 10.

Returns:
    int: The minimum value in the histogram.
def send_no_servlet_response(self):
    response = _HTTPServletResponse(self)
    response.send_content(404, self._service.make_not_found_page(self.path))
Default response sent when no servlet is found for the requested path
def filelist_prune(self, at_data, *args, **kwargs):
    b_status = True
    l_file = []
    str_path = at_data[0]
    al_file = at_data[1]
    if len(self.str_extension):
        al_file = [x for x in al_file if self.str_extension in x]
    if len(al_file):
        al_file.sort()
        l_file = al_file
        b_status = True
    else:
        self.dp.qprint(
            "No valid files to analyze found in path %s!" % str_path,
            comms='error', level=3)
        l_file = None
        b_status = False
    return {
        'status': b_status,
        'l_file': l_file
    }
Given a list of files, possibly prune list by extension.
def _toggle_term_protect(name, value):
    instance_id = _get_node(name)['instanceId']
    params = {'Action': 'ModifyInstanceAttribute',
              'InstanceId': instance_id,
              'DisableApiTermination.Value': value}
    result = aws.query(params,
                       location=get_location(),
                       provider=get_provider(),
                       return_root=True,
                       opts=__opts__,
                       sigver='4')
    return show_term_protect(name=name, instance_id=instance_id, call='action')
Enable or Disable termination protection on a node
def transform(self, X):
    X_checked = check_input(X, type_of_inputs='classic', estimator=self,
                            preprocessor=self.preprocessor_,
                            accept_sparse=True)
    return X_checked.dot(self.transformer_.T)
Embeds data points in the learned linear embedding space.

Transforms samples in ``X`` into ``X_embedded``, samples inside a new
embedding space such that: ``X_embedded = X.dot(L.T)``, where ``L`` is
the learned linear transformation (See :class:`MahalanobisMixin`).

Parameters
----------
X : `numpy.ndarray`, shape=(n_samples, n_features)
    The data points to embed.

Returns
-------
X_embedded : `numpy.ndarray`, shape=(n_samples, num_dims)
    The embedded data points.
def save(self, *args, **kwargs):
    super(Layer, self).save(*args, **kwargs)
    if self.pk and self.is_published != self._current_is_published:
        layer_is_published_changed.send(
            sender=self.__class__,
            instance=self,
            old_is_published=self._current_is_published,
            new_is_published=self.is_published
        )
        self.update_nodes_published()
    self._current_is_published = self.is_published
intercepts changes to is_published and fires layer_is_published_changed signal
def regen_keys():
    for fn_ in os.listdir(__opts__['pki_dir']):
        path = os.path.join(__opts__['pki_dir'], fn_)
        try:
            os.remove(path)
        except os.error:
            pass
    channel = salt.transport.client.ReqChannel.factory(__opts__)
    channel.close()
Used to regenerate the minion keys.

CLI Example:

.. code-block:: bash

    salt '*' saltutil.regen_keys
def handle(self, *args, **options):
    last_id = Submission._objects.all().aggregate(Max('id'))['id__max']
    log.info("Beginning uuid update")
    current = options['start']
    while current < last_id:
        end_chunk = (current + options['chunk']
                     if last_id - options['chunk'] >= current else last_id)
        log.info("Updating entries in range [{}, {}]".format(current, end_chunk))
        with transaction.atomic():
            for submission in Submission._objects.filter(
                    id__gte=current, id__lte=end_chunk).iterator():
                submission.save(update_fields=['uuid'])
        time.sleep(options['wait'])
        current = end_chunk + 1
By default, we're going to do this in chunks. This way, if there ends up being an error, we can check log messages and continue from that point after fixing the issue.
def from_file(cls, xml_path):
    try:
        parsed_xml = cls._parse(xml_path)
    except OSError as e:
        raise XmlParser.XmlError(
            "Problem reading xml file at {}: {}".format(xml_path, e))
    return cls(xml_path, parsed_xml)
Parse .xml file and create a XmlParser object.
def instances(self, **kwargs):
    if self.category == Category.MODEL:
        return self._client.parts(model=self, category=Category.INSTANCE, **kwargs)
    else:
        raise NotFoundError("Part {} is not a model".format(self.name))
Retrieve the instances of this `Part` as a `PartSet`.

For instance, if you have a model part, you can get the list of instances
that are created based on this model. If there are no instances (only
possible if the multiplicity is :attr:`enums.Multiplicity.ZERO_MANY`)
then a :exc:`NotFoundError` is raised.

.. versionadded:: 1.8

:return: the instances of this part model :class:`PartSet` with category `INSTANCE`
:raises NotFoundError: if no instances found

Example
-------
>>> wheel_model = project.model('Wheel')
>>> wheel_instance_set = wheel_model.instances()

An example with retrieving the front wheels only using the 'name__contains'
search argument.

>>> wheel_model = project.model('Wheel')
>>> front_wheel_instances = wheel_model.instances(name__contains='Front')
def CountClientPlatformsByLabel(self, day_buckets):
    def ExtractPlatform(client_info):
        return client_info.last_snapshot.knowledge_base.os

    return self._CountClientStatisticByLabel(day_buckets, ExtractPlatform)
Computes client-activity stats for all client platforms in the DB.
def filename(self):
    if self.buildver:
        buildver = '-' + self.buildver
    else:
        buildver = ''
    pyver = '.'.join(self.pyver)
    abi = '.'.join(self.abi)
    arch = '.'.join(self.arch)
    version = self.version.replace('-', '_')
    return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
                                     pyver, abi, arch)
Build and return a filename from the various components.
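For illustration, the same format string applied to hypothetical component values:

    # name='mypkg', version='1.0', no build tag, pyver=['py2', 'py3'],
    # abi=['none'], arch=['any']
    parts = ('mypkg', '1.0', '', 'py2.py3', 'none', 'any')
    print('%s-%s%s-%s-%s-%s.whl' % parts)  # mypkg-1.0-py2.py3-none-any.whl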
def _get_aeff_corrections(intensity_ratio, mask):
    nebins = len(intensity_ratio.data)
    aeff_corrections = np.zeros((nebins))
    for i in range(nebins):
        bright_pixels_intensity = intensity_ratio.data[i][mask.data[i]]
        mean_bright_pixel = bright_pixels_intensity.mean()
        aeff_corrections[i] = 1. / mean_bright_pixel
    print("Aeff correction: ", aeff_corrections)
    return aeff_corrections
Compute a correction for the effective area from the brighter pixels
def find_visible_birthdays(request, data):
    if request.user and (request.user.is_teacher or
                         request.user.is_eighthoffice or
                         request.user.is_eighth_admin):
        return data
    data['today']['users'] = [u for u in data['today']['users'] if u['public']]
    data['tomorrow']['users'] = [u for u in data['tomorrow']['users'] if u['public']]
    return data
Return only the birthdays visible to current user.
def ceiling_func(self, addr):
    try:
        next_addr = self._function_map.ceiling_addr(addr)
        return self._function_map.get(next_addr)
    except KeyError:
        return None
Return the function that has the least address greater than or equal to `addr`.

:param int addr: The address to query.
:return: A Function instance, or None if there is no other function after `addr`.
:rtype: Function or None
def visit_return(self, node):
    if node.is_tuple_return() and len(node.value.elts) > 1:
        elts = [child.accept(self) for child in node.value.elts]
        return "return %s" % ", ".join(elts)
    if node.value:
        return "return %s" % node.value.accept(self)
    return "return"
return an astroid.Return node as string
def hour_angle(times, longitude, equation_of_time):
    naive_times = times.tz_localize(None)
    hrs_minus_tzs = 1 / NS_PER_HR * (
        2 * times.astype(np.int64) - times.normalize().astype(np.int64) -
        naive_times.astype(np.int64))
    return np.asarray(15. * (hrs_minus_tzs - 12.) + longitude +
                      equation_of_time / 4.)
Hour angle in local solar time. Zero at local solar noon.

Parameters
----------
times : :class:`pandas.DatetimeIndex`
    Corresponding timestamps, must be localized to the timezone for the
    ``longitude``.
longitude : numeric
    Longitude in degrees
equation_of_time : numeric
    Equation of time in minutes.

Returns
-------
hour_angle : numeric
    Hour angle in local solar time in degrees.

References
----------
[1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition" pp. 13, J. Wiley and Sons, New York (2006)

[2] J. H. Seinfeld and S. N. Pandis, "Atmospheric Chemistry and Physics"
p. 132, J. Wiley (1998)

[3] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable
Energy Applications", p. 5 CRC Press (2013)

See Also
--------
equation_of_time_Spencer71
equation_of_time_pvcdrom
def calculate_delay(original, delay):
    original = datetime.strptime(original, '%H:%M')
    delayed = datetime.strptime(delay, '%H:%M')
    diff = delayed - original
    return diff.total_seconds() // 60
Calculate the delay in minutes.
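A usage sketch (the helper itself needs `from datetime import datetime`):

    from datetime import datetime

    print(calculate_delay('10:00', '10:45'))  # 45.0
    print(calculate_delay('23:50', '23:55'))  # 5.0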
def put(self, user_name: str) -> User:
    current = current_user()
    if current.name == user_name or current.is_admin:
        user = self._get_or_abort(user_name)
        self.update(user)
        session.commit()
        session.add(user)
        return user
    else:
        abort(403)
Updates the User Resource with the name.
def save_to_disk(self, filename_pattern=None):
    if not self._converter:
        raise RuntimeError(
            'Must set _converter on subclass or via set_converter before calling '
            'save_to_disk.')
    pattern = filename_pattern or self._default_filename_pattern
    if not pattern:
        raise RuntimeError(
            'Must provide a filename_pattern or set a '
            '_default_filename_pattern on subclass.')

    def save_to_disk_callback(test_record_obj):
        proto = self._convert(test_record_obj)
        output_to_file = callbacks.OutputToFile(pattern)
        with output_to_file.open_output_file(test_record_obj) as outfile:
            outfile.write(proto.SerializeToString())

    return save_to_disk_callback
Returns a callback to convert test record to proto and save to disk.
def correct(self, image, keepSize=False, borderValue=0):
    image = imread(image)
    (h, w) = image.shape[:2]
    mapx, mapy = self.getUndistortRectifyMap(w, h)
    self.img = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_CONSTANT,
                         borderValue=borderValue)
    if not keepSize:
        xx, yy, ww, hh = self.roi
        self.img = self.img[yy: yy + hh, xx: xx + ww]
    return self.img
remove lens distortion from given image
def data64_send(self, type, len, data, force_mavlink1=False):
    return self.send(self.data64_encode(type, len, data),
                     force_mavlink1=force_mavlink1)
Data packet, size 64

type : data type (uint8_t)
len  : data length (uint8_t)
data : raw data (uint8_t)
def get_cmd_description(self):
    try:
        return self.description
    except AttributeError:
        pass
    try:
        return '\n'.join(
            get_localized_docstring(
                self, self.get_gettext_domain()
            ).splitlines()[1:]
        ).split('@EPILOG@', 1)[0].strip()
    except (AttributeError, IndexError, ValueError):
        pass
Get the leading, multi-line description of this command.

:returns: ``self.description``, if defined
:returns: A substring of the class docstring between the first line
    (which is discarded) and the string ``@EPILOG@``, if present, or the
    end of the docstring, if any
:returns: None, otherwise

The description string will be displayed after the usage string but
before any of the detailed argument descriptions.

Please consider following good practice by keeping the description line
short enough not to require scrolling but useful enough to provide
additional information that cannot be inferred from the name of the
command or other arguments. Stating the purpose of the command is highly
recommended.
def stop(self):
    Global.LOGGER.info("stopping the flow manager")
    self._stop_actions()
    self.isrunning = False
    Global.LOGGER.debug("flow manager stopped")
Stop all the processes
def location_path(self, path):
    path = path.strip("/")
    tmp = path.split("?")
    path = tmp[0]
    paths = path.split("/")
    for p in paths:
        option = Option()
        option.number = defines.OptionRegistry.LOCATION_PATH.number
        option.value = p
        self.add_option(option)
Set the Location-Path of the response.

:type path: String
:param path: the Location-Path as a string
def get_item_ids_metadata(self):
    metadata = dict(self._item_ids_metadata)
    metadata.update(
        {'existing_id_values': self.my_osid_object_form._my_map['itemIds']})
    return Metadata(**metadata)
get the metadata for item
def make_signature(signer_func, data_to_sign, public_algo,
                   hashed_subpackets, unhashed_subpackets, sig_type=0):
    header = struct.pack('>BBBB', 4, sig_type, public_algo, 8)
    hashed = subpackets(*hashed_subpackets)
    unhashed = subpackets(*unhashed_subpackets)
    tail = b'\x04\xff' + struct.pack('>L', len(header) + len(hashed))
    data_to_hash = data_to_sign + header + hashed + tail

    log.debug('hashing %d bytes', len(data_to_hash))
    digest = hashlib.sha256(data_to_hash).digest()
    log.debug('signing digest: %s', util.hexlify(digest))
    params = signer_func(digest=digest)
    sig = b''.join(mpi(p) for p in params)

    return bytes(header + hashed + unhashed + digest[:2] + sig)
Create new GPG signature.
def show_cursor(self, show):
    if show:
        self.displaycontrol |= LCD_CURSORON
    else:
        self.displaycontrol &= ~LCD_CURSORON
    self.write8(LCD_DISPLAYCONTROL | self.displaycontrol)
Show or hide the cursor. Cursor is shown if show is True.
def override(self, parameters, recursive=False):
    result = Parameters()
    if recursive:
        RecursiveObjectWriter.copy_properties(result, self)
        RecursiveObjectWriter.copy_properties(result, parameters)
    else:
        ObjectWriter.set_properties(result, self)
        ObjectWriter.set_properties(result, parameters)
    return result
Overrides parameters with new values from specified Parameters and
returns a new Parameters object.

:param parameters: Parameters with parameters to override the current values.
:param recursive: (optional) true to perform deep copy, and false for
    shallow copy. Default: false
:return: a new Parameters object.
def get_ip(self, access='public', addr_family=None, strict=None):
    if addr_family not in ['IPv4', 'IPv6', None]:
        raise Exception("`addr_family` must be 'IPv4', 'IPv6' or None")
    if access not in ['private', 'public']:
        raise Exception("`access` must be 'public' or 'private'")
    if not hasattr(self, 'ip_addresses'):
        self.populate()

    ip_addrs = [
        ip_addr for ip_addr in self.ip_addresses
        if ip_addr.access == access
    ]

    preferred_family = addr_family if addr_family else 'IPv4'
    for ip_addr in ip_addrs:
        if ip_addr.family == preferred_family:
            return ip_addr.address
    return ip_addrs[0].address if ip_addrs and not addr_family else None
Return the server's IP address.

Params:
- addr_family: IPv4, IPv6 or None. None prefers IPv4 but will return
  IPv6 if IPv4 addr was not available.
- access: 'public' or 'private'
def ok_check(function, *args, **kwargs):
    req = function(*args, **kwargs)
    if req.content.lower() != 'ok':
        raise ClientException(req.content)
    return req.content
Ensure that the response body is OK
def construct(self, mapping: dict, **kwargs):
    assert '__type__' not in kwargs and '__args__' not in kwargs
    mapping = {**mapping, **kwargs}
    factory_fqdn = mapping.pop('__type__')
    factory = self.load_name(factory_fqdn)
    args = mapping.pop('__args__', [])
    return factory(*args, **mapping)
Construct an object from a mapping

:param mapping: the constructor definition, with ``__type__`` name and
    keyword arguments
:param kwargs: additional keyword arguments to pass to the constructor
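A sketch of the expected mapping shape, using a standard-library class as the target and a hypothetical `registry` instance of the class that defines `construct`:

    mapping = {
        '__type__': 'collections.OrderedDict',  # resolved via self.load_name
        '__args__': [[('a', 1), ('b', 2)]],     # positional arguments
    }
    # registry.construct(mapping)  ->  OrderedDict([('a', 1), ('b', 2)])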
def get_padding(x, padding_value=0):
    with tf.name_scope("padding"):
        return tf.to_float(tf.equal(x, padding_value))
Return float tensor representing the padding values in x.

Args:
    x: int tensor with any shape
    padding_value: int value that marks padding positions

Returns:
    float tensor with same shape as x containing values 0 or 1.
    0 -> non-padding, 1 -> padding
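A small sketch of the expected behavior (TF 1.x style, matching the `tf.to_float` call above):

    import tensorflow as tf

    x = tf.constant([[1, 0, 3], [0, 0, 5]])
    padding = get_padding(x, padding_value=0)
    # padding == [[0., 1., 0.], [1., 1., 0.]]  (1 marks padding positions)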
def playpause(self):
    msg = cr.Message()
    msg.type = cr.PLAYPAUSE
    self.send_message(msg)
Sends a "playpause" command to the player.
def extractDates(self, inp):
    def merge(param):
        day, time = param
        if not (day or time):
            return None
        if not day:
            return time
        if not time:
            return day
        return datetime.datetime(
            day.year, day.month, day.day, time.hour, time.minute
        )

    days = self.extractDays(inp)
    times = self.extractTimes(inp)
    return map(merge, zip_longest(days, times, fillvalue=None))
Extract semantic date information from an input string.

In effect, runs both parseDay and parseTime on the input string and
merges the results to produce a comprehensive datetime object.

Args:
    inp (str): Input string to be parsed.

Returns:
    A list of datetime objects containing the extracted dates from the
    input snippet, or an empty list if not found.
def matrix(mat):
    import ROOT
    if isinstance(mat, (ROOT.TMatrixD, ROOT.TMatrixDSym)):
        return _librootnumpy.matrix_d(ROOT.AsCObject(mat))
    elif isinstance(mat, (ROOT.TMatrixF, ROOT.TMatrixFSym)):
        return _librootnumpy.matrix_f(ROOT.AsCObject(mat))
    raise TypeError(
        "unable to convert object of type {0} "
        "into a numpy matrix".format(type(mat)))
Convert a ROOT TMatrix into a NumPy matrix.

Parameters
----------
mat : ROOT TMatrixT
    A ROOT TMatrixD or TMatrixF

Returns
-------
mat : numpy.matrix
    A NumPy matrix

Examples
--------
>>> from root_numpy import matrix
>>> from ROOT import TMatrixD
>>> a = TMatrixD(4, 4)
>>> a[1][2] = 2
>>> matrix(a)
matrix([[ 0.,  0.,  0.,  0.],
        [ 0.,  0.,  2.,  0.],
        [ 0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.]])
def call(method: Method, *args: Any, **kwargs: Any) -> Any:
    return validate_args(method, *args, **kwargs)(*args, **kwargs)
Validates arguments and then calls the method.

Args:
    method: The method to call.
    *args, **kwargs: Arguments to the method.

Returns:
    The "result" part of the JSON-RPC response (the return value from
    the method).

Raises:
    TypeError: If arguments don't match function signature.
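A usage sketch, assuming `validate_args` returns the method unchanged when the arguments fit its signature:

    def ping(name: str) -> str:
        return 'pong ' + name

    print(call(ping, 'alice'))  # 'pong alice'
    # call(ping) would raise TypeError: missing argument 'name'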
def _get_mpi_info(self):
    rank = self.comm.Get_rank()
    size = self.comm.Get_size()
    return rank, size
get basic MPI info

Returns
-------
rank : integer
    Returns the rank of this process
size : integer
    Returns total number of processes
def get_transition(self, line, line_index, column, is_escaped,
                   comment_system_transitions, eof=False):
    if (column == 0 and
            comment_system_transitions.should_terminate_now(
                line, self._resume_waiting_for)):
        return (InTextParser(), 0, None)

    if (_token_at_col_in_line(line, column, "```", 3) and
            not _is_escaped(line, column, is_escaped)):
        return (self._resume_parser((line_index, column + 3),
                                    self._resume_waiting_for),
                3,
                None)
    elif self._resume_waiting_for != ParserState.EOL:
        wait_until_len = len(self._resume_waiting_for)
        if (_token_at_col_in_line(line, column, self._resume_waiting_for,
                                  wait_until_len) and
                not _is_escaped(line, column, is_escaped)):
            return (InTextParser(), len(self._waiting_until), None)
    elif eof:
        return (InTextParser(), 0, None)

    return (self, 1, None)
Get transition from DisabledParser.
def _iter_over_selections(obj, dim, values):
    from .groupby import _dummy_copy
    dummy = None
    for value in values:
        try:
            obj_sel = obj.sel(**{dim: value})
        except (KeyError, IndexError):
            if dummy is None:
                dummy = _dummy_copy(obj)
            obj_sel = dummy
        yield obj_sel
Iterate over selections of an xarray object in the provided order.
def checkGeneTreeMatchesSpeciesTree(speciesTree, geneTree, processID):
    def fn(tree, l):
        if tree.internal:
            fn(tree.left, l)
            fn(tree.right, l)
        else:
            l.append(processID(tree.iD))

    l = []
    fn(speciesTree, l)
    l2 = []
    fn(geneTree, l2)
    for i in l2:
        assert i in l
Check that the ids in the gene tree all match nodes in the species tree
def remove_major_minor_suffix(scripts):
    # Escape the dot so it matches a literal '.' rather than any character,
    # and use a raw string for the regex.
    minor_major_regex = re.compile(r"-\d\.?\d?$")
    return [x for x in scripts if not minor_major_regex.search(x)]
Checks if executables already contain a "-MAJOR.MINOR" suffix.
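A usage sketch:

    scripts = ['pip', 'pip-3.7', 'pytest', 'pytest-2.7']
    print(remove_major_minor_suffix(scripts))  # ['pip', 'pytest']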
def conv1d(ni: int, no: int, ks: int = 1, stride: int = 1, padding: int = 0,
           bias: bool = False):
    "Create and initialize a `nn.Conv1d` layer with spectral normalization."
    conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
    nn.init.kaiming_normal_(conv.weight)
    if bias:
        conv.bias.data.zero_()
    return spectral_norm(conv)
Create and initialize a `nn.Conv1d` layer with spectral normalization.
def redraw_now(self, whence=0):
    try:
        time_start = time.time()
        self.redraw_data(whence=whence)
        self.update_image()
        time_done = time.time()
        time_delta = time_start - self.time_last_redraw
        time_elapsed = time_done - time_start
        self.time_last_redraw = time_done
        self.logger.debug(
            "widget '%s' redraw (whence=%d) delta=%.4f elapsed=%.4f sec" % (
                self.name, whence, time_delta, time_elapsed))
    except Exception as e:
        self.logger.error("Error redrawing image: %s" % (str(e)))
        try:
            (type, value, tb) = sys.exc_info()
            tb_str = "".join(traceback.format_tb(tb))
            self.logger.error("Traceback:\n%s" % (tb_str))
        except Exception:
            tb_str = "Traceback information unavailable."
            self.logger.error(tb_str)
Redraw the displayed image.

Parameters
----------
whence
    See :meth:`get_rgb_object`.
async def set_headline(self, name, level, message):
    if name not in self.services:
        raise ArgumentError("Unknown service name", short_name=name)
    self.services[name]['state'].set_headline(level, message)
    headline = self.services[name]['state'].headline.to_dict()
    await self._notify_update(name, 'new_headline', headline)
Set the sticky headline for a service.

Args:
    name (string): The short name of the service to query
    level (int): The level of the message (info, warning, error)
    message (string): The message contents
def vectorize(e, tolerance=0.1):
    tolerance = max(tolerance, e.linewidth)
    is_high = e.height > tolerance
    is_wide = e.width > tolerance
    if is_wide and not is_high:
        return (e.width, 0.0)
    if is_high and not is_wide:
        return (0.0, e.height)
vectorizes the pdf object's bounding box; tolerance is the width under
which we consider it a line instead of a big rectangle
def parse_relative_path(root_path, experiment_config, key):
    if experiment_config.get(key) and not os.path.isabs(experiment_config.get(key)):
        absolute_path = os.path.join(root_path, experiment_config.get(key))
        print_normal('expand %s: %s to %s ' % (key, experiment_config[key],
                                               absolute_path))
        experiment_config[key] = absolute_path
Change relative path to absolute path
def list_aliases(self):
    r = self.requests.get(self.index_url + "/_alias",
                          headers=HEADER_JSON, verify=False)
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as ex:
        logger.warning("Something went wrong when retrieving aliases on %s.",
                       self.anonymize_url(self.index_url))
        logger.warning(ex)
        return

    aliases = r.json()[self.index]['aliases']
    return aliases
List aliases linked to the index
def cleaned_date(day, keep_datetime=False):
    if not isinstance(day, (date, datetime)):
        raise UnsupportedDateType(
            "`{}` is of unsupported type ({})".format(day, type(day)))
    if not keep_datetime:
        if hasattr(day, 'date') and callable(day.date):
            day = day.date()
    return day
Return a "clean" date type. * keep a `date` unchanged * convert a datetime into a date, * convert any "duck date" type into a date using its `date()` method.
def check_for_input_len_diff(*args):
    arrays_len = [len(arr) for arr in args]
    if not all(a == arrays_len[0] for a in arrays_len):
        err_msg = ("Error: mismatched data lengths, check to ensure that all "
                   "input data is the same length and valid")
        raise Exception(err_msg)
Check for Input Length Difference.

This method checks whether all input data sets are the same length. If
they are not, an error is raised with a custom message informing the
developer that the data sets' lengths differ.
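A usage sketch:

    check_for_input_len_diff([1, 2, 3], [4, 5, 6])  # passes silently
    # check_for_input_len_diff([1, 2], [1, 2, 3])   # raises Exception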
def _nonempty_project(string):
    value = str(string)
    if len(value) == 0:
        msg = "No project provided and no default project configured"
        raise argparse.ArgumentTypeError(msg)
    return value
Argparse validator for ensuring a project is provided
def paramtypes(self):
    for m in [p[1] for p in self.ports]:
        for p in [p[1] for p in m]:
            for pd in p:
                if pd[1] in self.params:
                    continue
                item = (pd[1], pd[1].resolve())
                self.params.append(item)
get all parameter types
def with_source(self, lease):
    self.partition_id = lease.partition_id
    self.epoch = lease.epoch
    self.owner = lease.owner
    self.token = lease.token
    self.event_processor_context = lease.event_processor_context
Init with existing lease.

:param lease: An existing Lease.
:type lease: ~azure.eventprocessorhost.lease.Lease
def add_transcript(self, transcript):
    logger.debug("Adding transcript {0} to variant {1}".format(
        transcript, self['variant_id']))
    self['transcripts'].append(transcript)
Add a transcript to the variant.

This adds a transcript dict to variant['transcripts'].

Args:
    transcript (dict): A transcript dictionary
def load_all(self, key, default=None):
    value = getattr(self, key)
    if default is not None:
        def loader(path):
            return self.load_path_with_default(path, default)
    else:
        loader = self.load_path
    if isinstance(value, dict):
        return {key: loader(value) for key, value in value.items()}
    elif isinstance(value, list):
        return [loader(value) for value in value]
    else:
        raise ValueError('load_all must be list or dict')
Import settings key as a dict or list with values of importable paths.

If a default constructor is specified, and a path is not importable, it
falls back to running the given constructor.
def add_dependency(self, my_dep):
    if self.dependency_layer is None:
        self.dependency_layer = Cdependencies()
        self.root.append(self.dependency_layer.get_node())
    self.dependency_layer.add_dependency(my_dep)
Adds a dependency to the dependency layer

@type my_dep: L{Cdependency}
@param my_dep: dependency object
def get_connection(**kwargs):
    kwargs = clean_kwargs(**kwargs)
    if 'pyeapi.conn' in __proxy__:
        return __proxy__['pyeapi.conn']()
    conn, kwargs = _prepare_connection(**kwargs)
    return conn
Return the connection object to the pyeapi Node.

.. warning::

    This function returns an unserializable object, hence it is not meant
    to be used on the CLI. This should mainly be used when invoked from
    other modules for the low level connection with the network device.

kwargs
    Key-value dictionary with the authentication details.

USAGE Example:

.. code-block:: python

    conn = __salt__['pyeapi.get_connection'](host='router1.example.com',
                                             username='example',
                                             password='example')
    show_ver = conn.run_commands(['show version', 'show interfaces'])
def lookup(self, lookup_url, url_key=None):
    url_ending = self._get_ending(lookup_url)
    params = {
        'url_ending': url_ending,
        'url_key': url_key
    }
    data, r = self._make_request(self.api_lookup_endpoint, params)
    if r.status_code == 401:
        if url_key is not None:
            raise exceptions.UnauthorizedKeyError(
                'given url_key is not valid for secret lookup.')
        raise exceptions.UnauthorizedKeyError
    elif r.status_code == 404:
        return False

    action = data.get('action')
    full_url = data.get('result')
    if action == 'lookup' and full_url is not None:
        return full_url
    raise exceptions.DebugTempWarning
Looks up the url_ending to obtain information about the short url.

If it exists, the API will return a dictionary with information, including
the long_url that is the destination of the given short url URL.

The lookup object looks something like this:

.. code-block:: python

    {
        'clicks': 42,
        'created_at': {
            'date': '2017-12-03 00:40:45.000000',
            'timezone': 'UTC',
            'timezone_type': 3
        },
        'long_url': 'https://stackoverflow.com/questions/tagged/python',
        'updated_at': {
            'date': '2017-12-24 13:37:00.000000',
            'timezone': 'UTC',
            'timezone_type': 3
        }
    }

:param str lookup_url: An url ending or full short url address
:param url_key: optional URL ending key for lookups against secret URLs
:type url_key: str or None
:return: Lookup dictionary containing, among others things, the long url;
    or None if not existing
:rtype: dict or None