code (stringlengths 51–2.38k) | docstring (stringlengths 4–15.2k)
---|---|
def df(self):
import pandas as pd
return pd.concat([w.df(uwi=True) for w in self])
|
Makes a pandas DataFrame containing Curve data for all the wells
in the Project. The DataFrame has a dual index of well UWI and
curve Depths. Requires `pandas`.
Args:
No arguments.
Returns:
`pandas.DataFrame`.
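A usage sketch (``project`` is assumed to be a Project instance):
>>> df = project.df()  # rows indexed by (UWI, Depth)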
|
def get_site(self, *args):
num_args = len(args)
if num_args == 1:
site = args[0]
elif num_args == 2:
host_name, path_to_site = args
path_to_site = '/' + path_to_site if not path_to_site.startswith(
'/') else path_to_site
site = '{}:{}:'.format(host_name, path_to_site)
elif num_args == 3:
site = ','.join(args)
else:
raise ValueError('Incorrect number of arguments')
url = self.build_url(self._endpoints.get('get_site').format(id=site))
response = self.con.get(url)
if not response:
return None
data = response.json()
return self.site_constructor(parent=self,
**{self._cloud_data_key: data})
|
Returns a SharePoint site.
:param args: It accepts multiple ways of retrieving a site:
get_site(host_name): host_name, e.g. 'contoso.sharepoint.com' or 'root'
get_site(site_id): the site_id: a comma-separated string of
(host_name, site_collection_id, site_id)
get_site(host_name, path_to_site): host_name, e.g.
'contoso.sharepoint.com'; path_to_site: a URL path (with a leading slash)
get_site(host_name, site_collection_id, site_id):
host_name, e.g. 'contoso.sharepoint.com'
:rtype: Site
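A usage sketch (``sp`` is a hypothetical owner object, not from the original):
>>> site = sp.get_site('contoso.sharepoint.com')
>>> site = sp.get_site('contoso.sharepoint.com', 'sites/engineering')  # slash is prepended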
|
def update(self):
con = self.subpars.pars.control
self(con.eqi1*con.tind)
|
Update |KI1| based on |EQI1| and |TInd|.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqi1(5.0)
>>> tind.value = 10.0
>>> derived.ki1.update()
>>> derived.ki1
ki1(50.0)
|
def fuzzy_subset(str_):
if str_ is None:
return str_
if ':' in str_:
return smart_cast(str_, slice)
if str_.startswith('['):
return smart_cast(str_[1:-1], list)
else:
return smart_cast(str_, list)
|
converts a string into an argument to list_take
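A sketch of the expected behavior, assuming ``smart_cast`` parses the
string into the requested type:
>>> fuzzy_subset('2:5')    # colon syntax becomes a slice
slice(2, 5, None)
>>> fuzzy_subset('[1,2]')  # bracketed (or bare) items become a list
[1, 2]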
|
def notify(self, msg, color='green', notify='true', message_format='text'):
self.message_dict = {
'message': msg,
'color': color,
'notify': notify,
'message_format': message_format,
}
if not self.debug:
return requests.post(
self.notification_url,
json.dumps(self.message_dict),
headers=self.headers
)
else:
print('HipChat message: <{}>'.format(msg))
return []
|
Send notification to specified HipChat room
|
def _collapseMsg(self, msg):
retval = {}
for logname in msg:
data = u""
for m in msg[logname]:
m = bytes2unicode(m, self.builder.unicode_encoding)
data += m
if isinstance(logname, tuple) and logname[0] == 'log':
retval['log'] = (logname[1], data)
else:
retval[logname] = data
return retval
|
Take msg, which is a dictionary of lists of output chunks, and
concatenate all the chunks into a single string
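A sketch of the collapse, assuming ``bytes2unicode`` simply decodes each
chunk (``builder`` is a hypothetical owner object):
>>> msg = {'stdio': [b'out1', b'out2'], ('log', 'warnings'): [b'w']}
>>> builder._collapseMsg(msg)
{'stdio': u'out1out2', 'log': ('warnings', u'w')}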
|
def make_link(title, url, blank=False):
attrs = 'href="%s"' % url
if blank:
attrs += ' target="_blank" rel="noopener noreferrer"'
return '<a %s>%s</a>' % (attrs, title)
|
Make an HTML link out of a URL.
Args:
title (str): Text to show for the link.
url (str): URL the link will point to.
blank (bool): If True, appends target=_blank, noopener and noreferrer to
the <a> element. Defaults to False.
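For example, the output follows directly from the attribute handling above:
>>> make_link('Docs', 'https://example.com/', blank=True)
'<a href="https://example.com/" target="_blank" rel="noopener noreferrer">Docs</a>'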
|
def resolve_filename(self, package_dir, filename):
sass_path = os.path.join(package_dir, self.sass_path, filename)
if self.strip_extension:
filename, _ = os.path.splitext(filename)
css_filename = filename + '.css'
css_path = os.path.join(package_dir, self.css_path, css_filename)
return sass_path, css_path
|
Gets a proper full relative path of Sass source and
CSS source that will be generated, according to ``package_dir``
and ``filename``.
:param package_dir: the path of package directory
:type package_dir: :class:`str`, :class:`basestring`
:param filename: the filename of Sass/SCSS source to compile
:type filename: :class:`str`, :class:`basestring`
:returns: a pair of (sass, css) path
:rtype: :class:`tuple`
|
def export_data_object_info(bpmn_diagram, data_object_params, output_element):
output_element.set(consts.Consts.is_collection, data_object_params[consts.Consts.is_collection])
|
Adds DataObject node attributes to exported XML element
:param bpmn_diagram: BPMNDiagramGraph class instantiation representing a BPMN process diagram,
:param data_object_params: dictionary with given data object parameters,
:param output_element: object representing BPMN XML 'dataObject' element.
|
def _gate_pre_offset(self, gate):
try:
gates = self.settings['gates']
delta_pos = gates[gate.__class__.__name__]['pre_offset']
except KeyError:
delta_pos = self._gate_offset(gate)
return delta_pos
|
Return the offset to use before placing this gate.
:param gate: The gate object whose pre-offset is desired (looked up by its class name).
:return: Offset to use before the gate.
:rtype: float
|
def sync_role_definitions(self):
from superset import conf
logging.info('Syncing role definition')
self.create_custom_permissions()
self.set_role('Admin', self.is_admin_pvm)
self.set_role('Alpha', self.is_alpha_pvm)
self.set_role('Gamma', self.is_gamma_pvm)
self.set_role('granter', self.is_granter_pvm)
self.set_role('sql_lab', self.is_sql_lab_pvm)
if conf.get('PUBLIC_ROLE_LIKE_GAMMA', False):
self.set_role('Public', self.is_gamma_pvm)
self.create_missing_perms()
self.get_session.commit()
self.clean_perms()
|
Inits the Superset application with security roles and such
|
def is_quote_artifact(orig_text, span):
res = False
cursor = re.finditer(r'("|\')[^ .,:;?!()*+-].*?("|\')', orig_text)
for item in cursor:
if item.span()[1] == span[1]:
res = True
return res
|
Distinguish between quotes and units.
|
def visible_width(string):
if '\033' in string:
string = RE_COLOR_ANSI.sub('', string)
try:
string = string.decode('u8')
except (AttributeError, UnicodeEncodeError):
pass
width = 0
for char in string:
if unicodedata.east_asian_width(char) in ('F', 'W'):
width += 2
else:
width += 1
return width
|
Get the visible width of a unicode string.
Some CJK unicode characters occupy two terminal columns, unlike ASCII and latin unicode characters.
From: https://github.com/Robpol86/terminaltables/pull/9
:param str string: String to measure.
:return: String's width.
:rtype: int
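For example, fullwidth CJK characters count as two columns while ASCII
counts as one:
>>> visible_width('abc')
3
>>> visible_width(u'蟒蛇')
4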
|
def temp_shell_task(cls, inp, mpi_procs=1, workdir=None, manager=None):
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=mpi_procs))
task.set_name('temp_shell_task')
return task
|
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
Mainly used for invoking Abinit to get important parameters needed to prepare the real task.
Args:
inp: Input used to build the task.
mpi_procs: Number of MPI processes to use.
workdir: Working directory (a temporary directory is created if not given).
manager: Task manager (read from the user configuration if not given).
|
def _get_redis_keys_opts():
return {
'bank_prefix': __opts__.get('cache.redis.bank_prefix', _BANK_PREFIX),
'bank_keys_prefix': __opts__.get('cache.redis.bank_keys_prefix', _BANK_KEYS_PREFIX),
'key_prefix': __opts__.get('cache.redis.key_prefix', _KEY_PREFIX),
'separator': __opts__.get('cache.redis.separator', _SEPARATOR)
}
|
Build the key opts based on the user options.
|
def set_path(dicts, keys, v):
for key in keys[:-1]:
dicts = dicts.setdefault(key, dict())
dicts = dicts.setdefault(keys[-1], list())
dicts.append(v)
|
Helper function for modifying nested dictionaries
:param dicts: dict: the given dictionary
:param keys: list str: path to added value
:param v: str: value to be added
Example:
>>> d = dict()
>>> set_path(d, ['a', 'b', 'c'], 'd')
>>> d
{'a': {'b': {'c': ['d']}}}
In case of duplicate paths, the additional value will
be added to the leaf node rather than simply replacing it:
>>> set_path(d, ['a', 'b', 'c'], 'e')
>>> d
{'a': {'b': {'c': ['d', 'e']}}}
|
def set_sample_probability(probability):
global _sample_probability
if not 0.0 <= probability <= 1.0:
raise ValueError('Invalid probability value')
LOGGER.debug('Setting sample probability to %.2f', probability)
_sample_probability = float(probability)
|
Set the probability that a batch will be submitted to the InfluxDB
server. This should be a value that is greater than or equal to ``0`` and
less than or equal to ``1.0``. A value of ``0.25`` would represent a
probability of 25% that a batch would be written to InfluxDB.
:param float probability: The value between 0 and 1.0 that represents the
probability that a batch will be submitted to the InfluxDB server.
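A minimal sketch of how the module-level value would gate a write (the
submission step is hypothetical, not part of this function):
>>> import random
>>> set_sample_probability(0.25)
>>> if random.random() < _sample_probability:
...     pass  # submit the batch to the InfluxDB server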
|
def getSampleFrequency(self,chn):
if 0 <= chn < self.signals_in_file:
return round(self.samplefrequency(chn))
else:
return 0
|
Returns the sample frequency of the signal at channel chn.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getSampleFrequency(0)==200.0
True
>>> f._close()
>>> del f
|
def _get_rsa_key(self):
url = 'https://steamcommunity.com/mobilelogin/getrsakey/'
values = {
'username': self._username,
'donotcache' : self._get_donotcachetime(),
}
req = self.post(url, data=values)
data = req.json()
if not data['success']:
raise SteamWebError('Failed to get RSA key', data)
mod = int(str(data['publickey_mod']), 16)
exp = int(str(data['publickey_exp']), 16)
rsa = RSA.construct((mod, exp))
self.rsa_cipher = PKCS1_v1_5.new(rsa)
self.rsa_timestamp = data['timestamp']
|
get the Steam RSA key and build the PKCS1_v1_5 cipher (stored on the instance, not returned)
|
def get_meta(self, table_name, constraints=None, column_to_field_name=None, is_view=False, is_partition=None):
meta = [" class Meta(models.Model.Meta):",
" db_table = '%s'" % table_name]
if self.connection.vendor == 'salesforce':
for line in self.connection.introspection.get_additional_meta(table_name):
meta.append(" " + line)
meta.append("")
return meta
|
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
|
def as_fs(self):
s = self._standard_value
result = []
idx = 0
while (idx < len(s)):
c = s[idx]
if c != "\\":
result.append(c)
else:
nextchr = s[idx + 1]
if (nextchr == ".") or (nextchr == "-") or (nextchr == "_"):
result.append(nextchr)
idx += 1
else:
result.append("\\")
result.append(nextchr)
idx += 2
continue
idx += 1
return "".join(result)
|
Returns the value of component encoded as formatted string.
Inspect each character in value of component.
Certain nonalpha characters pass through without escaping
into the result, but most retain escaping.
:returns: Formatted string associated with component
:rtype: string
|
def _path_completer_grammar(self):
if self._path_completer_grammar_cache is None:
self._path_completer_grammar_cache = self._create_path_completer_grammar()
return self._path_completer_grammar_cache
|
Return the grammar for matching paths inside strings inside Python
code.
|
def MakeDestinationKey(directory, filename):
return utils.SmartStr(utils.JoinPath(directory, filename)).lstrip("/")
|
Creates a name that identifies a database file.
|
def data_lookup_method(fields_list, mongo_db_obj, hist, record,
lookup_type):
if hist is None:
hist = {}
for field in record:
if record[field] != '' and record[field] is not None:
if field in fields_list:
if lookup_type in fields_list[field]['lookup']:
field_val_new, hist = DataLookup(
fieldVal=record[field],
db=mongo_db_obj,
lookupType=lookup_type,
fieldName=field,
histObj=hist)
record[field] = field_val_new
return record, hist
|
Method to look up the replacement value given a single input value from
the same field.
:param dict fields_list: Fields configurations
:param MongoClient mongo_db_obj: MongoDB collection object
:param dict hist: existing input of history values object
:param dict record: values to validate
:param str lookup_type: Type of lookup
|
def start_watcher_thread(self):
watcher_thread = threading.Thread(target=self.run_watcher)
if self._reload_mode == self.RELOAD_MODE_V_SPAWN_WAIT:
daemon = False
else:
daemon = True
watcher_thread.setDaemon(daemon)
watcher_thread.start()
return watcher_thread
|
Start watcher thread.
:return:
Watcher thread object.
|
def detectOperaMobile(self):
return UAgentInfo.engineOpera in self.__userAgent \
and (UAgentInfo.mini in self.__userAgent
or UAgentInfo.mobi in self.__userAgent)
|
Return detection of an Opera browser for a mobile device
Detects Opera Mobile or Opera Mini.
|
def get_proposed_feature(project):
change_collector = ChangeCollector(project)
collected_changes = change_collector.collect_changes()
try:
new_feature_info = one_or_raise(collected_changes.new_feature_info)
importer, _, _ = new_feature_info
except ValueError:
raise BalletError('Too many features collected')
module = importer()
feature = _get_contrib_feature_from_module(module)
return feature
|
Get the proposed feature
The path of the proposed feature is determined by diffing the project
against a comparison branch, such as master. The feature is then imported
from that path and returned.
Args:
project (ballet.project.Project): project info
Raises:
ballet.exc.BalletError: more than one feature collected
|
def surface_or_abstract(cls, predstr):
if predstr.strip('"').lstrip("'").startswith('_'):
return cls.surface(predstr)
else:
return cls.abstract(predstr)
|
Instantiate a Pred from either its surface or abstract symbol.
|
def quantile(data, weights, quantile):
nd = data.ndim
if nd == 0:
TypeError("data must have at least one dimension")
elif nd == 1:
return quantile_1D(data, weights, quantile)
elif nd > 1:
n = data.shape
imr = data.reshape((np.prod(n[:-1]), n[-1]))
result = np.apply_along_axis(quantile_1D, -1, imr, weights, quantile)
return result.reshape(n[:-1])
|
Weighted quantile of an array with respect to the last axis.
Parameters
----------
data : ndarray
Input array.
weights : ndarray
Array with the weights. It must have the same size of the last
axis of `data`.
quantile : float
Quantile to compute. It must have a value between 0 and 1.
Returns
-------
quantile : float
The output value.
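Examples
--------
A shape sketch consistent with the description above (``quantile_1D`` is
the 1-D helper this function delegates to):
>>> data = np.random.rand(5, 100)  # quantile taken over the last axis
>>> weights = np.ones(100)
>>> quantile(data, weights, 0.5).shape
(5,)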
|
def shipping_rate(context, **kwargs):
settings = Configuration.for_site(context["request"].site)
code = kwargs.get('code', None)
name = kwargs.get('name', None)
return get_shipping_cost(settings, code, name)
|
Return the shipping rate for a country & shipping option name.
|
def values(self):
if self.ui.hzBtn.isChecked():
fscale = SmartSpinBox.Hz
else:
fscale = SmartSpinBox.kHz
if self.ui.msBtn.isChecked():
tscale = SmartSpinBox.MilliSeconds
else:
tscale = SmartSpinBox.Seconds
return fscale, tscale
|
Gets the scales that the user chose
| For frequency: 1 = Hz, 1000 = kHz
| For time: 1 = seconds, 0.001 = ms
:returns: float, float -- frequency scaling, time scaling
|
def _set_fqdn(self):
results = self._search(
'cn=config',
'(objectClass=*)',
['nsslapd-localhost'],
scope=ldap.SCOPE_BASE
)
if not results or not isinstance(results, list):
r = None
else:
dn, attrs = results[0]
r = attrs['nsslapd-localhost'][0].decode('utf-8')
self._fqdn = r
log.debug('FQDN: %s' % self._fqdn)
|
Get FQDN from LDAP
|
def is_constant(self, qc: QuantumComputer, bitstring_map: Dict[str, str]) -> bool:
self._init_attr(bitstring_map)
prog = Program()
dj_ro = prog.declare('ro', 'BIT', len(self.computational_qubits))
prog += self.deutsch_jozsa_circuit
prog += [MEASURE(qubit, ro) for qubit, ro in zip(self.computational_qubits, dj_ro)]
executable = qc.compile(prog)
returned_bitstring = qc.run(executable)
bitstring = np.array(returned_bitstring, dtype=int)
constant = all([bit == 0 for bit in bitstring])
return constant
|
Computes whether bitstring_map represents a constant function, given that it is constant
or balanced. Constant means all inputs map to the same value, balanced means half of the
inputs maps to one value, and half to the other.
:param qc: The QuantumComputer object on which to run the pyQuil program.
:param bitstring_map: A dictionary whose keys are bitstrings, and whose values are bits
represented as strings.
:type bitstring_map: Dict[String, String]
:return: True if the bitstring_map represented a constant function, false otherwise.
:rtype: bool
|
def _parse_stop_words_file(self, path):
language = None
loaded = False
if os.path.isfile(path):
self._logger.debug('Loading stop words in %s', path)
language = path.split('-')[-1]
if language not in self.__stop_words:
self.__stop_words[language] = set()
with codecs.open(path, 'r', 'UTF-8') as file:
loaded = True
for word in file:
self.__stop_words[language].add(word.strip())
return loaded
|
Load stop words from the given path.
Parse the stop words file, saving each word found in it in a set
for the language of the file. This language is obtained from
the file name. If the file doesn't exist, the method will have
no effect.
Args:
path: Path to the stop words file.
Returns:
A boolean indicating whether the file was loaded.
|
def mac_set_relative_dylib_deps(libname):
from PyInstaller.lib.macholib import util
from PyInstaller.lib.macholib.MachO import MachO
if os.path.basename(libname) in _BOOTLOADER_FNAMES:
return
def match_func(pth):
if not util.in_system_path(pth):
return os.path.join('@executable_path', os.path.basename(pth))
dll = MachO(libname)
dll.rewriteLoadCommands(match_func)
try:
f = open(dll.filename, 'rb+')
for header in dll.headers:
f.seek(0)
dll.write(f)
f.seek(0, 2)
f.flush()
f.close()
except Exception:
pass
|
On Mac OS X set relative paths to dynamic library dependencies of `libname`.
Relative paths make it possible to avoid using the environment variable
DYLD_LIBRARY_PATH, which has some known issues; relative paths are a more
flexible mechanism.
The current location of dependent libraries is derived from the location
of the executable (paths start with '@executable_path').
@executable_path or @loader_path fail in some situations
(@loader_path - qt4 plugins, @executable_path -
Python built-in hashlib module).
|
def _read_console_output(self, ws, out):
while True:
msg = yield from ws.receive()
if msg.tp == aiohttp.WSMsgType.TEXT:
out.feed_data(msg.data.encode())
elif msg.tp == aiohttp.WSMsgType.BINARY:
out.feed_data(msg.data)
elif msg.tp == aiohttp.WSMsgType.ERROR:
log.critical("Docker WebSocket Error: {}".format(msg.data))
else:
out.feed_eof()
ws.close()
break
yield from self.stop()
|
Read from the WebSocket and forward the data to the telnet output stream
:param ws: Websocket connection
:param out: Output stream
|
def get_all_dbparameter_groups(self, groupname=None, max_records=None,
marker=None):
params = {}
if groupname:
params['DBParameterGroupName'] = groupname
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBParameterGroups', params,
[('DBParameterGroup', ParameterGroup)])
|
Get all parameter groups associated with your account in a region.
:type groupname: str
:param groupname: The name of the DBParameter group to retrieve.
If not provided, all DBParameter groups will be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.ec2.parametergroup.ParameterGroup`
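A usage sketch (``conn`` is assumed to be a boto RDS connection):
>>> groups = conn.get_all_dbparameter_groups(max_records=20)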
|
def patch_conf(settings_patch=None, settings_file=None):
if settings_patch is None:
settings_patch = {}
reload_config()
os.environ[ENVIRONMENT_VARIABLE] = settings_file if settings_file else ''
from bernard.conf import settings as l_settings
r_settings = l_settings._settings
r_settings.update(settings_patch)
if 'bernard.i18n' in modules:
from bernard.i18n import translate, intents
translate._regenerate_word_dict()
intents._refresh_intents_db()
yield
|
Reload the configuration from scratch. Only the default config is loaded,
not the environment-specified config.
Then the specified patch is applied.
This is for unit tests only!
:param settings_patch: Custom configuration values to insert
:param settings_file: Custom settings file to read
|
def topics(self):
cluster = self._client.cluster
if self._client._metadata_refresh_in_progress and self._client._topics:
future = cluster.request_update()
self._client.poll(future=future)
stash = cluster.need_all_topic_metadata
cluster.need_all_topic_metadata = True
future = cluster.request_update()
self._client.poll(future=future)
cluster.need_all_topic_metadata = stash
return cluster.topics()
|
Get all topics the user is authorized to view.
Returns:
set: topics
|
def output_shape(self):
if self._output_shape is None:
self._ensure_is_connected()
if callable(self._output_shape):
self._output_shape = tuple(self._output_shape())
return self._output_shape
|
Returns the output shape.
|
def _wr_ver_n_key(self, fout_txt, verbose):
with open(fout_txt, 'w') as prt:
self._prt_ver_n_key(prt, verbose)
print(' WROTE: {TXT}'.format(TXT=fout_txt))
|
Write GO DAG version and key indicating presence of GO ID in a list.
|
def _compute_document_meta(self):
meta = OrderedDict()
bounds_iter = xml_utils.bounds(self.filename,
start_re=r'<text id="(\d+)"[^>]*name="([^"]*)"',
end_re=r'</text>')
for match, bounds in bounds_iter:
doc_id, title = str(match.group(1)), match.group(2)
title = xml_utils.unescape_attribute(title)
xml_data = xml_utils.load_chunk(self.filename, bounds)
doc = Document(compat.ElementTree.XML(xml_data.encode('utf8')))
meta[doc_id] = _DocumentMeta(title, bounds, doc.categories())
return meta
|
Return documents meta information that can
be used for fast document lookups. Meta information
consists of documents titles, categories and positions
in file.
|
def begin_pending_transactions(self):
while not self._pending_sessions.empty():
session = self._pending_sessions.get()
session._transaction.begin()
super(TransactionPingingPool, self).put(session)
|
Begin all transactions for sessions added to the pool.
|
def setupData(self, dataPath, numLabels=0, ordered=False, stripCats=False, seed=42, **kwargs):
self.split(dataPath, numLabels, **kwargs)
if not ordered:
self.randomizeData(seed)
filename, ext = os.path.splitext(dataPath)
classificationFileName = "{}_category.json".format(filename)
dataFileName = "{}_network{}".format(filename, ext)
if stripCats:
self.stripCategories()
self.saveData(dataFileName, classificationFileName)
return dataFileName
|
Main method of this class. Use for setting up a network data file.
@param dataPath (str) Path to CSV file.
@param numLabels (int) Number of columns of category labels.
@param textPreprocess (bool) True will preprocess text while tokenizing.
@param ordered (bool) Keep data samples (sequences) in order,
otherwise randomize.
@param seed (int) Random seed.
@return dataFileName (str) Network data file name; same directory as
input data file.
|
def stats(self, request):
request = _patch_stats_request(request)
body = json.dumps(request)
return self.dispatcher.response(models.Request(
self._url('data/v1/stats'), self.auth,
body_type=models.JSON, data=body, method='POST')).get_body()
|
Get stats for the provided request.
:param request dict: A search request that also contains the 'interval'
property.
:returns: :py:class:`planet.api.models.JSON`
:raises planet.api.exceptions.APIException: On API error.
|
def _stop(self):
duration = time.time() - self.start
self.result['stop'] = str(datetime.datetime.now())
self.result['delta'] = int(duration)
if self.duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
self.result['stdout'] = "Paused for %s %s" % (duration, self.duration_unit)
|
calculate the duration we actually paused for and then
finish building the task result string
|
def pack_commands(self, commands):
"Pack multiple commands into the Redis protocol"
output = []
pieces = []
buffer_length = 0
for cmd in commands:
for chunk in self.pack_command(*cmd):
pieces.append(chunk)
buffer_length += len(chunk)
if buffer_length > 6000:
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
|
Pack multiple commands into the Redis protocol
|
def abort_request(self, request):
self.timedout = True
try:
request.cancel()
except error.AlreadyCancelled:
return
|
Called to abort request on timeout
|
def compute_time_at_sun_angle(day, latitude, angle):
positive_angle_rad = radians(abs(angle))
angle_sign = abs(angle)/angle
latitude_rad = radians(latitude)
declination = radians(sun_declination(day))
numerator = -sin(positive_angle_rad) - sin(latitude_rad) * sin(declination)
denominator = cos(latitude_rad) * cos(declination)
time_diff = degrees(acos(numerator/denominator)) / 15
return time_diff * angle_sign
|
Compute the floating point time difference between mid-day and an angle.
All the prayers are defined as certain angles from mid-day (Zuhr).
This formula is taken from praytimes.org/calculation
:param day: The day for which to compute the time
:param latitude: Latitude of the place of interest
:param angle: The angle at which to compute the time
:returns: The floating point time delta between Zuhr and the angle, the
sign of the result corresponds to the sign of the angle
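For example, a Fajr time could be derived from the returned offset (the
18-degree twilight angle is one common convention, and ``zuhr_time`` is
hypothetical, not part of this function):
>>> offset = compute_time_at_sun_angle(day, latitude=30.0, angle=-18.0)
>>> fajr = zuhr_time + offset  # offset is negative, so this falls before mid-day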
|
def cidr_broadcast(cidr):
ips = netaddr.IPNetwork(cidr)
return six.text_type(ips.broadcast)
|
Get the broadcast address associated with a CIDR address.
CLI example::
salt myminion netaddress.cidr_broadcast 192.168.0.0/20
|
def _setup():
_SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
_SOCKET.bind(('', PORT))
udp = threading.Thread(target=_listen, daemon=True)
udp.start()
|
Set up module.
Open a UDP socket, and listen in a thread.
|
def get(self, nb=0):
return {i: self.stats_history[i].history_raw(nb=nb) for i in self.stats_history}
|
Get the history as a dict of lists
|
def disconnect(self, user):
self.remove_user(user)
self.send_message(create_message('RoomServer', 'Please all say goodbye to {name}!'.format(name=user.id.name)))
self.send_message(create_disconnect(user.id.name))
|
Disconnect a user and send a message to the
connected clients
|
def _build_all_dependencies(self):
ret = {}
for model, schema in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
|
Helper function to build a map from each model to its list of model reference dependencies
|
def remove_entry(self, **field_value):
field, value = next(iter(field_value.items()))
self.data['entries'][:] = [entry
for entry in self.data.get('entries')
if entry.get('{}_entry'.format(self.typeof))
.get(field) != str(value)]
|
Remove an AccessList entry by field specified. Use the supported
arguments for the inheriting class for keyword arguments.
:raises UpdateElementFailed: failed to modify with reason
:return: None
|
def is_ubuntu():
if sys.platform.startswith('linux') and osp.isfile('/etc/lsb-release'):
release_info = open('/etc/lsb-release').read()
if 'Ubuntu' in release_info:
return True
else:
return False
else:
return False
|
Detect if we are running in an Ubuntu-based distribution
|
async def enable(self, reason=None):
params = {"enable": False, "reason": reason}
response = await self._api.put("/v1/agent/maintenance", params=params)
return response.status == 200
|
Resumes normal operation
Parameters:
reason (str): Reason for resuming normal operation
Returns:
bool: ``True`` on success
|
def dot(self, other):
dot_product = 0
a = self.elements
b = other.elements
a_len = len(a)
b_len = len(b)
i = j = 0
while i < a_len and j < b_len:
a_val = a[i]
b_val = b[j]
if a_val < b_val:
i += 2
elif a_val > b_val:
j += 2
else:
dot_product += a[i + 1] * b[j + 1]
i += 2
j += 2
return dot_product
|
Calculates the dot product of this vector and another vector.
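A sketch of the expected layout: ``elements`` is a flattened, index-sorted
sequence of (index, value) pairs, which the two-pointer walk relies on
(``a`` and ``b`` are hypothetical vector objects):
>>> a.elements = [0, 2.0, 3, 4.0]
>>> b.elements = [0, 1.0, 2, 5.0, 3, 2.0]
>>> a.dot(b)  # 2.0*1.0 + 4.0*2.0
10.0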
|
def generic_div(a, b):
logger.debug('Called generic_div({}, {})'.format(a, b))
return a / b
|
Simple function to divide two numbers
|
def init_argparser_build_dir(
self, argparser, help=(
'the build directory, where all sources will be copied to '
'as part of the build process; if left unspecified, the '
'default behavior is to create a new temporary directory '
'that will be removed upon conclusion of the build; if '
'specified, it must be an existing directory and all files '
'for the build will be copied there instead, overwriting any '
'existing file, with no cleanup done after.'
)):
argparser.add_argument(
'--build-dir', default=None, dest=BUILD_DIR,
metavar=metavar(BUILD_DIR), help=help,
)
|
For setting up build directory
|
def plotgwsrc(gwb):
theta, phi, omega, polarization = gwb.gw_dist()
rho = phi-N.pi
eta = 0.5*N.pi - theta
P.title("GWB source population")
ax = P.axes(projection='mollweide')
foo = P.scatter(rho, eta, marker='.', s=1)
return foo
|
Plot a GWB source population as a mollweide projection.
|
def execute_command_no_results(self, sock_info, generator):
full_result = {
"writeErrors": [],
"writeConcernErrors": [],
"nInserted": 0,
"nUpserted": 0,
"nMatched": 0,
"nModified": 0,
"nRemoved": 0,
"upserted": [],
}
write_concern = WriteConcern()
op_id = _randint()
try:
self._execute_command(
generator, write_concern, None,
sock_info, op_id, False, full_result)
except OperationFailure:
pass
|
Execute write commands with OP_MSG and w=0 WriteConcern, ordered.
|
def addPolygonAnnot(self, points):
CheckParent(self)
val = _fitz.Page_addPolygonAnnot(self, points)
if not val: return
val.thisown = True
val.parent = weakref.proxy(self)
self._annot_refs[id(val)] = val
return val
|
Add a 'Polygon' annotation for a sequence of points.
|
def get_output_jsonpath(self, sub_output=None):
output_jsonpath_field = self.get_output_jsonpath_field(sub_output)
metadata = self.extractor.get_metadata()
metadata['source'] = str(self.input_fields)
extractor_filter = ""
is_first = True
for key, value in metadata.iteritems():
if is_first:
is_first = False
else:
extractor_filter = extractor_filter + " & "
if isinstance(value, basestring):
extractor_filter = extractor_filter\
+ "{}=\"{}\"".format(key,
re.sub('(?<=[^\\\])\"', "'", value))
elif isinstance(value, types.ListType):
extractor_filter = extractor_filter\
+ "{}={}".format(key, str(value))
output_jsonpath = "{}[?{}].result.value".format(
output_jsonpath_field, extractor_filter)
return output_jsonpath
|
Attempt to build a JSONPath filter for this ExtractorProcessor
that captures how to get at the outputs of the wrapped Extractor
|
def plot_rebit_prior(prior, rebit_axes=REBIT_AXES,
n_samples=2000, true_state=None, true_size=250,
force_mean=None,
legend=True,
mean_color_index=2
):
pallette = plt.rcParams['axes.color_cycle']
plot_rebit_modelparams(prior.sample(n_samples),
c=pallette[0],
label='Prior',
rebit_axes=rebit_axes
)
if true_state is not None:
plot_rebit_modelparams(true_state,
c=pallette[1],
label='True', marker='*', s=true_size,
rebit_axes=rebit_axes
)
if hasattr(prior, '_mean') or force_mean is not None:
mean = force_mean if force_mean is not None else prior._mean
plot_rebit_modelparams(
prior._basis.state_to_modelparams(mean)[None, :],
edgecolors=pallette[mean_color_index], s=250, facecolors='none', linewidth=3,
label='Mean',
rebit_axes=rebit_axes
)
plot_decorate_rebits(prior.basis,
rebit_axes=rebit_axes
)
if legend:
plt.legend(loc='lower left', ncol=3, scatterpoints=1)
|
Plots rebit states drawn from a given prior.
:param qinfer.tomography.DensityOperatorDistribution prior: Distribution over
rebit states to plot.
:param list rebit_axes: List containing indices for the :math:`x`
and :math:`z` axes.
:param int n_samples: Number of samples to draw from the
prior.
:param np.ndarray true_state: State to be plotted as a "true" state for
comparison.
|
def comicDownloaded(self, comic, filename, text=None):
if self.lastComic != comic.name:
self.newComic(comic)
size = None
if self.allowdownscale:
size = getDimensionForImage(filename, MaxImageSize)
imageUrl = self.getUrlFromFilename(filename)
pageUrl = comic.referrer
if pageUrl != self.lastUrl:
self.html.write(u'<li><a href="%s">%s</a>\n' % (pageUrl, pageUrl))
self.html.write(u'<br/><img src="%s"' % imageUrl)
if size:
self.html.write(' width="%d" height="%d"' % size)
self.html.write('/>\n')
if text:
self.html.write(u'<br/>%s\n' % text)
self.lastComic = comic.name
self.lastUrl = pageUrl
|
Write HTML entry for downloaded comic.
|
def _CalculateHashesFileEntry(
self, file_system, file_entry, parent_full_path, output_writer):
full_path = file_system.JoinPath([parent_full_path, file_entry.name])
for data_stream in file_entry.data_streams:
hash_value = self._CalculateHashDataStream(file_entry, data_stream.name)
display_path = self._GetDisplayPath(
file_entry.path_spec, full_path, data_stream.name)
output_writer.WriteFileHash(display_path, hash_value or 'N/A')
for sub_file_entry in file_entry.sub_file_entries:
self._CalculateHashesFileEntry(
file_system, sub_file_entry, full_path, output_writer)
|
Recursively calculates hashes, starting with the given file entry.
Args:
file_system (dfvfs.FileSystem): file system.
file_entry (dfvfs.FileEntry): file entry.
parent_full_path (str): full path of the parent file entry.
output_writer (StdoutWriter): output writer.
|
def user_getfield(self, field, access_token=None):
info = self.user_getinfo([field], access_token)
return info.get(field)
|
Request a single field of information about the user.
:param field: The name of the field requested.
:type field: str
:returns: The value of the field. Depending on the type, this may be
a string, list, dict, or something else.
:rtype: object
.. versionadded:: 1.0
|
def on_binop(self, node):
return op2func(node.op)(self.run(node.left),
self.run(node.right))
|
Binary operator.
|
def get_significant_decimal(my_decimal):
if isinstance(my_decimal, Integral):
return my_decimal
if my_decimal != my_decimal:  # NaN check: NaN != NaN
return my_decimal
my_int_part = str(my_decimal).split('.')[0]
my_decimal_part = str(my_decimal).split('.')[1]
first_not_zero = 0
for i in range(len(my_decimal_part)):
if my_decimal_part[i] == '0':
continue
else:
first_not_zero = i
break
my_truncated_decimal = my_decimal_part[:first_not_zero + 3]
my_leftover_number = my_decimal_part[first_not_zero + 3:]  # digits beyond the truncation point
my_leftover_number = int(float('0.' + my_leftover_number))
round_up = False
if my_leftover_number == 1:
round_up = True
my_truncated = float(my_int_part + '.' + my_truncated_decimal)
if round_up:
my_bonus = 10 ** (-(first_not_zero + 4))  # ** not ^: ^ is bitwise XOR in Python
my_truncated += my_bonus
return my_truncated
|
Return the decimal truncated to three significant digits after any leading zeros in the fractional part.
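For example:
>>> get_significant_decimal(0.000267913)
0.000267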
|
def find_window(self, highlight_locations):
if len(self.text_block) <= self.max_length:
return (0, self.max_length)
num_chars_before = getattr(
settings,
'HIGHLIGHT_NUM_CHARS_BEFORE_MATCH',
0
)
best_start, best_end = super(ColabHighlighter, self).find_window(
highlight_locations
)
if best_start <= num_chars_before:
best_end -= best_start
best_start = 0
else:
best_start -= num_chars_before
best_end -= num_chars_before
return (best_start, best_end)
|
Uses the HIGHLIGHT_NUM_CHARS_BEFORE_MATCH setting to shift the window
so that up to that many characters before the first match are kept
inside the window.
|
def send_all_messages(self, close_on_done=True):
self.open()
running = True
try:
messages = self._pending_messages[:]
running = self.wait()
results = [m.state for m in messages]
return results
finally:
if close_on_done or not running:
self.close()
|
Send all pending messages in the queue. This will return a list
of the send result of all the pending messages so it can be
determined if any messages failed to send.
This function will open the client if it is not already open.
:param close_on_done: Close the client once the messages are sent.
Default is `True`.
:type close_on_done: bool
:rtype: list[~uamqp.constants.MessageState]
|
def create_user(self, auth, login_name, username, email, password, send_notify=False):
data = {
"login_name": login_name,
"username": username,
"email": email,
"password": password,
"send_notify": send_notify
}
response = self.post("/admin/users", auth=auth, data=data)
return GogsUser.from_json(response.json())
|
Creates a new user, and returns the created user.
:param auth.Authentication auth: authentication object, must be admin-level
:param str login_name: login name for created user
:param str username: username for created user
:param str email: email address for created user
:param str password: password for created user
:param bool send_notify: whether a notification email should be sent upon creation
:return: a representation of the created user
:rtype: GogsUser
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
|
def save_plot(fig, prefile='', postfile='', output_path='./', output_name='Figure',
output_format='png', dpi=300, transparent=False, **_):
if not os.path.exists(output_path):
os.makedirs(output_path)
output = os.path.join(output_path,
prefile + output_name + postfile + "." + output_format)
fig.savefig(output, dpi=dpi, transparent=transparent)
|
Generates a figure file in the selected directory.
Args:
fig: matplotlib figure
prefile(str): Include before the general filename of the figure
postfile(str): Included after the general filename of the figure
output_path(str): Define the path to the output directory
output_name(str): String to define the name of the output figure
output_format(str): String to define the format of the output figure
dpi(int): Define the DPI (Dots per Inch) of the figure
transparent(bool): If True the saved figure will have a transparent background
|
def filter_format(filter_template, assertion_values):
assert isinstance(filter_template, bytes)
return filter_template % (
tuple(map(escape_filter_chars, assertion_values)))
|
filter_template
String containing %s as placeholder for assertion values.
assertion_values
List or tuple of assertion values. Length must match
count of %s in filter_template.
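A usage sketch in the style of python-ldap's filter templating; special
filter characters in assertion values are escaped, not interpreted:
>>> filter_format(b'(&(objectClass=user)(uid=%s))', [b'alice'])
b'(&(objectClass=user)(uid=alice))'
>>> filter_format(b'(uid=%s)', [b'a*b'])
b'(uid=a\\2ab)'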
|
def global_horizontal_irradiance(self):
analysis_period = AnalysisPeriod(timestep=self.timestep,
is_leap_year=self.is_leap_year)
header_ghr = Header(data_type=GlobalHorizontalIrradiance(),
unit='W/m2',
analysis_period=analysis_period,
metadata=self.metadata)
glob_horiz = []
sp = Sunpath.from_location(self.location)
sp.is_leap_year = self.is_leap_year
for dt, dnr, dhr in zip(self.datetimes, self.direct_normal_irradiance,
self.diffuse_horizontal_irradiance):
sun = sp.calculate_sun_from_date_time(dt)
glob_horiz.append(dhr + dnr * math.sin(math.radians(sun.altitude)))
return HourlyContinuousCollection(header_ghr, glob_horiz)
|
Returns the global horizontal irradiance at each timestep.
|
def to_list_str(value, encode=None):
result = []
for index, v in enumerate(value):
if isinstance(v, dict):
result.append(to_dict_str(v, encode))
continue
if isinstance(v, list):
result.append(to_list_str(v, encode))
continue
if encode:
result.append(encode(v))
else:
result.append(default_encode(v))
return result
|
recursively convert list content into string
:arg list value: The list that need to be converted.
:arg function encode: Function used to encode object.
|
def parse_value(self, val, display, rawdict = 0):
ret = {}
vno = 0
for f in self.static_fields:
if not f.name:
pass
elif isinstance(f, LengthField):
pass
elif isinstance(f, FormatField):
pass
else:
if f.structvalues == 1:
field_val = val[vno]
else:
field_val = val[vno:vno+f.structvalues]
if f.parse_value is not None:
field_val = f.parse_value(field_val, display, rawdict=rawdict)
ret[f.name] = field_val
vno = vno + f.structvalues
if not rawdict:
return DictWrapper(ret)
return ret
|
This function is used by List and Object fields to convert
Struct objects with no var_fields into Python values.
|
def create_build_system(working_dir, buildsys_type=None, package=None, opts=None,
write_build_scripts=False, verbose=False,
build_args=[], child_build_args=[]):
from rez.plugin_managers import plugin_manager
if not buildsys_type:
clss = get_valid_build_systems(working_dir, package=package)
if not clss:
raise BuildSystemError(
"No build system is associated with the path %s" % working_dir)
if len(clss) != 1:
s = ', '.join(x.name() for x in clss)
raise BuildSystemError(("Source could be built with one of: %s; "
"Please specify a build system") % s)
buildsys_type = iter(clss).next().name()
cls_ = plugin_manager.get_plugin_class('build_system', buildsys_type)
return cls_(working_dir,
opts=opts,
package=package,
write_build_scripts=write_build_scripts,
verbose=verbose,
build_args=build_args,
child_build_args=child_build_args)
|
Return a new build system that can build the source in working_dir.
|
def load(self):
if isinstance(self.specfile, str):
f = open(self.specfile, 'r')
else:
f = self.specfile
for line in f:
if self.v_regex.match(line):
self._pkg_version = self.v_regex.match(line).group(1)
if self.n_regex.match(line):
self._pkg_name = self.n_regex.match(line).group(1)
f.close()
self._loaded = True
|
call this function after the file exists to populate the package name and version properties
|
def upload():
def twine(*args):
process = run(sys.executable, '-m', 'twine', *args)
return process.wait() != 0
if run(sys.executable, 'setup.py', 'sdist', 'bdist_wheel').wait() != 0:
error('failed building packages')
if twine('register', glob.glob('dist/*')[0]):
error('register failed')
if twine('upload', '-s', '-i', 'CB164668', '--skip-existing', 'dist/*'):
error('upload failed')
|
build the distribution files and upload them to PyPI
|
def imagetransformer_b12l_4h_b128_uncond_dr03_tpu():
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 2
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 128
hparams.hidden_size = 256
hparams.filter_size = 2048
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.1
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
return hparams
|
TPU config for cifar 10.
|
def set_data(self, column, value, role):
if not self._data or column >= self._data.column_count():
return False
return self._data.set_data(column, value, role)
|
Set the data of column to value
:param column: the column to set
:type column: int
:param value: the value to set
:param role: the role, usually EditRole
:type role: :class:`QtCore.Qt.ItemDataRole`
:returns: True, if data successfully changed
:rtype: :class:`bool`
:raises: None
|
def sample(self, frame):
frames = self.frame_stack(frame)
if frames:
frames.pop()
parent_stats = self.stats
for f in frames:
parent_stats = parent_stats.ensure_child(f.f_code, void)
stats = parent_stats.ensure_child(frame.f_code, RecordingStatistics)
stats.own_hits += 1
|
Samples the given frame.
|
def pub(self, topic=b'', embed_topic=False):
if not isinstance(topic, bytes):
error = 'Topic must be bytes'
log.error(error)
raise TypeError(error)
sock = self.__sock(zmq.PUB)
return self.__send_function(sock, topic, embed_topic)
|
Returns a callable that can be used to transmit a message, with a given
``topic``, in a publisher-subscriber fashion. Note that the sender
function has a ``print`` like signature, with an infinite number of
arguments. Each one being a part of the complete message.
By default, no topic will be included into published messages. Being up
to developers to include the topic, at the beginning of the first part
(i.e. frame) of every published message, so that subscribers are able
to receive them. For a different behaviour, check the embed_topic
argument.
:param topic: the topic that will be published to (default=b'')
:type topic: bytes
:param embed_topic: set for the topic to be automatically sent as the
first part (i.e. frame) of every published message
(default=False)
:type embed_topic: bool
:rtype: function
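A usage sketch (``node`` is a hypothetical owner object):
>>> send = node.pub(topic=b'metrics', embed_topic=True)
>>> send(b'cpu', b'0.93')  # the topic is sent as the first frame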
|
def _object_to_json(obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return repr(obj)
|
Convert objects that cannot be natively serialized into JSON
into their string representation
For datetime based objects convert them into their ISO formatted
string as specified by :meth:`datetime.datetime.isoformat`.
:param obj: object to convert into a JSON via getting its string
representation.
:type obj: object
:return: String value representing the given object ready to be
encoded into a JSON.
:rtype: str
|
def touch():
if not os.path.isfile(get_rc_path()):
open(get_rc_path(), 'a').close()
print('Created file: {}'.format(get_rc_path()))
|
Create a .vacationrc file if none exists.
|
def options(self, **kwds):
opts = dict(self.opts)
for k in kwds:
try:
_ = opts[k]
except KeyError:
raise ValueError("invalid option {!r}".format(k))
opts[k] = kwds[k]
return type(self)(self.cls, opts, self.kwargs)
|
Change options for interactive functions.
Returns
-------
A new :class:`_InteractFactory` which will apply the
options when called.
|
def add_override(self, addr, key, value):
address = Address(str(addr)).id
_LOGGER.debug('New override for %s %s is %s', address, key, value)
device_override = self._overrides.get(address, {})
device_override[key] = value
self._overrides[address] = device_override
|
Register an attribute override for a device.
|
def _to_date_in_588(date_str):
try:
date_tokens = [int(x) for x in date_str.split(".")]  # a list, so ValueError is raised here and caught
except ValueError:
return date_str
return ".".join(str(x) for x in date_tokens)
|
Convert a date in the format 03.02.2017 to 3.2.2017.
See #100 for details.
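For example:
>>> _to_date_in_588("03.02.2017")
'3.2.2017'
>>> _to_date_in_588("not-a-date")
'not-a-date'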
|
def _remove_till_caught_up_3pc(self, last_caught_up_3PC):
outdated_pre_prepares = {}
for key, pp in self.prePrepares.items():
if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
outdated_pre_prepares[key] = pp
for key, pp in self.sentPrePrepares.items():
if compare_3PC_keys(key, last_caught_up_3PC) >= 0:
outdated_pre_prepares[key] = pp
self.logger.trace('{} going to remove messages for {} 3PC keys'.format(
self, len(outdated_pre_prepares)))
for key, pp in outdated_pre_prepares.items():
self.batches.pop(key, None)
self.sentPrePrepares.pop(key, None)
self.prePrepares.pop(key, None)
self.prepares.pop(key, None)
self.commits.pop(key, None)
self._discard_ordered_req_keys(pp)
|
Remove any 3-phase messages up to the last caught-up 3PC key, along
with any corresponding request keys
|
def _text_position(size, text, font):
width, height = font.getsize(text)
left = (size - width) / 2.0
top = (size - height) / 3.0
return left, top
|
Returns the left-top point where the text should be positioned.
|
def delete_subject(self, subject_id):
uri = self._get_subject_uri(guid=subject_id)
return self.service._delete(uri)
|
Remove a specific subject by its identifier.
|
def stop_event_stream(self):
if self._stream:
self._stream.stop()
self._stream.join()
self._stream = None
with self._events.mutex:
self._events.queue.clear()
|
Stop streaming events from `gerrit stream-events`.
|
def tb_capture(func):
@wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except Exception:
import traceback
# capture the worker traceback text before re-raising (assumes MuchoChildError accepts a message)
raise MuchoChildError(traceback.format_exc())
return wrapper
|
A decorator which captures worker tracebacks and re-raises them as a
MuchoChildError. Inspired by an example in
https://bugs.python.org/issue13831.
This decorator wraps rio-mucho worker tasks.
Parameters
----------
func : function
A function to be decorated.
Returns
-------
func
|
def upper_diag_self_prodx(list_):
return [(item1, item2)
for n1, item1 in enumerate(list_)
for n2, item2 in enumerate(list_) if n1 < n2]
|
Upper diagonal of the Cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)]
|
def read(self):
buffer = BytesIO()
for chunk in self.buffer_iter():
log.debug('buffer.write(%r)', chunk)
buffer.write(chunk)
buffer.seek(0)
return buffer.read()
|
Read buffer out as a single stream.
.. warning::
Avoid using this function!
**Why?** This is a *convenience* function; it doesn't encourage good
memory management.
All memory required for a mesh is duplicated, and returned as a
single :class:`str`. So at best, using this function will double
the memory required for a single model.
**Instead:** Wherever possible, please use :meth:`buffer_iter`.
|
def _async_requests(urls):
session = FuturesSession(max_workers=30)
futures = [
session.get(url)
for url in urls
]
return [ future.result() for future in futures ]
|
Sends multiple non-blocking requests. Returns
a list of responses.
:param urls:
List of urls
|
def fave_dashboards_by_username(self, username):
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id())
|
This lets us use a user's username to pull favourite dashboards
|
def get_mesh(self, var, coords=None):
mesh = var.attrs.get('mesh')
if mesh is None:
return None
if coords is None:
coords = self.ds.coords
return coords.get(mesh, self.ds.coords.get(mesh))
|
Get the mesh variable for the given `var`
Parameters
----------
var: xarray.Variable
The data source with the ``'mesh'`` attribute
coords: dict
The coordinates to use. If None, the coordinates of the dataset of
this decoder is used
Returns
-------
xarray.Coordinate
The mesh coordinate
|