repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
hakku
|
hakku//article.pyfile:/article.py:function:parse_string_list/parse_string_list
|
def parse_string_list(fh):
    """Parse a block of string values from the reader *fh*.

    The first line is always consumed; further lines are consumed for as
    long as the reader reports a continuation (``fh.peek2() == ' '``).
    Each line is UTF-8 encoded, its first three bytes dropped and trailing
    whitespace stripped, matching the original record layout.
    """
    def _read_item():
        raw = fh.readline().encode('utf-8')
        return raw[3:].rstrip()

    items = [_read_item()]
    while fh.peek2() == ' ':
        items.append(_read_item())
    return items
|
basic_utils
|
basic_utils//primitives.pyfile:/primitives.py:function:odd/odd
|
def odd(n: int) -> bool:
    """
    Return True if n is odd.

    The original doctest called ``even(3)`` and expected ``False`` — it
    referenced the wrong function; corrected to exercise ``odd`` itself.

    >>> odd(3)
    True
    >>> odd(4)
    False
    """
    return n % 2 == 1
|
intake-sql-0.2.0
|
intake-sql-0.2.0//versioneer.pyfile:/versioneer.py:function:scan_setup_py/scan_setup_py
|
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Returns the number of problem categories found (0 when setup.py looks
    correct); prints human-readable guidance for each problem.
    """
    errors = 0
    found = set()
    setters = False
    # marker substring -> tag recorded when the substring is present
    markers = {
        'import versioneer': 'import',
        'versioneer.get_cmdclass()': 'cmdclass',
        'versioneer.get_version()': 'get_version',
    }
    with open('setup.py', 'r') as setup_file:
        for line in setup_file.readlines():
            for needle, tag in markers.items():
                if needle in line:
                    found.add(tag)
            # legacy in-code configuration that now belongs in setup.cfg
            if 'versioneer.VCS' in line:
                setters = True
            if 'versioneer.versionfile_source' in line:
                setters = True
    if len(found) != 3:
        print('')
        print('Your setup.py appears to be missing some important items')
        print('(but I might be wrong). Please make sure it has something')
        print('roughly like the following:')
        print('')
        print(' import versioneer')
        print(' setup( version=versioneer.get_version(),')
        print(' cmdclass=versioneer.get_cmdclass(), ...)')
        print('')
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print('now lives in setup.cfg, and should be removed from setup.py')
        print('')
        errors += 1
    return errors
|
horae.search-1.0a1
|
horae.search-1.0a1//horae/search/interfaces.pyclass:IAdvancedSearchFieldProvider/fields
|
def fields():
    """ Returns a list of fields to be added to the form.

    Interface method (IAdvancedSearchFieldProvider): implementations
    supply the concrete form fields; this declaration has no body.
    """
|
pybader
|
pybader//utils.pyfile:/utils.py:function:python_format/python_format
|
def python_format(a, prec, align=''):
    """Format a 2-D array into standard (scientific) form as one string.

    args:
        a: the array (needs ``.shape`` and ``.flatten()``, e.g. numpy)
        prec: number of digits after the decimal point
        align: anything that can go before the ``.`` in python formatting
    """
    rows, cols = a.shape
    cell = f' {{:{align}.{prec}E}}'
    # one format field per element, one newline per row
    template = (cell * cols + '\n') * rows
    return template.format(*a.flatten())
|
pyllars-1.0.3
|
pyllars-1.0.3//pyllars/physionet_utils.pyfile:/pyllars/physionet_utils.py:function:_fix_mimic_icd/_fix_mimic_icd
|
def _fix_mimic_icd(icd):
    """ Add the implied decimal to a MIMIC ICD-9 code string.

    From the mimic documentation (https://mimic.physionet.org/mimictables/diagnoses_icd/):
    > The code field for the ICD-9-CM Principal and Other Diagnosis Codes
    > is six characters in length, with the decimal point implied between
    > the third and fourth digit for all diagnosis codes other than the V
    > codes. The decimal is implied for V codes between the second and third
    > digit.

    Fixes over the original implementation:
    - codes with nothing after the split point no longer gain a trailing
      '.' (the original turned '12' into '12.')
    - V codes are split between the second and third character, as the
      quoted documentation specifies (the original split all codes at 3)
    """
    icd = str(icd)
    # V codes carry the implied decimal one position earlier.
    split = 2 if icd.startswith('V') else 3
    if len(icd) <= split:
        return icd
    return icd[:split] + '.' + icd[split:]
|
scrapy
|
scrapy//utils/misc.pyfile:/utils/misc.py:function:create_instance/create_instance
|
def create_instance(objcls, settings, crawler, *args, **kwargs):
    """Build an instance of *objcls* via ``from_crawler`` or
    ``from_settings`` when those alternate constructors exist, falling
    back to the plain constructor otherwise.

    At least one of ``settings`` and ``crawler`` must not be ``None``;
    when ``settings`` is ``None``, ``crawler.settings`` is used. When
    ``crawler`` is ``None`` only ``from_settings`` is tried.
    ``*args`` and ``**kwargs`` are forwarded to whichever constructor runs.

    Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
    """
    if settings is None:
        if crawler is None:
            raise ValueError('Specify at least one of settings and crawler.')
        settings = crawler.settings
    use_crawler = bool(crawler) and hasattr(objcls, 'from_crawler')
    if use_crawler:
        return objcls.from_crawler(crawler, *args, **kwargs)
    if hasattr(objcls, 'from_settings'):
        return objcls.from_settings(settings, *args, **kwargs)
    return objcls(*args, **kwargs)
|
craedl
|
craedl//core.pyfile:/core.py:function:to_x_bytes/to_x_bytes
|
def to_x_bytes(bytes):
    """
    Take a number in bytes and return a human-readable string.

    :param bytes: number in bytes
    :type bytes: int
    :returns: a human-readable string

    Fix: the original fell off the end and returned ``None`` for values of
    one petabyte and above; such values are now reported in TB.
    (The parameter name shadows the builtin but is kept for
    backward-compatible keyword calls.)
    """
    x_bytes = bytes
    power = 0
    # Stop scaling at TB so the function always returns a string.
    while x_bytes >= 1000 and power < 12:
        x_bytes = x_bytes * 0.001
        power = power + 3
    units = {0: 'bytes', 3: 'kB', 6: 'MB', 9: 'GB', 12: 'TB'}
    return '%.0f %s' % (x_bytes, units[power])
|
zserio
|
zserio//bitreader.pyclass:BitStreamReader/fromFile
|
@classmethod
def fromFile(cls, filename):
    """
    Construct a bit stream reader from the contents of a file.

    :param filename: Filename to read as a bit stream.
    """
    with open(filename, 'rb') as source:
        contents = source.read()
    return cls(contents)
|
dimarray
|
dimarray//core/transform.pyfile:/core/transform.py:function:_interp_internal_from_weight/_interp_internal_from_weight
|
def _interp_internal_from_weight(arr, axis, left, right, lhs_idx, rhs_idx,
    frac, left_idx, right_idx):
    """ numpy ==> numpy

    Linear interpolation along *axis* using precomputed neighbour indices
    (lhs_idx/rhs_idx) and fractional weights (frac); positions flagged by
    left_idx / right_idx receive the fill values *left* / *right*.
    """
    multi = arr.ndim > 1
    if multi:
        # bring the interpolation axis to the front so indexing is uniform
        arr = arr.swapaxes(axis, 0)
        weights = frac[(slice(None),) + (None,) * (arr.ndim - 1)]
    else:
        weights = frac
    lo = arr[lhs_idx]
    hi = arr[rhs_idx]
    interpolated = lo + weights * (hi - lo)
    # out-of-range fill values
    interpolated[left_idx] = left
    interpolated[right_idx] = right
    if multi:
        interpolated = interpolated.swapaxes(axis, 0)
    return interpolated
|
SkPy-0.9.1
|
SkPy-0.9.1//skpy/msg.pyclass:SkypeMsg/italic
|
@staticmethod
def italic(s):
"""
Format text to be italic.
Args:
s (str): string to format
Returns:
str: formatted string
"""
return '<i raw_pre="_" raw_post="_">{0}</i>'.format(s)
|
sensebook
|
sensebook//_utils.pyfile:/_utils.py:function:strip_json_cruft/strip_json_cruft
|
def strip_json_cruft(text: str) -> str:
    """Return *text* from the first ``{`` onward, dropping the
    ``for(;;);`` (and similar) cruft that precedes JSON responses.

    Raises ``ValueError`` when no ``{`` is present.
    """
    start = text.find('{')
    if start < 0:
        raise ValueError('No JSON object found: {!r}'.format(text))
    return text[start:]
|
DeltaML-commons-0.10.17
|
DeltaML-commons-0.10.17//commons/data/regresion_dataset_generator.pyclass:DatasetGenerator/generate_and_download
|
@classmethod
def generate_and_download(cls, filename, n_samples, features, target='y',
    noise=10, random_state=0):
    """
    Generate a random linear-regression dataset as a pandas dataframe and
    save it to *filename* as a tab-separated CSV.

    :param filename: the name of the file created as output
    :param n_samples: number of samples in the generated dataframe
    :param features: the name of each feature as a list of str
    :param target: the name of the target (as str)
    :param noise: noise level forwarded to the generator
    :param random_state: for replication of the experiment
    :return: The coefficients array
    """
    dataset, coefficients = cls.generate_df(n_samples, features, target,
        noise, random_state)
    dataset.to_csv(filename, index=False, sep='\t')
    return coefficients
|
nolds
|
nolds//measures.pyfile:/measures.py:function:lyap_r_len/lyap_r_len
|
def lyap_r_len(**kwargs):
    """
    Helper that computes the minimum number of data points required to use
    lyap_r. None of the required parameters may be None.

    Kwargs:
      kwargs(dict):
        arguments used for lyap_r (required: emb_dim, lag, trajectory_len
        and min_tsep)

    Returns:
      minimum number of data points required to call lyap_r with the
      given parameters
    """
    # points needed for the delay embedding itself
    required = (kwargs['emb_dim'] - 1) * kwargs['lag'] + 1
    # extra points needed to follow each trajectory
    required += kwargs['trajectory_len'] - 1
    # temporal separation margin for neighbour candidates
    required += kwargs['min_tsep'] * 2 + 1
    return required
|
qmeq
|
qmeq//indexing.pyfile:/indexing.py:function:sz_to_ind/sz_to_ind
|
def sz_to_ind(sz, charge, nsingle):
    """
    Convert a spin projection :math:`S_{z}` into a list index.

    Parameters
    ----------
    sz : int
        Value :math:`S_{z}` of the spin projection in the z direction.
    charge : int
        Value of the charge.
    nsingle : int
        Number of single particle states.

    Returns
    -------
    int
        List index corresponding to sz.
    """
    # largest attainable |Sz| in this charge sector
    cap = min(charge, nsingle - charge)
    # Sz runs over {-cap, -cap+2, ..., +cap}; map it onto 0..cap
    return int((cap + sz) / 2)
|
django-echoices-2.6.0
|
django-echoices-2.6.0//echoices/enums/enums.pyclass:EChoice/coerce
|
@classmethod
def coerce(cls, other):
    """
    Return `other` converted to the type of this EChoice's values.
    Typically the value type is a string. Intended use case is to convert
    `other` coming from an HTML form, typically a select choice.

    Parameters
    ----------
    other : str

    Returns
    -------
    the `other` value in the type of the value of this EChoice.
    """
    # NOTE(review): `__value_type_` is name-mangled by the enclosing
    # EChoice class to `_EChoice__value_type_`, so this lookup only works
    # from inside the class body — presumably it holds the type of the
    # enum values (e.g. ``str`` or ``int``); confirm before refactoring.
    return cls.__value_type_(other)
|
cron-lock-0.1.1
|
cron-lock-0.1.1//cron_lock/lock.pyfile:/cron_lock/lock.py:function:make_timestamp_period/make_timestamp_period
|
def make_timestamp_period(**kwargs):
    """
    Convert a period expressed in minutes/seconds to a number of seconds.

    :param kwargs: supports ``minutes`` and ``seconds`` (each default 0)
    :return: total period in seconds
    """
    total_seconds = 60 * kwargs.get('minutes', 0)
    total_seconds += kwargs.get('seconds', 0)
    return total_seconds
|
ryo_iso
|
ryo_iso//tasks/main.pyfile:/tasks/main.py:function:task__squashfs_backup/task__squashfs_backup
|
def task__squashfs_backup():
    """
    Backup files from image/squashfs so they can be restored after the
    image/squashfs update.

    :actions:
        - ``sudo cp image/preseed/ubuntu.seed build/backup/ubuntu.seed``
        - ``sudo cp squashfs-root/etc/apt/sources.list build/backup/sources.list``
        - ``sudo mv squashfs-root/etc/resolv.conf squashfs-root/etc/resolv.conf.orig``
    :targets:
        - build/backup/sources.list
        - squashfs-root/etc/resolv.conf.orig
    :task_dep:
        - :func:`task__mount_dev`
    :uptodate:
        - True
    """
    actions = [
        'sudo cp image/preseed/ubuntu.seed build/backup/ubuntu.seed || true',
        'sudo cp squashfs-root/etc/apt/sources.list build/backup/sources.list',
        'sudo mv squashfs-root/etc/resolv.conf squashfs-root/etc/resolv.conf.orig'
    ]
    targets = ['build/backup/sources.list',
        'squashfs-root/etc/resolv.conf.orig']
    return {
        'actions': actions,
        'targets': targets,
        'task_dep': ['_mount_dev'],
        'uptodate': [True],
    }
|
spacepy
|
spacepy//pybats/kyoto.pyfile:/pybats/kyoto.py:function:dstfetch/dstfetch
|
def dstfetch(yrstart, mostart, yrstop, mostop):
    """
    A function to fetch Kyoto Dst directly from the Kyoto WDC website.
    Returns raw ascii lines.

    :param yrstart: four-digit start year (e.g. 2003)
    :param mostart: start month (1-12)
    :param yrstop: four-digit stop year
    :param mostop: stop month (1-12)
    """
    try:
        # Python 3 layout
        import urllib.parse, urllib.request
    except ImportError:
        # Python 2 fallback: alias the flat urllib module so the rest of
        # the function can use the Python-3-style names
        import urllib
        urllib.parse = urllib
        urllib.request = urllib
    forminfo = {}
    # NOTE(review): forminfo2 is never used below — looks like dead code.
    forminfo2 = {}
    # The WDC form wants the year split into century/tens/units digits.
    if yrstart - 2000 >= 0:
        forminfo['SCent'] = 20
    else:
        forminfo['SCent'] = 19
    forminfo['STens'] = int(('%d' % yrstart)[2])
    forminfo['SYear'] = int(('%d' % yrstart)[3])
    forminfo['SMonth'] = '%02i' % mostart
    if yrstop - 2000 >= 0:
        forminfo['ECent'] = 20
    else:
        forminfo['ECent'] = 19
    forminfo['ETens'] = int(('%d' % yrstop)[2])
    forminfo['EYear'] = int(('%d' % yrstop)[3])
    forminfo['EMonth'] = '%02i' % mostop
    email = urllib.parse.quote('[email protected]')
    # Build the CGI query string expected by the Kyoto WDC dstae-cgi form.
    target = ('http://wdc.kugi.kyoto-u.ac.jp/cgi-bin/dstae-cgi?' +
        '%s=%2i&%s=%i&%s=%i&%s=%s&%s=%2i&%s=%i&%s=%i&%s=%s&' % ('SCent',
        forminfo['SCent'], 'STens', forminfo['STens'], 'SYear', forminfo[
        'SYear'], 'SMonth', forminfo['SMonth'], 'ECent', forminfo['ECent'],
        'ETens', forminfo['ETens'], 'EYear', forminfo['EYear'], 'EMonth',
        forminfo['EMonth']))
    target = (target + 'Image+Type=GIF&COLOR=COLOR&AE+Sensitivity=0' +
        '&Dst+Sensitivity=0&Output=DST&Out+format=WDC&Email=' + email)
    # Network I/O: fetch the form response and hand back its raw lines.
    f = urllib.request.urlopen(target)
    lines = f.readlines()
    return lines
|
ADRpy-0.1.18
|
ADRpy-0.1.18//ADRpy/atmospheres.pyfile:/ADRpy/atmospheres.py:function:pistonpowerfactor/pistonpowerfactor
|
def pistonpowerfactor(density_kgpm3):
    """Gagg-Ferrar piston-engine power model.

    Multiply sea-level power by the returned factor to get the power
    available at the given air density.

    :param density_kgpm3: air density in kg/m^3
    :return: power factor (1.0 at the 1.225 kg/m^3 reference density)
    """
    relative_density = density_kgpm3 / 1.225
    return 1.132 * relative_density - 0.132
|
ngsscriptlibrary
|
ngsscriptlibrary//parsing.pyfile:/parsing.py:function:parse_bed_to_loci/parse_bed_to_loci
|
def parse_bed_to_loci(target):
    """Read a BED file and build a 'chr:pos' string for every base of each
    interval (end-inclusive, as in the original). Return them as a list."""
    loci = list()
    with open(target) as bedfile:
        for record in bedfile:
            chromosome, start, end, *_ = record.split()
            for position in range(int(start), int(end) + 1):
                loci.append('{}:{}'.format(chromosome, position))
    return loci
|
exporters-0.7.0
|
exporters-0.7.0//exporters/utils.pyfile:/exporters/utils.py:function:maybe_cast_list/maybe_cast_list
|
def maybe_cast_list(value, types):
    """
    Try to coerce a list value into one of the more specific list
    subclasses given in *types*; non-list values and failed conversions
    are returned unchanged.
    """
    if not isinstance(value, list):
        return value
    # normalise a single type into a tuple of candidates
    if type(types) not in (list, tuple):
        types = (types,)
    for candidate in types:
        if not issubclass(candidate, list):
            continue
        try:
            return candidate(value)
        except (TypeError, ValueError):
            continue
    return value
|
fake-blender-api-2.79-0.3.1
|
fake-blender-api-2.79-0.3.1//bpy/ops/pose.pyfile:/bpy/ops/pose.py:function:quaternions_flip/quaternions_flip
|
def quaternions_flip():
    """Flip quaternion values to achieve desired rotations, while maintaining the same orientations
    """
    # Stub from a fake Blender API package: mirrors the signature of
    # bpy.ops.pose.quaternions_flip but deliberately performs no work.
    pass
|
relstorage
|
relstorage//adapters/interfaces.pyclass:IConnectionManager/open_for_store
|
def open_for_store(**open_args):
    """
    Open and initialize a connection for storing objects.
    This connection is read/write, and its view of the database
    needs to be consistent for each statement, but should read a
    fresh snapshot on each statement for purposes of conflict
    resolution and cooperation with other store connections. It
    should be opened in ``READ COMMITTED`` isolation level,
    without autocommit. (Opening in ``REPEATABLE READ`` or higher,
    with a single snapshot, could reduce the use of locks, but
    increases the risk of serialization errors and having
    transactions rollback; we could handle that by raising
    ``ConflictError`` and letting the application retry, but only
    if we did that before ``tpc_finish``, and not all test cases
    can handle that either.)
    This connection will take locks on rows in the state tables,
    and hold them during the commit process.
    A connection opened by this method is the only type of
    connection that can hold the commit lock.
    :return: ``(conn, cursor)``
    """
    # Interface declaration (IConnectionManager) — concrete database
    # adapters provide the implementation.
|
zope
|
zope//index/topic/interfaces.pyclass:ITopicFilteredSet/unindex_doc
|
def unindex_doc(docid):
    """Remove an object with id 'docid' from the index."""
    # Interface declaration (ITopicFilteredSet) — implementations perform
    # the actual removal.
|
alot
|
alot//helper.pyfile:/helper.py:function:humanize_size/humanize_size
|
def humanize_size(size):
    """Return a human readable representation of *size* (a number of
    bytes), using the "KiB" (1024 bytes) and "MiB" (1024**2 bytes)
    suffixes — powers of 1024, not the 1000-based kilo/mega units.

    :param size: the number to convert
    :type size: int
    :returns: the human readable representation of size
    :rtype: str
    """
    scales = ((1, '%i'), (1024, '%iKiB'), (1024 * 1024, '%.1fMiB'))
    for factor, template in scales:
        scaled = size / factor
        if scaled < 1024:
            return template % scaled
    # sizes of 1024 MiB and above are still rendered with the MiB template
    # (this mirrors the original's fall-through behaviour)
    return template % scaled
|
zxbasic-1.9.2
|
zxbasic-1.9.2//zxbparser.pyfile:/zxbparser.py:function:p_step_expr/p_step_expr
|
def p_step_expr(p):
    """ step : STEP expr
    """
    # NOTE: the docstring above IS the PLY grammar rule for this
    # production — do not edit it as if it were documentation.
    # The step clause reduces to the value of its <expr> operand.
    p[0] = p[2]
|
mysqlbinlog2gpubsub-1.0.7
|
mysqlbinlog2gpubsub-1.0.7//mysqlbinlog2gpubsub/config.pyfile:/mysqlbinlog2gpubsub/config.py:function:_import_from_dict/_import_from_dict
|
def _import_from_dict(conf_dict):
    """ Import settings from a dictionary.

    Args:
        conf_dict (dict): settings to be imported

    Each key/value pair becomes a module-level attribute of this module.
    """
    module_namespace = globals()
    for name, value in conf_dict.items():
        module_namespace[name] = value
|
check50-3.0.10
|
check50-3.0.10//check50/py.pyfile:/check50/py.py:function:append_code/append_code
|
def append_code(original, codefile):
    """Append the contents of *codefile* to *original*.

    :param original: name of file that will be appended to
    :type original: str
    :param codefile: name of file whose contents are appended
    :type codefile: str

    Particularly useful for replacing a function in student code with our
    own implementation: Python takes the last definition of a name, so
    appending an alternative implementation effectively overrides the
    original one.

    Example usage::

        # Include a file containing our own implementation of a lookup function.
        check50.include("lookup.py")
        # Overwrite the lookup function in helpers.py with our own implementation.
        check50.py.append_code("helpers.py", "lookup.py")
    """
    with open(codefile) as source:
        addition = source.read()
    with open(original, 'a') as destination:
        # blank line separates the appended code from the original
        destination.write('\n')
        destination.write(addition)
|
metaflow-2.0.5
|
metaflow-2.0.5//metaflow/datastore/datastore.pyclass:MetaflowDataStore/get_artifacts
|
@classmethod
def get_artifacts(cls, artifacts_to_prefetch):
    """
    Return a list of (sha, obj_blob) for all the object_path(s) specified in
    `artifacts_to_prefetch`.
    """
    # Abstract method: concrete datastore backends must override this.
    raise NotImplementedError()
|
matplotlib2tikz-0.7.6
|
matplotlib2tikz-0.7.6//matplotlib2tikz/save.pyfile:/matplotlib2tikz/save.py:function:_tex_comment/_tex_comment
|
def _tex_comment(comment):
    """Prepend every line of *comment* with the LaTeX comment key, '%',
    and terminate with a newline.
    """
    body = comment.replace('\n', '\n% ')
    return '% ' + body + '\n'
|
coremltools
|
coremltools//converters/keras/_utils.pyfile:/converters/keras/_utils.py:function:raise_error_unsupported_categorical_option/raise_error_unsupported_categorical_option
|
def raise_error_unsupported_categorical_option(option_name, option_value,
    layer_type, layer_name):
    """
    Raise a RuntimeError reporting an unsupported option on a Keras layer.
    """
    message = 'Unsupported option %s=%s in layer %s(%s)' % (option_name,
        option_value, layer_type, layer_name)
    raise RuntimeError(message)
|
tcex-2.0.4
|
tcex-2.0.4//tcex/threat_intelligence/tcex_ti_tc_request.pyclass:TiTcRequest/is_false
|
@staticmethod
def is_false(value):
"""checks to see if a string is False"""
if not value:
return False
value = str(value)
return value.lower() in ['false', '0', 'f', 'n', 'no']
|
mxnet-1.6.0.data
|
mxnet-1.6.0.data//purelib/mxnet/symbol/gen_sparse.pyfile:/purelib/mxnet/symbol/gen_sparse.py:function:square/square
|
def square(data=None, name=None, attr=None, out=None, **kwargs):
    """Returns element-wise squared value of the input.
    .. math::
       square(x) = x^2
    Example::
       square([2, 3, 4]) = [4, 9, 16]
    The storage type of ``square`` output depends upon the input storage type:
    - square(default) = default
    - square(row_sparse) = row_sparse
    - square(csr) = csr
    Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L118
    Parameters
    ----------
    data : Symbol
        The input array.
    name : string, optional.
        Name of the resulting symbol.
    Returns
    -------
    Symbol
        The result symbol.
    """
    # Auto-generated operator stub: mxnet replaces this at import time with
    # the real backend call; the placeholder return value is never used.
    return 0,
|
Diofant-0.11.0
|
Diofant-0.11.0//diofant/polys/rootisolation.pyfile:/diofant/polys/rootisolation.py:function:_rectangle_small_p/_rectangle_small_p
|
def _rectangle_small_p(a, b, eps):
    """Return ``True`` if the rectangle with corners *a*, *b* is smaller
    than *eps* in both dimensions; ``eps=None`` means no size limit."""
    if eps is None:
        return True
    (u, v), (s, t) = a, b
    return s - u < eps and t - v < eps
|
haoda
|
haoda//util.pyfile:/util.py:function:get_suitable_int_type/get_suitable_int_type
|
def get_suitable_int_type(upper: int, lower: int = 0) -> str:
    """Returns the suitable integer type with the least bits.

    Returns the integer type that can hold all values between `upper` and
    `lower` (inclusive) and has the least bits. (The original docstring
    documented non-existent `max_val`/`min_val` parameters.)

    Args:
      upper: Maximum value that needs to be valid.
      lower: Minimum value that needs to be valid (default 0).

    Returns:
      The suitable type name, e.g. ``'uint8'`` or ``'int4'``.
    """
    assert upper >= lower
    upper = max(upper, 0)
    lower = min(lower, 0)
    if lower == 0:
        # Unsigned: just enough bits for the magnitude of `upper`.
        return 'uint%d' % upper.bit_length()
    # Signed: widest magnitude plus one sign bit; `lower + 1` accounts for
    # two's-complement asymmetry (e.g. -8 fits in 4 bits).
    return 'int%d' % (max(upper.bit_length(), (lower + 1).bit_length()) + 1)
|
iapws-1.4.1
|
iapws-1.4.1//iapws/iapws97.pyfile:/iapws/iapws97.py:function:_h13_s/_h13_s
|
def _h13_s(s):
    """Boundary between Region 1 and 3 of IAPWS-IF97, h=f(s).

    Parameters
    ----------
    s : float
        Specific entropy, [kJ/kgK]

    Returns
    -------
    h : float
        Specific enthalpy, [kJ/kg]

    Notes
    ------
    Raises :class:`NotImplementedError` if the input is outside the
    validity range s(100MPa,623.15K) <= s <= s'(623.15K).

    References
    ----------
    IAPWS, Revised Supplementary Release on Backward Equations p(h,s) for
    Region 3, http://www.iapws.org/relguide/Supp-phs3-2014.pdf. Eq 7

    Examples
    --------
    >>> _h13_s(3.7)
    1632.525047
    >>> _h13_s(3.5)
    1566.104611
    """
    if s < 3.397782955 or s > 3.77828134:
        raise NotImplementedError('Incoming out of bound')
    sigma = s / 3.8
    # (exponent_i, exponent_j) pairs and their coefficients, Eq 7 terms
    exponents = ((0, 0), (1, -2), (1, 2), (3, -12), (5, -4), (6, -3))
    coefficients = (0.913965547600543, -4.30944856041991e-05,
        60.3235694765419, 1.17518273082168e-18, 0.220000904781292,
        -69.0815545851641)
    total = sum(ni * (sigma - 0.884) ** i * (sigma - 0.864) ** j
        for (i, j), ni in zip(exponents, coefficients))
    return 1700 * total
|
gfutilities-0.8.4
|
gfutilities-0.8.4//gfutilities/service/websocket.pyfile:/gfutilities/service/websocket.py:function:_byte_to_int/_byte_to_int
|
def _byte_to_int(data: bytes) -> int:
    """
    Return the integer value of the first four bytes of *data* interpreted
    as a little-endian unsigned integer (byte 0 is the least significant —
    the original docstring incorrectly said "big endian").

    :param data: buffer of at least four bytes
    :return: Integer value
    :rtype: int
    """
    if len(data) < 4:
        # preserve the original failure mode: ord() on an empty slice
        # raised TypeError for short input
        raise TypeError('need at least 4 bytes, got %d' % len(data))
    return int.from_bytes(data[:4], byteorder='little')
|
eclcli-1.3.5
|
eclcli-1.3.5//eclcli/storage/storageclient/common/apiclient/base.pyclass:HookableMixin/add_hook
|
@classmethod
def add_hook(cls, hook_type, hook_func):
    """Register *hook_func* under the given hook type.

    :param cls: class that registers hooks
    :param hook_type: hook type, e.g., '__pre_parse_args__'
    :param hook_func: hook function
    """
    # create the bucket on first use, then append
    cls._hooks_map.setdefault(hook_type, []).append(hook_func)
|
pylrc-0.1.0
|
pylrc-0.1.0//pylrc/utilities.pyfile:/pylrc/utilities.py:function:findEvenSplit/findEvenSplit
|
def findEvenSplit(line):
    """
    Split *line* into two lines of as-equal-as-possible length, breaking
    only at spaces. A single-word line comes back unchanged.
    """
    words = line.split(' ')
    # character-length difference for each candidate break point
    differences = []
    for i in range(len(words)):
        first = ' '.join(words[0:i + 1])
        second = ' '.join(words[i + 1:])
        differences.append(abs(len(first) - len(second)))
    best = differences.index(min(differences))
    # Rebuild the two halves once at the chosen break point (the original
    # looped over every split a second time just to recompute these).
    first = ' '.join(words[0:best + 1])
    second = ' '.join(words[best + 1:])
    return (first + '\n' + second).rstrip()
|
dockering
|
dockering//config_building.pyfile:/config_building.py:function:add_host_config_params_dns/add_host_config_params_dns
|
def add_host_config_params_dns(docker_host, host_config_params=None):
    """Add dns input params.

    This is not a generic implementation. It sets up dns with the
    expectation that a local consul agent is running on the docker host
    and will service the dns requests.

    Args:
    -----
    docker_host (string): Docker host ip address, used as the dns server
    host_config_params (dict): Target dict to accumulate host config
        inputs; a new dict is created when omitted.

    Returns:
    --------
    Updated (or newly created) host_config_params
    """
    # `is None` instead of the original `== None` (identity check is the
    # correct idiom and avoids surprising __eq__ overloads)
    if host_config_params is None:
        host_config_params = {}
    host_config_params['dns'] = [docker_host]
    host_config_params['dns_search'] = ['service.consul']
    host_config_params['extra_hosts'] = {'consul': docker_host}
    return host_config_params
|
noworkflow
|
noworkflow//now/persistence/models/base.pyclass:MetaModel/set_instances_default
|
def set_instances_default(cls, attr, value):
    """Set a DEFAULT attribute on every live instance of the classes
    created by this metaclass.

    Arguments:
    attr -- attribute name
    value -- new attribute value
    """
    instances = cls.get_instances()
    for obj in instances:
        obj.set_instance_attr(attr, value)
|
webviz-config-0.0.56
|
webviz-config-0.0.56//webviz_config/webviz_store.pyclass:WebvizStorage/_dict_to_tuples
|
@staticmethod
def _dict_to_tuples(dictionary: dict) ->tuple:
"""Since dictionaries are not hashable, this is a helper function
converting a dictionary into a sorted tuple."""
return tuple(sorted(dictionary.items()))
|
bareutils
|
bareutils//response_code.pyfile:/response_code.py:function:is_server_error/is_server_error
|
def is_server_error(code: int) -> bool:
    """Return true if *code* is an HTTP server-error (5xx) response code.

    Args:
        code (int): The HTTP response code.

    Returns:
        bool: True if the code was a server error else false.
    """
    return 500 <= code < 600
|
hutch-python-1.0.2
|
hutch-python-1.0.2//versioneer.pyfile:/versioneer.py:function:render_pep440_pre/render_pep440_pre
|
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces['closest-tag']
    if not tag:
        return '0.post.dev%d' % pieces['distance']
    rendered = tag
    if pieces['distance']:
        rendered += '.post.dev%d' % pieces['distance']
    return rendered
|
dropbox-10.1.2
|
dropbox-10.1.2//dropbox/team.pyclass:GroupMembersAddError/members_not_in_team
|
@classmethod
def members_not_in_team(cls, val):
    """
    Create an instance of this class set to the ``members_not_in_team`` tag
    with value ``val``.

    :param list of [str] val:
    :rtype: GroupMembersAddError
    """
    tag = 'members_not_in_team'
    return cls(tag, val)
|
pyxsim
|
pyxsim//_version.pyfile:/_version.py:function:render_pep440_old/render_pep440_old
|
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = tag
        if pieces['distance'] or pieces['dirty']:
            rendered += '.post%d' % pieces['distance']
            if pieces['dirty']:
                rendered += '.dev0'
        return rendered
    # no tag found: anchor at 0.post
    rendered = '0.post%d' % pieces['distance']
    if pieces['dirty']:
        rendered += '.dev0'
    return rendered
|
colony
|
colony//libs/object_util.pyfile:/libs/object_util.py:function:__object_has_attr/__object_has_attr
|
def __object_has_attr(instance, attribute_name):
    """
    Check whether an attribute with the given name exists in the given
    instance, providing a uniform abstraction over plain objects (via
    hasattr) and maps/dicts (via key membership).

    :type instance: Object
    :param instance: The instance to be checked for the attribute.
    :type attribute_name: String
    :param attribute_name: The name of the attribute to be checked in the
    instance.
    :rtype: bool
    :return: The result of the has attribute testing in the instance.
    """
    # exact-type check kept on purpose: dict subclasses fall through to
    # the hasattr() path, exactly as in the original implementation
    if type(instance) == dict:
        return attribute_name in instance
    return hasattr(instance, attribute_name)
|
pyang
|
pyang//translators/schemanode.pyclass:SchemaNode/leaf_list
|
@classmethod
def leaf_list(cls, name, parent=None, interleave=None):
    """Create a ``_list_`` schema node representing a leaf-list."""
    node = cls('_list_', parent, interleave=interleave)
    node.attr['name'] = name
    # leaf-list specifics: no keys, zero-or-more occurrences
    for attr_name, attr_value in (('keys', None), ('minEl', '0'),
            ('maxEl', None), ('occur', 3)):
        setattr(node, attr_name, attr_value)
    return node
|
bandwagon-0.3.0
|
bandwagon-0.3.0//bandwagon/tools.pyfile:/bandwagon/tools.py:function:set_record_topology/set_record_topology
|
def set_record_topology(record, topology, pass_if_already_set=False):
    """Set ``record.annotations['topology']``.

    When *pass_if_already_set* is true, an existing topology annotation is
    left untouched.
    """
    already_set = record.annotations.get('topology', None) is not None
    if pass_if_already_set and already_set:
        return
    record.annotations['topology'] = topology
|
textblob_fr
|
textblob_fr//_text.pyfile:/_text.py:function:_suffix_rules/_suffix_rules
|
def _suffix_rules(token, **kwargs):
    """ Default morphological tagging rules for English, based on word
    suffixes. Rules are applied in order, so later matches override
    earlier ones.
    """
    word, pos = token
    if word.endswith('ing'):
        pos = 'VBG'
    if word.endswith('ly'):
        pos = 'RB'
    if word.endswith('s') and not word.endswith(('is', 'ous', 'ss')):
        pos = 'NNS'
    adjective_suffixes = ('able', 'al', 'ful', 'ible', 'ient', 'ish',
        'ive', 'less', 'tic', 'ous')
    if word.endswith(adjective_suffixes) or '-' in word:
        pos = 'JJ'
    if word.endswith('ed'):
        pos = 'VBN'
    if word.endswith(('ate', 'ify', 'ise', 'ize')):
        pos = 'VBP'
    return [word, pos]
|
tpDcc-core-0.0.11
|
tpDcc-core-0.0.11//tpDcc/register.pyfile:/tpDcc/register.py:function:register_class/register_class
|
def register_class(cls_name, cls, is_unique=False):
    """
    Register the given class into the tpDcc module namespace.

    :param cls_name: str, name under which the class is registered
    :param cls: class, class we want to register
    :param is_unique: bool, when True an already-registered class with the
        same name is kept; when False (default) it is overwritten

    Fixes over the original: ``setattr`` on a module ``__dict__`` (a plain
    dict) raised AttributeError, and the unique branch never registered
    the class when the name was absent.
    """
    import tpDcc
    if is_unique and cls_name in tpDcc.__dict__:
        # keep the existing registration untouched
        return
    tpDcc.__dict__[cls_name] = cls
|
debarcer
|
debarcer//src/generate_consensus.pyfile:/src/generate_consensus.py:function:find_closest/find_closest
|
def find_closest(pos, L):
    """
    (int, list) -> tuple

    :param pos: Position of interest along chromosome 0-based
    :param L: List of (position, count) tuples

    Returns a 3-tuple (distance, count, position) for the entry of L
    closest to pos; ties on distance are broken in favour of the highest
    count (keeping the last such entry, as the original did).
    """
    # group (count, position) pairs by their distance from pos
    by_distance = {}
    for position, count in L:
        by_distance.setdefault(abs(pos - position), []).append((count,
            position))
    nearest = min(by_distance)
    # stable sort by count, then take the last element: same tie-breaking
    # as the original sort-and-index[-1]
    candidates = sorted(by_distance[nearest], key=lambda item: item[0])
    best_count, best_position = candidates[-1]
    return nearest, best_count, best_position
|
ipython-7.14.0
|
ipython-7.14.0//IPython/core/display.pyfile:/IPython/core/display.py:function:publish_display_data/publish_display_data
|
def publish_display_data(data, metadata=None, source=None, *, transient=
    None, **kwargs):
    """Publish data and metadata to all frontends.

    See the ``display_data`` message in the messaging documentation for
    more details about this message type. Keys of data and metadata can be
    any mime-type.

    Parameters
    ----------
    data : dict
        Keys are valid MIME types ('text/plain', 'image/svg+xml', ...),
        values the JSON'able data for that MIME type. Minimally all data
        should include 'text/plain', which every frontend can display;
        with more representations available, the frontend chooses.
    metadata : dict
        Metadata related to the data; mime-type keys matching those in
        data describe particular representations.
    source : str, deprecated
        Unused.
    transient : dict, keyword-only
        A dictionary of transient data, such as display_id.
    """
    from IPython.core.interactiveshell import InteractiveShell
    if transient:
        kwargs['transient'] = transient
    publisher = InteractiveShell.instance().display_pub
    publisher.publish(data=data, metadata=metadata, **kwargs)
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/swf.pyfile:/pyboto3/swf.py:function:count_open_workflow_executions/count_open_workflow_executions
|
def count_open_workflow_executions(domain=None, startTimeFilter=None,
    typeFilter=None, tagFilter=None, executionFilter=None):
    """
    Returns the number of open workflow executions within the given domain
    that meet the specified filtering criteria.

    Access control: IAM policies govern access to Amazon SWF resources;
    insufficient permissions fail the action with cause
    OPERATION_NOT_PERMITTED. See also: AWS API Documentation.

    :type domain: string
    :param domain: [REQUIRED] The name of the domain containing the
        workflow executions to count.
    :type startTimeFilter: dict
    :param startTimeFilter: [REQUIRED] Start-time criteria the executions
        must meet to be counted: ``oldestDate`` (datetime, required) and
        ``latestDate`` (datetime, optional).
    :type typeFilter: dict
    :param typeFilter: Count only executions of this workflow type:
        ``name`` (string, required) and ``version`` (string, optional).
    :type tagFilter: dict
    :param tagFilter: Count only executions carrying the given ``tag``
        (string, required).
    :type executionFilter: dict
    :param executionFilter: Count only executions whose ``workflowId``
        (string, required) matches.

    Note: ``executionFilter``, ``typeFilter`` and ``tagFilter`` are
    mutually exclusive — specify at most one of these per request.

    :rtype: dict
    :return: ``{'count': 123, 'truncated': True|False}``
    """
    # Documentation stub from pyboto3 — the generated body performs no work.
    pass
|
cobra_utils
|
cobra_utils//query/get_ids.pyfile:/query/get_ids.py:function:get_gene_ids/get_gene_ids
|
def get_gene_ids(model):
    """
    Collect the identifiers of every gene in a cobra model.

    Parameters
    ----------
    model : cobra.core.Model.Model
        A cobra model.

    Returns
    -------
    genes : list
        IDs of all genes in the model, deduplicated via a set
        (iteration order is therefore not guaranteed).
    """
    unique_ids = {gene.id for gene in model.genes}
    return list(unique_ids)
|
conficus
|
conficus//coerce.pyfile:/coerce.py:function:coerce_single_line_str/coerce_single_line_str
|
def coerce_single_line_str(value):
    """
    Collapse a multiline configuration string according to its markers.

    NOTE(review): the body is empty, so this currently always returns None
    regardless of *value* -- confirm whether the behaviour described below
    is implemented elsewhere or was never finished.

    Multiline strings support two options:

    1. Preserve new lines with a trailing back slash: each source line of
       the triple-quoted value becomes its own output line.
    2. Preserve left spacing with the pipe character: a ``|`` inside the
       value marks where the following text should start on a new line
       with its indentation kept.
    """
|
pycostanza-0.3.1
|
pycostanza-0.3.1//pycostanza/misc.pyfile:/pycostanza/misc.py:function:mode/mode
|
def mode(list_):
    """Return the most common element of *list_*.

    Uses collections.Counter for a single O(n) counting pass instead of the
    original ``max(set(xs), key=xs.count)``, which calls the O(n) ``count``
    once per distinct element (O(n*k) overall).

    Note: when several values tie for the highest count the original picked
    an arbitrary winner (set iteration order); this version deterministically
    returns the tied value that appears first in *list_*.

    Raises ValueError on an empty sequence (as the original did via max()).
    """
    from collections import Counter
    items = list(list_)
    if not items:
        raise ValueError('mode() arg is an empty sequence')
    return Counter(items).most_common(1)[0][0]
|
tpDcc-libs-python-0.0.6
|
tpDcc-libs-python-0.0.6//tpDcc/libs/python/python.pyfile:/tpDcc/libs/python/python.py:function:to_3_list/to_3_list
|
def to_3_list(item):
    """
    Ensure *item* is a list; a non-list value is repeated three times.

    :param item: var
    :return: list<var, var, var>, or *item* itself (unchanged) when it is
        already a list
    """
    if isinstance(item, list):
        return item
    return [item, item, item]
|
aistac
|
aistac//components/abstract_component.pyclass:AbstractComponent/scratch_pad
|
@classmethod
def scratch_pad(cls):
    """Return the intent model of a throw-away 'scratch_pad' component.

    Convenience constructor: builds the component via ``from_env`` with all
    persistence and template/connector alignment behaviour disabled, so the
    intent methods can be used interactively without saving anything.
    """
    component = cls.from_env(task_name='scratch_pad',
                             default_save=False,
                             default_save_intent=False,
                             reset_templates=False,
                             align_connectors=False)
    return component.intent_model
|
Sextant-2.0
|
Sextant-2.0//src/sextant/db_api.pyfile:/src/sextant/db_api.py:function:set_common_cutoff/set_common_cutoff
|
def set_common_cutoff(common_def):
    """
    Set the number of incoming connections at which a function is deemed
    'common'.

    The value is stored in the module-level ``COMMON_CUTOFF`` global
    (documented default is 10 when this setter is never called).

    :param common_def: number of incoming connections
    """
    global COMMON_CUTOFF
    COMMON_CUTOFF = common_def
|
fpl
|
fpl//utils.pyfile:/utils.py:function:short_name_converter/short_name_converter
|
def short_name_converter(team_id):
    """Map a team ID (1-20) to its three-letter short name; None maps to None.

    Raises KeyError for IDs outside the known range.
    """
    names = ['ARS', 'AVL', 'BOU', 'BHA', 'BUR', 'CHE', 'CRY', 'EVE', 'LEI',
             'LIV', 'MCI', 'MUN', 'NEW', 'NOR', 'SHU', 'SOU', 'TOT', 'WAT',
             'WHU', 'WOL']
    mapping = dict(enumerate(names, start=1))
    mapping[None] = None
    return mapping[team_id]
|
fake-blender-api-2.79-0.3.1
|
fake-blender-api-2.79-0.3.1//bpy/ops/nla.pyfile:/bpy/ops/nla.py:function:delete/delete
|
def delete():
    """Delete selected strips.

    Stub mirroring Blender's ``bpy.ops.nla.delete`` operator: this fake-API
    placeholder performs no action and returns None.
    """
    pass
|
ii_game
|
ii_game//scripts/auto_load.pyfile:/scripts/auto_load.py:function:remove_extension/remove_extension
|
def remove_extension(filename):
    """Return *filename* truncated at its first '.'.

    Keeps the original first-dot semantics ('a.tar.gz' -> 'a',
    '.bashrc' -> ''), but fixes a crash: the original used
    ``filename.index('.')``, which raised ValueError for names containing
    no dot.  Such names are now returned unchanged.
    """
    base, _sep, _rest = filename.partition('.')
    return base
|
etcdb-1.7.0
|
etcdb-1.7.0//etcdb/sqlparser/parser.pyfile:/etcdb/sqlparser/parser.py:function:p_opt_column_def_options_list/p_opt_column_def_options_list
|
def p_opt_column_def_options_list(p):
    """opt_column_def_options_list : opt_column_def_options opt_column_def_options_list"""
    # PLY grammar action: the docstring above IS the production rule that
    # yacc parses -- it must not be edited.  Folds the recursive list of
    # column-definition option dicts into a single dict.
    # p[1]: dict for the head option; p[2]: accumulated dict for the tail
    # (None at the end of the recursion).
    if p[2] is None:
        p[0] = p[1]
    else:
        # NOTE(review): update() lets p[1] overwrite duplicate keys, so the
        # earlier (head) option wins on conflicts -- confirm this is intended.
        p[2].update(p[1])
        p[0] = p[2]
|
uwsgiconf
|
uwsgiconf//uwsgi_stub.pyfile:/uwsgi_stub.py:function:websocket_handshake/websocket_handshake
|
def websocket_handshake(security_key=None, origin=None, proto=None):
    """Waits for websocket handshake.

    Stub of the uWSGI API: the real implementation is supplied by the uwsgi
    runtime; this placeholder does nothing and returns None.

    :param str|unicode security_key: Websocket security key to use.
    :param str|unicode origin: Override ``Sec-WebSocket-Origin``.
    :param str|unicode proto: Override ``Sec-WebSocket-Protocol``.
    :rtype: None
    :raises IOError: If unable to complete handshake.
    """
|
novaposhta-api-client-0.2.3
|
novaposhta-api-client-0.2.3//novaposhta/models.pyclass:Counterparty/get_counterparty_by_edrpou
|
@classmethod
def get_counterparty_by_edrpou(cls, city_ref, code):
    """
    Fetch counterparty info by EDRPOU code -- the 8-digit National State
    Registry number of Ukrainian enterprises and organisations.

    :example:
        ``Counterparty.get_counterparty_by_edrpou(city_ref='0006560c-4079-11de-b509-001d92f78698', code='12345678')``
    :param city_ref: ID of the city of the counterparty
    :type city_ref: str or unicode
    :param code: EDRPOU code of the counterparty
    :type code: str or unicode
    :return: dictionary with info about the counterparty
    :rtype: dict
    """
    props = {'CityRef': city_ref, 'EDRPOU': code}
    return cls.send(method='getCounterpartyByEDRPOU', method_props=props)
|
pynq-2.5.2
|
pynq-2.5.2//pynq/lib/arduino/arduino_grove_imu.pyfile:/pynq/lib/arduino/arduino_grove_imu.py:function:_reg2float/_reg2float
|
def _reg2float(reg):
"""Converts 32-bit register value to floats in Python.
Parameters
----------
reg: int
A 32-bit register value read from the mailbox.
Returns
-------
float
A float number translated from the register value.
"""
if reg == 0:
return 0.0
sign = (reg & 2147483648) >> 31 & 1
exp = ((reg & 2139095040) >> 23) - 127
if exp == 0:
man = (reg & 8388607) / pow(2, 23)
else:
man = 1 + (reg & 8388607) / pow(2, 23)
result = pow(2, exp) * man * (sign * -2 + 1)
return float('{0:.2f}'.format(result))
|
dlispy
|
dlispy//common.pyfile:/common.py:function:readBytes/readBytes
|
def readBytes(fs, n):
    """Read exactly *n* bytes from stream *fs* and return them.

    Raises Exception when the stream holds fewer than *n* bytes.
    """
    data = fs.read(n)
    if len(data) == n:
        return data
    raise Exception('Can not read {} bytes only {} left'.format(n, len(data)))
|
cstarmigrate
|
cstarmigrate//config.pyfile:/config.py:function:_assert_type/_assert_type
|
def _assert_type(data, key, tpe, default=None):
"""Extract and verify if a key in a dictionary has a given type."""
value = data.get(key, default)
if not isinstance(value, tpe):
raise ValueError(
f'Config error: {key}: expected {tpe}, found {type(value)}')
return value
|
secrets-guard-0.14
|
secrets-guard-0.14//secrets_guard/utils.pyfile:/secrets_guard/utils.py:function:is_list/is_list
|
def is_list(l):
    """
    Check whether *l* is a list (including list subclasses).

    :param l: the object to test
    :return: True when *l* is a list instance, False otherwise
    """
    return isinstance(l, list)
|
mmlspark
|
mmlspark//cognitive/GenerateThumbnails.pyclass:GenerateThumbnails/getJavaPackage
|
@staticmethod
def getJavaPackage():
""" Returns package name String. """
return 'com.microsoft.ml.spark.cognitive.GenerateThumbnails'
|
fake-blender-api-2.79-0.3.1
|
fake-blender-api-2.79-0.3.1//bpy/ops/wm.pyfile:/bpy/ops/wm.py:function:keymap_restore/keymap_restore
|
def keymap_restore(all: bool=False):
    """Restore key map(s).

    Stub mirroring Blender's ``bpy.ops.wm.keymap_restore`` operator: this
    fake-API placeholder performs no action and returns None.  (The ``all``
    parameter intentionally shadows the builtin to match Blender's API.)

    :param all: All Keymaps, Restore all keymaps to default
    :type all: bool
    """
    pass
|
baka_tenshi
|
baka_tenshi//schema.pyclass:References/__declare_first__
|
@classmethod
def __declare_first__(cls):
    """declarative hook called within the 'before_configure' mapper event.

    Resolves the deferred references queued in ``cls._to_ref``: each entry
    is a (local, remote) pair of class-registry keys.  Both classes are
    looked up in the declarative class registry and the local class is asked
    to wire a reference to the remote class's table; the queue is then
    cleared so the hook is idempotent across repeated configure passes.
    """
    for lcl, rmt in cls._to_ref:
        # NOTE(review): _reference_table is presumably defined on the mapped
        # classes and adds the FK/relationship wiring -- confirm.
        cls._decl_class_registry[lcl]._reference_table(cls.
            _decl_class_registry[rmt].__table__)
    cls._to_ref.clear()
|
allegedb-0.15.1
|
allegedb-0.15.1//allegedb/cache.pyfile:/allegedb/cache.py:function:lru_append/lru_append
|
def lru_append(kc, lru, kckey, maxsize):
    """Delete old data from ``kc``, then add the new ``kckey``.

    :param kc: a three-layer keycache (nested dicts keyed peb -> turn -> tick)
    :param lru: an :class:`OrderedDict` with a key for each triple that should fill out ``kc``'s three layers
    :param kckey: a triple that indexes into ``kc``, which will be added to ``lru`` if needed
    :param maxsize: maximum number of entries in ``lru`` and, therefore, ``kc``
    """
    if kckey in lru:
        # Already tracked: nothing to evict and the key keeps its position.
        return
    while len(lru) >= maxsize:
        # Evict the oldest tracked triple (FIFO end of the OrderedDict) ...
        (peb, turn, tick), _ = lru.popitem(False)
        # ... then remove the corresponding entry from the three-layer cache,
        # tolerating entries that were already dropped from kc.
        if peb not in kc:
            continue
        kcpeb = kc[peb]
        if turn not in kcpeb:
            continue
        kcpebturn = kcpeb[turn]
        if tick not in kcpebturn:
            continue
        del kcpebturn[tick]
        # Prune now-empty intermediate layers so kc mirrors lru exactly.
        if not kcpebturn:
            del kcpeb[turn]
        if not kcpeb:
            del kc[peb]
    lru[kckey] = True
|
nbsite-0.6.7.data
|
nbsite-0.6.7.data//scripts/nbsite_generate_modules.pyfile:/scripts/nbsite_generate_modules.py:function:makename/makename
|
def makename(package, module):
    """Join *package* and *module* with a dot.

    Falls back to whichever part is non-empty: an empty/None package yields
    *module* unchanged; an empty module yields *package* alone.
    """
    if not package:
        return module
    return package + '.' + module if module else package
|
APScheduler-3.6.3
|
APScheduler-3.6.3//apscheduler/util.pyfile:/apscheduler/util.py:function:asbool/asbool
|
def asbool(obj):
    """
    Interprets an object as a boolean value.

    Strings are normalised (stripped, lowercased) and matched against the
    usual true/false spellings; any other string raises ValueError.
    Non-string objects fall through to plain truthiness via bool().

    :rtype: bool
    """
    if not isinstance(obj, str):
        return bool(obj)
    normalized = obj.strip().lower()
    if normalized in ('true', 'yes', 'on', 'y', 't', '1'):
        return True
    if normalized in ('false', 'no', 'off', 'n', 'f', '0'):
        return False
    raise ValueError('Unable to interpret value "%s" as boolean' % normalized)
|
sequana-0.8.3
|
sequana-0.8.3//sequana/resources/canvas/bar.pyfile:/sequana/resources/canvas/bar.py:function:stacked_bar/stacked_bar
|
def stacked_bar(tag, title, datalist):
    """Build a CanvasJS stacked-bar (100%) chart <script> snippet.

    data list should be a list of dictionaries formatted as follows::

        {"name": "A",
         "data": {
            "R1_mapped": 50,
            "R2_mapped": 50,
            "R1_unmapped": 50,
            "R2_unmapped": 50,
            }
        }

    :param tag: suffix for the target "chartContainer<tag>" element id
    :param title: chart title text
    :param datalist: list of series dicts as described above
    :return: the JavaScript snippet as a string
    """
    # Render each series as a CanvasJS data entry; one dataPoint per
    # (label, value) pair in the series' "data" dict.
    dataitems = ''
    for item in datalist:
        datatext = []
        for k, v in item['data'].items():
            datatext.append('{y:%s,label:"%s"}' % (v, k))
        datatext = ',\n '.join(datatext)
        params = {'name': item['name'], 'datatext': datatext}
        dataitems += (
            """
    {
        type: "stackedBar100",
        showInLegend: true,
        name: "%(name)s",
        dataPoints: [
            %(datatext)s
        ]
    },
    """
            % params)
    # Splice the rendered series into the page-level chart template.
    metadata = {'tag': tag, 'title': title, 'dataitems': dataitems}
    script = """
    <script type="text/javascript">
    window.onload = function () {
        var chart = new CanvasJS.Chart("chartContainer%(tag)s",
        {
            theme: "theme2",
            title:{
                text: "%(title)s"
            },
            animationEnabled: true,
            axisY:{
                title: "percent"
            },
            legend :{
                horizontalAlign: 'center',
                verticalAlign: 'bottom'
            },
            toolTip: {
                shared: true
            },
            data:[
            %(dataitems)s
            ]
        });
        chart.render();
    }
    </script>
    """
    return script % metadata
|
bx
|
bx//cookbook/attribute.pyfile:/cookbook/attribute.py:function:mangle/mangle
|
def mangle(classname, attrname):
    """Return the name-mangled form of a private attribute, following
    Python's convention for double-underscore names: ``_ClassName__attr``."""
    return f'_{classname}__{attrname}'
|
cellsystem
|
cellsystem//cellsystem.pyclass:SimpleCells/_init_daughter
|
@staticmethod
def _init_daughter(cell):
"""Add a new daughter of the cell in an appropriate site."""
daughter = cell.new_daughter()
site = cell.site
daughter.add_to(site.random_neighbor())
return daughter
|
arch
|
arch//standalone/utils/cloudpickle.pyfile:/standalone/utils/cloudpickle.py:function:instance/instance
|
def instance(cls):
    """Instantiate *cls* by calling it with no arguments.

    Parameters
    ----------
    cls : type
        The class to instantiate.

    Returns
    -------
    instance : cls
        A freshly constructed instance of ``cls``.
    """
    return cls()
|
zope.formlib-4.7.1
|
zope.formlib-4.7.1//src/zope/formlib/interfaces.pyclass:IAddFormCustomization/add
|
def add(object):
    """Add an object to the context. Returns the added object.

    zope.interface method declaration: no implementation here by design;
    concrete add-form classes provide the behaviour.  The parameter name
    intentionally shadows the builtin ``object`` per the interface's
    published signature.
    """
|
fake-bpy-module-2.78-20200428
|
fake-bpy-module-2.78-20200428//bpy/ops/object.pyfile:/bpy/ops/object.py:function:multires_higher_levels_delete/multires_higher_levels_delete
|
def multires_higher_levels_delete(modifier: str=''):
    """Deletes the higher resolution mesh, potential loss of detail.

    Stub mirroring Blender's ``bpy.ops.object.multires_higher_levels_delete``
    operator: this fake-API placeholder performs no action and returns None.

    :param modifier: Modifier, Name of the modifier to edit
    :type modifier: str
    """
    pass
|
finex
|
finex//ratios.pyfile:/ratios.py:function:book_value_per_share/book_value_per_share
|
def book_value_per_share(book_value, total_shares):
    """Compute book value per share.

    Parameters
    ----------
    book_value : int or float
        Book value (equity) of an enterprise.
    total_shares : int or float
        Total number of shares outstanding.

    Returns
    -------
    int or float
        ``book_value / total_shares``.

    Notes
    -----
    Commonly used as a valuation measure: a stock trading above its book
    value per share may be considered overvalued, and below it undervalued.
    """
    return book_value / total_shares
|
collective.lead-1.0
|
collective.lead-1.0//collective/lead/interfaces.pyclass:IConfigurableDatabase/_setup_mappers
|
def _setup_mappers(tables, mappers):
    """Given a dict of tables, keyed by table name as in self.tables,
    set up all SQLAlchemy mappers for the database and save them to the
    dict 'mappers', keyed by table name.

    zope.interface method declaration: no implementation here; concrete
    database classes provide the behaviour.
    """
|
ptdc
|
ptdc//support.pyfile:/support.py:function:get_attribute/get_attribute
|
def get_attribute(obj, attr_name):
    """
    Return the value of attribute *attr_name* on *obj*.

    :param obj: object from which to read the value
    :param attr_name: name of the attribute
    :return: the attribute's value (AttributeError propagates if missing)
    """
    return getattr(obj, attr_name)
|
decore-0.0.1
|
decore-0.0.1//versioneer.pyfile:/versioneer.py:function:scan_setup_py/scan_setup_py
|
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Scans setup.py line by line for the three required versioneer usages
    (import, get_cmdclass, get_version) and for obsolete in-file setters
    (versioneer.VCS / versioneer.versionfile_source).  Prints guidance for
    anything amiss and returns the number of problem categories found
    (0, 1 or 2).
    """
    found = set()        # which of the three required snippets were seen
    setters = False      # obsolete setup.py-based configuration present?
    errors = 0
    with open('setup.py', 'r') as f:
        for line in f.readlines():
            if 'import versioneer' in line:
                found.add('import')
            if 'versioneer.get_cmdclass()' in line:
                found.add('cmdclass')
            if 'versioneer.get_version()' in line:
                found.add('get_version')
            if 'versioneer.VCS' in line:
                setters = True
            if 'versioneer.versionfile_source' in line:
                setters = True
    # All three snippets must be present for a correctly wired setup.py.
    if len(found) != 3:
        print('')
        print('Your setup.py appears to be missing some important items')
        print('(but I might be wrong). Please make sure it has something')
        print('roughly like the following:')
        print('')
        print(' import versioneer')
        print(' setup( version=versioneer.get_version(),')
        print(' cmdclass=versioneer.get_cmdclass(), ...)')
        print('')
        errors += 1
    # Configuration now lives in setup.cfg; in-file setters are deprecated.
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print('now lives in setup.cfg, and should be removed from setup.py')
        print('')
        errors += 1
    return errors
|
LaueTools-3.0.0.34
|
LaueTools-3.0.0.34//LaueTools/IOLaueTools.pyfile:/LaueTools/IOLaueTools.py:function:readfitfile_comments/readfitfile_comments
|
def readfitfile_comments(fitfilepath):
    """Read the commented metadata of a .fit file.

    Recognised marker comments (each followed by a ``# value`` line):
    ``#CCDLabel``, ``#pixelsize``, ``#Frame dimensions``,
    ``#DetectorParameters`` (with or without a space after ``#``).

    Improvements over the original: the file is read once inside a ``with``
    block (the original read it twice -- once just to count lines -- and
    leaked the handle on any exception), and the four flag variables are
    replaced by a single pending-key lookup table.  The extraction logic is
    unchanged: the line AFTER a marker is split on '#' and its second field,
    stripped, is stored.

    :param fitfilepath: path to the .fit file
    :return: dict with any of the keys 'CCDLabel', 'pixelsize', 'framedim',
        'detectorparameters' that were found
    """
    markers = {
        ('#CCDLabel', '# CCDLabel'): 'CCDLabel',
        ('#pixelsize', '# pixelsize'): 'pixelsize',
        ('#Frame dimensions', '# Frame dimensions'): 'framedim',
        ('#DetectorParameters', '# DetectorParameters'): 'detectorparameters',
    }
    dictcomments = {}
    pending_key = None
    with open(fitfilepath, 'r') as f:
        for line in f:
            # A marker on the previous line means this line holds the value.
            if pending_key is not None:
                dictcomments[pending_key] = line.split('#')[1].strip()
                pending_key = None
            # Check whether the current line starts a new marker.
            for prefixes, key in markers.items():
                if line.startswith(prefixes):
                    pending_key = key
    return dictcomments
|
autoflpy-1.1.8
|
autoflpy-1.1.8//autoflpy/util/plotting.pyfile:/autoflpy/util/plotting.py:function:select_plot_data_single/select_plot_data_single
|
def select_plot_data_single(values_list, plot_information, number_of_flights):
    """
    List of data to plot returns values_list which has structure:
    [[[axis, [data_source, column], [axis, [data_source, column]],
    [[axis, [data_source, column], [axis, [data_source, column]]]

    For each flight, match every requested (axis, column, data_source)
    triple in plot_information against the flight's values_list entries
    (case-insensitive on both source and column names) and collect
    [axis, column_data, data_source] triples.

    NOTE(review): ``data.append(values_list_index)`` mutates the entries of
    *plot_information* in place on every match -- callers passing the same
    list twice will see it grow; confirm this side effect is intended.

    :param values_list: per-flight list of [source_name, column...] records
    :param plot_information: list of [axis, column_name, source_name] items
    :param number_of_flights: number of leading entries of values_list to use
    :return: per-flight list of [axis, column, data_source] selections
    """
    plot_data = []
    for data_set in range(number_of_flights):
        plot_data_temp = []
        for data in plot_information:
            values_list_index = 0
            for values_list_data in values_list[data_set]:
                # Match the requested data source name (element [2]).
                if data[2].lower() == values_list_data[0].lower():
                    data.append(values_list_index)
                    # Within the matched source, find the requested column.
                    for column in values_list[data_set][values_list_index][1:]:
                        if column[0].lower() == data[1].lower():
                            plot_data_temp.append([data[0], column, data[2]])
                            break
                values_list_index += 1
        plot_data.append(plot_data_temp)
    return plot_data
|
typed_ast
|
typed_ast//ast3.pyfile:/ast3.py:function:iter_fields/iter_fields
|
def iter_fields(node):
    """
    Yield ``(fieldname, value)`` pairs for every entry of ``node._fields``
    that actually exists on *node*; fields missing from the instance are
    silently skipped.
    """
    for name in node._fields:
        try:
            value = getattr(node, name)
        except AttributeError:
            continue
        yield name, value
|
punters_client
|
punters_client//html_utils.pyfile:/html_utils.py:function:try_parse/try_parse
|
def try_parse(value, parser, default=None):
    """Apply *parser* to *value*; on TypeError or ValueError return
    *default* instead of raising."""
    try:
        result = parser(value)
    except (TypeError, ValueError):
        return default
    return result
|
stoqlib
|
stoqlib//domain/payment/card.pyclass:CreditProvider/has_card_provider
|
@classmethod
def has_card_provider(cls, store):
    """Check whether at least one card provider exists.

    :param store: a database store
    :returns: True if any provider rows are found, else False
    """
    provider_count = store.find(cls).count()
    return bool(provider_count)
|
testfm
|
testfm//upgrade.pyclass:Upgrade/list_versions
|
@classmethod
def list_versions(cls, options=None):
    """Build the ``foreman-maintain upgrade list-versions`` command.

    :param options: optional dict of CLI options (treated as empty when None)
    :returns: the command constructed by ``cls._construct_command``
    """
    cls.command_sub = 'list-versions'
    opts = {} if options is None else options
    return cls._construct_command(opts)
|
aiida_nwchem
|
aiida_nwchem//tools/dbexporters/tcod_plugins/nwcpymatgen.pyclass:NwcpymatgenTcodtranslator/get_software_package_compilation_timestamp
|
@classmethod
def get_software_package_compilation_timestamp(cls, calc, **kwargs):
    """
    Returns the timestamp of package/program compilation in ISO 8601
    format, or None when the value cannot be extracted or parsed from the
    calculation's job_info output (best-effort, errors swallowed).
    """
    from dateutil.parser import parse
    try:
        compiled = calc.out.job_info.get_dict()['compiled']
        return parse(compiled.replace('_', ' ')).isoformat()
    except Exception:
        # Best-effort extraction: missing output nodes, absent keys or
        # unparseable dates all yield None rather than an error.
        return None
|
pce
|
pce//util4pce.pyfile:/util4pce.py:function:human_readable_time/human_readable_time
|
def human_readable_time(time):
    """
    Convert a time in seconds into a value scaled to ns, us, ms or s.

    Fix: the sub-nanosecond branch multiplied by 1e6 while labelling the
    result 'ns'; nanoseconds require a factor of 1e9 (the value was off by
    a factor of 1000).

    Parameters
    ----------
    time (float) :
        time value in seconds

    Returns
    -------
    time (float):
        time value converted to the chosen unit
    unit (str):
        unit of measurement ('ns', 'us', 'ms' or 's')

    AUTHOR: Luca Giaccone ([email protected])
    DATE: 11.08.2018
    HISTORY:
    """
    if time < 1e-09:
        time = time * 1000000000.0
        unit = 'ns'
    elif time < 1e-06:
        time = time * 1000000.0
        unit = 'us'
    elif time < 0.001:
        time = time * 1000.0
        unit = 'ms'
    else:
        unit = 's'
    return time, unit
|
pytest_bug
|
pytest_bug//hooks.pyfile:/hooks.py:function:pytest_bug_item_mark/pytest_bug_item_mark
|
def pytest_bug_item_mark(item, config):
    """
    Hook specification: called after a bug mark has been set on *item*.

    Plugins implement this hook to react to the marking; this default
    declaration intentionally does nothing.

    :param item: pytest item
    :param config: Base pytest config
    """
|
teensytoany-0.0.21
|
teensytoany-0.0.21//versioneer.pyfile:/versioneer.py:function:render_git_describe_long/render_git_describe_long
|
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'; the distance and
    hash are always included.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = '%s-%d-g%s' % (tag, pieces['distance'], pieces['short'])
    else:
        rendered = pieces['short']
    return rendered + '-dirty' if pieces['dirty'] else rendered
|
pdfminer
|
pdfminer//utils.pyfile:/utils.py:function:fsplit/fsplit
|
def fsplit(pred, objs):
    """Partition *objs* into two lists by *pred*: (matching, non-matching),
    preserving the original order within each."""
    passed, failed = [], []
    for item in objs:
        (passed if pred(item) else failed).append(item)
    return passed, failed
|
digital_rf-2.6.3
|
digital_rf-2.6.3//versioneer.pyfile:/versioneer.py:function:render_pep440_old/render_pep440_old
|
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" suffix marks a dirty working tree.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = tag
        # Exactly on a clean tag: nothing to append.
        if not (pieces['distance'] or pieces['dirty']):
            return rendered
        rendered += '.post%d' % pieces['distance']
    else:
        rendered = '0.post%d' % pieces['distance']
    if pieces['dirty']:
        rendered += '.dev0'
    return rendered
|
tadtool-0.81
|
tadtool-0.81//tadtool/tad.pyfile:/tadtool/tad.py:function:_border_type/_border_type
|
def _border_type(i, values):
"""
Returns border type:
:param i: index of potential border
:param values: insulation index values
:return: if border: 1 or -1, else 0. 1 if derivative at border is >0, -1 otherwise
"""
if i == 0:
return 1
if i == len(values) - 1:
return -1
if values[i - 1] <= 0 <= values[i + 1]:
return 1
if values[i + 1] <= 0 <= values[i - 1]:
return -1
return 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.