repo (stringlengths 1-29) | path (stringlengths 24-332) | code (stringlengths 39-579k)
---|---|---|
watchmaker
|
watchmaker//static/salt/formulas/ash-windows-formula/_modules/win_lgpo.pyclass:_policy_info/_strip_quotes
|
@classmethod
def _strip_quotes(cls, val, **kwargs):
"""
strips quotes from a string
"""
return val.replace('"', '')
|
vpcrouter-1.8.11
|
vpcrouter-1.8.11//vpcrouter/monitor/common.pyclass:MonitorPlugin/check_arguments
|
@classmethod
def check_arguments(cls, conf):
"""
Callback to perform sanity checking for the plugin's specific
parameters.
Should raise exception in case of error.
"""
return
|
thug-1.6.1
|
thug-1.6.1//thug/ThugAPI/IThugAPI.pyclass:IThugAPI/set_file_logging
|
def set_file_logging():
"""
set_file_logging
Enable file logging mode
@return: None
"""
|
pya2l-0.0.1
|
pya2l-0.0.1//pya2l/parser/grammar/parser.pyclass:A2lParser/p_ref_measurement_optional
|
@staticmethod
def p_ref_measurement_optional(p):
"""ref_measurement_optional : identifier"""
p[0] = p.slice[1].type, p[1]
|
clustergrammer2
|
clustergrammer2//clustergrammer_fun/export_data.pyfile:/clustergrammer_fun/export_data.py:function:export_net_json/export_net_json
|
def export_net_json(net, net_type, indent='no-indent'):
""" export json string of dat """
import json
from copy import deepcopy
if net_type == 'dat':
exp_dict = deepcopy(net.dat)
if type(exp_dict['mat']) is not list:
exp_dict['mat'] = exp_dict['mat'].tolist()
if 'mat_orig' in exp_dict:
exp_dict['mat_orig'] = exp_dict['mat_orig'].tolist()
elif net_type == 'viz':
exp_dict = net.viz
elif net_type == 'sim_row':
exp_dict = net.sim['row']
elif net_type == 'sim_col':
exp_dict = net.sim['col']
if indent == 'indent':
exp_json = json.dumps(exp_dict, indent=2)
else:
exp_json = json.dumps(exp_dict)
return exp_json
|
sregistry-0.2.35
|
sregistry-0.2.35//sregistry/auth/secrets.pyfile:/sregistry/auth/secrets.py:function:_default_client_secrets/_default_client_secrets
|
def _default_client_secrets():
"""return default client secrets, including singularity hub base
"""
client_secrets = {'hub': {'base': 'https://singularity-hub.org/api'}}
return client_secrets
|
pluggdapps-0.43dev
|
pluggdapps-0.43dev//pluggdapps/utils/lib.pyfile:/pluggdapps/utils/lib.py:function:asbool/asbool
|
def asbool(val, default=None):
"""Convert a string representation of boolean value to boolean type."""
try:
if val and isinstance(val, str):
v = True if val.lower() == 'true' else False
else:
v = bool(val)
except:
v = default
return v
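# Usage sketch (illustrative addition, not part of the original source):
#   asbool('True')  -> True   (only the string 'true', case-insensitively, maps to True)
#   asbool('false') -> False
#   asbool(0)       -> False  (non-string values fall back to bool(val))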
|
matrix_array
|
matrix_array//array.pyclass:matrixArrayNum/sum
|
@staticmethod
def sum(*c, offset=0):
"""
        matrixArrayNum sum over matrices
        @param c: matrices
"""
return sum(c, offset) if len(c) > 1 else sum(c[0])
|
scoria-1.0.5
|
scoria-1.0.5//scoria/six.pyfile:/scoria/six.py:function:_add_doc/_add_doc
|
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
|
bayesian-optimization-1.1.0
|
bayesian-optimization-1.1.0//bayes_opt/util.pyclass:Colours/yellow
|
@classmethod
def yellow(cls, s):
"""Wrap text in yellow."""
return cls._wrap_colour(s, cls.YELLOW)
|
mxnet-1.6.0.data
|
mxnet-1.6.0.data//purelib/mxnet/rnn/io.pyfile:/purelib/mxnet/rnn/io.py:function:encode_sentences/encode_sentences
|
def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key=
'\n', start_label=0, unknown_token=None):
"""Encode sentences and (optionally) build a mapping
from string tokens to integer indices. Unknown keys
will be added to vocabulary.
Parameters
----------
sentences : list of list of str
A list of sentences to encode. Each sentence
should be a list of string tokens.
vocab : None or dict of str -> int
Optional input Vocabulary
invalid_label : int, default -1
Index for invalid token, like <end-of-sentence>
invalid_key : str, default '\\n'
Key for invalid token. Use '\\n' for end
of sentence by default.
start_label : int
lowest index.
unknown_token: str
Symbol to represent unknown token.
If not specified, unknown token will be skipped.
Returns
-------
result : list of list of int
encoded sentences
vocab : dict of str -> int
result vocabulary
"""
idx = start_label
if vocab is None:
vocab = {invalid_key: invalid_label}
new_vocab = True
else:
new_vocab = False
res = []
for sent in sentences:
coded = []
for word in sent:
if word not in vocab:
assert new_vocab or unknown_token, 'Unknown token %s' % word
if idx == invalid_label:
idx += 1
if unknown_token:
word = unknown_token
vocab[word] = idx
idx += 1
coded.append(vocab[word])
res.append(coded)
return res, vocab
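# Usage sketch (illustrative addition, not part of the original source):
#   sents = [['hello', 'world'], ['hello']]
#   coded, vocab = encode_sentences(sents)
#   # coded -> [[0, 1], [0]]; vocab -> {'\n': -1, 'hello': 0, 'world': 1}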
|
airsspy-0.1.2
|
airsspy-0.1.2//airsspy/seed.pyfile:/airsspy/seed.py:function:tuple2range/tuple2range
|
def tuple2range(value):
"""
Return the string for a given value. If the value is a tuple
make it a range.
"""
if isinstance(value, (list, tuple)):
return '{}-{}'.format(value[0], value[1])
return str(value)
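# Usage sketch (illustrative addition, not part of the original source):
#   tuple2range((1, 5)) -> '1-5'
#   tuple2range(3)      -> '3'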
|
fake-blender-api-2.79-0.3.1
|
fake-blender-api-2.79-0.3.1//bpy/ops/object.pyfile:/bpy/ops/object.py:function:drop_named_image/drop_named_image
|
def drop_named_image(filepath: str='', relative_path: bool=True, name: str=
'', view_align: bool=False, location: float=(0.0, 0.0, 0.0), rotation:
float=(0.0, 0.0, 0.0), layers: bool=(False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False)):
"""Add an empty image type to scene with data
:param filepath: Filepath, Path to image file
:type filepath: str
:param relative_path: Relative Path, Select the file relative to the blend file
:type relative_path: bool
:param name: Name, Image name to assign
:type name: str
:param view_align: Align to View, Align the new object to the view
:type view_align: bool
:param location: Location, Location for the newly added object
:type location: float
:param rotation: Rotation, Rotation for the newly added object
:type rotation: float
:param layers: Layer
:type layers: bool
"""
pass
|
homeassistant-0.109.6
|
homeassistant-0.109.6//homeassistant/components/google_assistant/trait.pyclass:ArmDisArmTrait/might_2fa
|
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return True
|
breezy-3.0.2
|
breezy-3.0.2//breezy/revision.pyfile:/breezy/revision.py:function:is_reserved_id/is_reserved_id
|
def is_reserved_id(revision_id):
"""Determine whether a revision id is reserved
:return: True if the revision is reserved, False otherwise
"""
return isinstance(revision_id, bytes) and revision_id.endswith(b':')
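# Usage sketch (illustrative addition, not part of the original source):
#   is_reserved_id(b'null:')   -> True
#   is_reserved_id(b'some-id') -> False
#   is_reserved_id('null:')    -> False  (non-bytes ids are never reserved)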
|
openstack-cyborg-3.0.0
|
openstack-cyborg-3.0.0//cyborg/objects/attach_handle.pyclass:AttachHandle/list
|
@classmethod
def list(cls, context, filters=None):
"""Return a list of AttachHandle objects."""
if filters:
sort_dir = filters.pop('sort_dir', 'desc')
sort_key = filters.pop('sort_key', 'created_at')
limit = filters.pop('limit', None)
marker = filters.pop('marker_obj', None)
db_ahs = cls.dbapi.attach_handle_get_by_filters(context, filters,
sort_dir=sort_dir, sort_key=sort_key, limit=limit, marker=marker)
else:
db_ahs = cls.dbapi.attach_handle_list(context)
obj_ah_list = cls._from_db_object_list(db_ahs, context)
return obj_ah_list
|
mata-0.1.4
|
mata-0.1.4//mata/launcher.pyfile:/mata/launcher.py:function:shouldInstallPygame/shouldInstallPygame
|
def shouldInstallPygame():
"""
Return whether Pygame needs to be installed
"""
try:
import pygame
return False
except ImportError:
return True
|
mizani-0.6.0
|
mizani-0.6.0//mizani/scale.pyclass:scale_discrete/apply
|
@classmethod
def apply(cls, x, palette, na_value=None):
"""
Scale data discretely
Parameters
----------
x : array_like
Discrete values to scale
palette : callable ``f(x)``
Palette to use
na_value : object
Value to use for missing values.
Returns
-------
out : array_like
Scaled values
"""
limits = cls.train(x)
return cls.map(x, palette, limits, na_value)
|
bda.resultduplexer-1.0-beta2
|
bda.resultduplexer-1.0-beta2//bda/resultduplexer/interfaces.pyclass:IBrainWrapper/getURL
|
def getURL():
"""Return the object URL.
"""
|
flywheel
|
flywheel//models/analysis_input.pyclass:AnalysisInput/positional_to_model
|
@staticmethod
def positional_to_model(value):
"""Converts a positional argument to a model value"""
return value
|
leo-6.2.1
|
leo-6.2.1//leo/core/leoGlobals.pyfile:/leo/core/leoGlobals.py:function:splitLines/splitLines
|
def splitLines(s):
"""Split s into lines, preserving the number of lines and
the endings of all lines, including the last line."""
if s:
return s.splitlines(True)
return []
|
homeassistant-0.109.6
|
homeassistant-0.109.6//homeassistant/components/mysensors/helpers.pyfile:/homeassistant/components/mysensors/helpers.py:function:invalid_msg/invalid_msg
|
def invalid_msg(gateway, child, value_type_name):
"""Return a message for an invalid child during schema validation."""
pres = gateway.const.Presentation
set_req = gateway.const.SetReq
return (
f'{pres(child.type).name} requires value_type {set_req[value_type_name].name}'
)
|
pydruid-0.5.9
|
pydruid-0.5.9//env/lib/python3.7/_dummy_thread.pyfile:/env/lib/python3.7/_dummy_thread.py:function:exit/exit
|
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
|
tensors-0.1
|
tensors-0.1//tensors/frontend/tensor.pyclass:Tensor/approximate
|
@classmethod
def approximate(Class, function, accuracy, *args, **kwargs):
"""
Approximate the function of the given tensor arguments.
:param function: the function to be approximated
:type function: a callable that accepts a number of one-dimensional
NumPy arrays of equal length and returns a one-dimensional NumPy
array of the same length
:param accuracy: an acceptable relative root mean squared error of the
approximation
:type accuracy: a non-negative number
:param args: the arguments to be passed to `function`
:type args: tensors of the same type and shape
:param kwargs: optional keyword arguments to be passed to `function`
:returns: an approximation of the function of the given tensor arguments
with the specified accuracy
:rtype: a tensor of the same type and shape as `args`
:raises: :class:`AssertionError` if `accuracy` is negative,
:class:`AssertionError` if `args` have different shapes, and
:class:`RuntimeError` if the approximation process fails to converge
.. seealso::
:func:`tensors.tools.tensorize`
"""
raise NotImplementedError
|
thriftr-0.0.1
|
thriftr-0.0.1//thriftr/parser.pyfile:/thriftr/parser.py:function:p_const_list/p_const_list
|
def p_const_list(p):
"""const_list : '[' const_value_seq ']' """
p[0] = p[2]
|
pandas-1.0.3
|
pandas-1.0.3//pandas/core/arrays/sparse/accessor.pyclass:SparseAccessor/from_coo
|
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a Series with sparse values from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : Series
A Series with sparse values.
Examples
--------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
shape=(3, 4))
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 2.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> ss = pd.Series.sparse.from_coo(A)
>>> ss
0 2 1
3 2
1 0 3
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
"""
from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
from pandas import Series
result = _coo_to_sparse_series(A, dense_index=dense_index)
result = Series(result.array, index=result.index, copy=False)
return result
|
PICOS-2.0.8
|
PICOS-2.0.8//picos/solvers/solver_cplex.pyclass:CPLEXSolver/default_penalty
|
@classmethod
def default_penalty(cls):
"""Implement :meth:`~.solver.Solver.default_penalty`."""
return 0.0
|
sylib
|
sylib//atfparser/parser.pyfile:/atfparser/parser.py:function:p_val_comp_num/p_val_comp_num
|
def p_val_comp_num(p):
"""comp_num : comp_num_type INTEGER COMMA INIOFFSET INTEGER COMMA BLOCKSIZE INTEGER COMMA VALPERBLOCK INTEGER COMMA VALOFFSETS ints"""
    p[0] = {'TYPE': p[1], 'LENGTH': p[2], p[4]: p[5], p[7]: p[8],
        p[10]: p[11], p[13]: p[14]}
|
ask_sdk_model
|
ask_sdk_model//interfaces/alexa/presentation/aplt/command.pyclass:Command/get_real_child_model
|
@classmethod
def get_real_child_model(cls, data):
"""Returns the real base class specified by the discriminator"""
discriminator_value = data[cls.json_discriminator_key]
return cls.discriminator_value_class_map.get(discriminator_value)
|
colorful-0.5.4
|
colorful-0.5.4//colorful/colors.pyfile:/colorful/colors.py:function:parse_rgb_txt_file/parse_rgb_txt_file
|
def parse_rgb_txt_file(path):
"""
Parse the given rgb.txt file into a Python dict.
See https://en.wikipedia.org/wiki/X11_color_names for more information
:param str path: the path to the X11 rgb.txt file
"""
color_dict = {}
with open(path, 'r') as rgb_txt:
for line in rgb_txt:
line = line.strip()
if not line or line.startswith('!'):
continue
parts = line.split()
            color_dict[' '.join(parts[3:])] = (int(parts[0]), int(parts[1]),
                int(parts[2]))
return color_dict
|
retryp-0.3.post9
|
retryp-0.3.post9//versioneer.pyfile:/versioneer.py:function:render_pep440_pre/render_pep440_pre
|
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance']:
rendered += '.post.dev%d' % pieces['distance']
else:
rendered = '0.post.dev%d' % pieces['distance']
return rendered
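# Usage sketch (illustrative addition, not part of the original source):
#   render_pep440_pre({'closest-tag': '1.2.0', 'distance': 3}) -> '1.2.0.post.dev3'
#   render_pep440_pre({'closest-tag': None, 'distance': 7})    -> '0.post.dev7'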
|
fake-bpy-module-2.79-20200428
|
fake-bpy-module-2.79-20200428//bpy/ops/sculpt.pyfile:/bpy/ops/sculpt.py:function:optimize/optimize
|
def optimize():
"""Recalculate the sculpt BVH to improve performance
"""
pass
|
pyaudioclassification-0.1.9
|
pyaudioclassification-0.1.9//pyaudioclassification/models.pyfile:/pyaudioclassification/models.py:function:svm/svm
|
def svm(num_classes):
"""Support vector machine.
-*- ref: mtobeiyf https://github.com/mtobeiyf/audio-classification -*-
"""
from sklearn.svm import SVC
return SVC(C=20.0, gamma=1e-05)
|
apstools
|
apstools//synApps/calcout.pyfile:/synApps/calcout.py:function:setup_incrementer_calcout/setup_incrementer_calcout
|
def setup_incrementer_calcout(calcout, scan=None, limit=100000):
"""
setup calcout record as an incrementer
PARAMETERS
calcout : object
instance of :class:`CalcoutRecord`
scan : text or int or None
any of the EPICS record `.SCAN` values,
or the index number of the value,
set to default if `None`,
default: `.1 second`
limit : int or None
set the incrementer back to zero
when this number is reached (or passed),
default: 100000
"""
scan = scan or '.1 second'
calcout.reset()
calcout.scanning_rate.put('Passive')
calcout.description.put('incrementer')
pvname = calcout.calculated_value.pvname.split('.')[0]
calcout.channels.A.input_pv.put(pvname)
calcout.channels.B.input_value.put(limit)
calcout.calculation.put('(A+1) % B')
calcout.scanning_rate.put(scan)
calcout.hints = {'fields': ['input_value']}
calcout.read_attrs = ['input_value']
|
zope
|
zope//error/interfaces.pyclass:ILocalErrorReportingUtility/setProperties
|
def setProperties(keep_entries, copy_to_zlog=1, ignored_exceptions=(),
RESPONSE=None):
"""Sets the properties
keep_entries, copy_to_logfile, ignored_exceptions
:keyword tuple ignored_exceptions: A sequence of *str* unqualified
class names (such as ``'Unauthorized'``) that will be ignored.
The values here will be compared with the ``__name__`` of the first
member of the ``info`` passed to :meth:`raising`.
"""
|
rasterstats-0.14.0
|
rasterstats-0.14.0//src/rasterstats/io.pyfile:/src/rasterstats/io.py:function:wrap_geom/wrap_geom
|
def wrap_geom(geom):
""" Wraps a geometry dict in an GeoJSON Feature
"""
return {'type': 'Feature', 'properties': {}, 'geometry': geom}
|
poros-0.0.40
|
poros-0.0.40//poros/poros_chars/tokenization.pyfile:/poros/poros_chars/tokenization.py:function:convert_tokens_to_ids/convert_tokens_to_ids
|
def convert_tokens_to_ids(vocab, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(vocab[token])
return ids
|
pyams_catalog
|
pyams_catalog//query.pyfile:/query.py:function:and_/and_
|
def and_(source, added):
"""Combine two queries with 'and'"""
if source is None:
source = added
else:
source &= added
return source
|
opsdroid_get_image_size-0.2.2
|
opsdroid_get_image_size-0.2.2//versioneer.pyfile:/versioneer.py:function:render_pep440_old/render_pep440_old
|
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += '.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
else:
rendered = '0.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
return rendered
|
declxml-1.1.3
|
declxml-1.1.3//declxml.pyfile:/declxml.py:function:_xml_namespace_strip/_xml_namespace_strip
|
def _xml_namespace_strip(root):
"""Strip the XML namespace prefix from all element tags under the given root Element."""
if '}' not in root.tag:
return
for element in root.iter():
if '}' in element.tag:
element.tag = element.tag.split('}')[1]
else:
pass
|
vcfstats
|
vcfstats//formula.pyfile:/formula.py:function:safe_split/safe_split
|
def safe_split(string, delimter, trim=True):
"""
Split a string using a single-character delimter
@params:
`string`: the string
`delimter`: the single-character delimter
`trim`: whether to trim each part. Default: True
@examples:
```python
    ret = safe_split("'a,b',c", ",")
# ret == ["'a,b'", "c"]
# ',' inside quotes will be recognized.
```
@returns:
The list of substrings
"""
ret = []
special1 = ['(', ')', '[', ']', '{', '}']
special2 = ["'", '"', '`']
special3 = '\\'
flags1 = [0, 0, 0]
flags2 = [False, False, False]
flags3 = False
start = 0
for i, char in enumerate(string):
if char == special3:
flags3 = not flags3
elif not flags3:
if char in special1:
index = special1.index(char)
if index % 2 == 0:
flags1[int(index / 2)] += 1
else:
flags1[int(index / 2)] -= 1
elif char in special2:
index = special2.index(char)
flags2[index] = not flags2[index]
elif char == delimter and not any(flags1) and not any(flags2):
rest = string[start:i]
if trim:
rest = rest.strip()
ret.append(rest)
start = i + 1
else:
flags3 = False
rest = string[start:]
if trim:
rest = rest.strip()
ret.append(rest)
return ret
|
pyknotid-0.5.3
|
pyknotid-0.5.3//pyknotid/invariants.pyfile:/pyknotid/invariants.py:function:_write_mathematica_script/_write_mathematica_script
|
def _write_mathematica_script(filen, text):
"""
Write the given text (mathematica code) to the given filename. It will
be wrapped in some MathKernel calling stuff first.
"""
with open(filen, 'w') as fileh:
fileh.write(
"""MathKernel -noprompt -run "commandLine={${1+"$1"}}; $(sed '1,/^exit/d' $0) ; Exit[]"
exit $?
"""
)
fileh.write(text)
fileh.close()
|
sqlint
|
sqlint//parser/keywords.pyfile:/parser/keywords.py:function:format/format
|
def format(keyword: str, keyword_style: str) ->str:
"""Returns formatted keyword
Args:
keyword: target keyword
keyword_style: formatting style
Returns:
formatted keyword
"""
expected: str = keyword
if keyword_style == 'lower':
expected = keyword.lower()
if keyword_style == 'upper-all':
expected = keyword.upper()
if keyword_style == 'upper-head':
expected = f'{keyword[0].upper()}{keyword[1:].lower()}'
return expected
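# Usage sketch (illustrative addition, not part of the original source):
#   format('select', 'upper-all')  -> 'SELECT'
#   format('SELECT', 'lower')      -> 'select'
#   format('select', 'upper-head') -> 'Select'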
|
hdx
|
hdx//utilities/dictandlist.pyfile:/utilities/dictandlist.py:function:dict_of_sets_add/dict_of_sets_add
|
def dict_of_sets_add(dictionary, key, value):
"""Add value to a set in a dictionary by key
Args:
dictionary (DictUpperBound): Dictionary to which to add values
key (Any): Key within dictionary
value (Any): Value to add to set in dictionary
Returns:
None
"""
set_objs = dictionary.get(key, set())
set_objs.add(value)
dictionary[key] = set_objs
|
mne
|
mne//viz/utils.pyfile:/viz/utils.py:function:_setup_cmap/_setup_cmap
|
def _setup_cmap(cmap, n_axes=1, norm=False):
"""Set color map interactivity."""
if cmap == 'interactive':
cmap = 'Reds' if norm else 'RdBu_r', True
elif not isinstance(cmap, tuple):
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
cmap = cmap, False if n_axes > 2 else True
return cmap
|
flywheel
|
flywheel//models/job_config_output.pyclass:JobConfigOutput/positional_to_model
|
@staticmethod
def positional_to_model(value):
"""Converts a positional argument to a model value"""
return value
|
asyncssh
|
asyncssh//sk_ecdsa.pyclass:_SKECDSAKey/decode_ssh_private
|
@classmethod
def decode_ssh_private(cls, packet):
"""Decode an SSH format SK ECDSA private key"""
curve_id = packet.get_string()
public_value = packet.get_string()
application = packet.get_string()
flags = packet.get_byte()
key_handle = packet.get_string()
reserved = packet.get_string()
return curve_id, public_value, application, flags, key_handle, reserved
|
pydens
|
pydens//batchflow/batchflow/research/utils.pyfile:/batchflow/batchflow/research/utils.py:function:get_metrics/get_metrics
|
def get_metrics(iteration, experiment, pipeline, metrics_var, metrics_name):
""" Function to evaluate metrics """
_ = iteration
pipeline = experiment[pipeline].pipeline
metrics = pipeline.get_variable(metrics_var)
return metrics.evaluate(metrics_name)
|
superset-0.30.1
|
superset-0.30.1//superset/db_engine_specs/presto.pyclass:PrestoEngineSpec/_is_column_name_quoted
|
@classmethod
def _is_column_name_quoted(cls, column_name: str) ->bool:
"""
Check if column name is in quotes
:param column_name: column name
:return: boolean
"""
return column_name.startswith('"') and column_name.endswith('"')
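# Usage sketch (illustrative addition, not part of the original source):
#   PrestoEngineSpec._is_column_name_quoted('"user id"') -> True
#   PrestoEngineSpec._is_column_name_quoted('user_id')   -> False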
|
cone.app-0.9.5
|
cone.app-0.9.5//src/cone/app/browser/ajax.pyfile:/src/cone/app/browser/ajax.py:function:dummy_livesearch_callback/dummy_livesearch_callback
|
def dummy_livesearch_callback(model, request):
"""Dummy callback for Livesearch. Set as default.
We receive the search term at ``request.params['term']``.
Livesearch expects a list of dicts with keys:
``label`` - Label of found item
``value`` - The value re-inserted in input. This is normally ``term``
``target`` - The target URL for rendering the content tile.
"""
term = request.params['term']
return [{'label': 'Root', 'value': term, 'target': request.application_url}
]
|
LbNightlyTools
|
LbNightlyTools//Scripts/GitlabMR.pyfile:/Scripts/GitlabMR.py:function:get_hook_trigger/get_hook_trigger
|
def get_hook_trigger(content):
"""
Return the ids of the note and discussion where the hook was triggered.
@param content: Contents of gitlab hook
"""
    return dict(
        project_id=content['project']['id'],
        merge_request_iid=content['merge_request']['iid'],
        discussion_id=content['object_attributes']['discussion_id'],
        note_id=content['object_attributes']['id'])
|
twitter_fire_scraper-2.2.0
|
twitter_fire_scraper-2.2.0//twitter_fire_scraper/util.pyfile:/twitter_fire_scraper/util.py:function:get_status_text/get_status_text
|
def get_status_text(status):
"""Given a Status, return its text.
This method favors longer text."""
if hasattr(status, 'full_text'):
return status.full_text
if hasattr(status, 'text'):
return status.text
raise Exception('Status {} has no text?'.format(status))
|
black-widow-1.7.2
|
black-widow-1.7.2//src/black_widow/app/managers/sniffer/pcap_sniffer.pyclass:PcapSniffer/_merge_addr
|
@staticmethod
def _merge_addr(host1: dict, host2: dict):
"""
Merge host1 and host2 by preferring host2
:param host1: {
'mac': <mac_addr>,
'mac_manufacturer': tuple,
'ip': <ip_addr>,
'ip_host': list
}
:param host2: //
:return: The host1 merged with host2
"""
if host1 is None:
return host2
if host2 is None:
return host1
host = host2.copy()
for key, val in host2.items():
if val is not None:
continue
host[key] = host1.get(key)
ip = host.get('ip')
ip_host = host.get('ip_host')
mac = host.get('mac')
mac_manufacturer = host.get('mac_manufacturer')
if ip is not None:
host['label'] = ip
host['title'] = ip_host
elif mac_manufacturer is None:
host['label'] = mac
else:
host['label'] = mac_manufacturer
host['title'] = mac
return host
|
pylinkirc-3.0.0
|
pylinkirc-3.0.0//coremods/handlers.pyfile:/coremods/handlers.py:function:handle_mode/handle_mode
|
def handle_mode(irc, source, command, args):
"""Protect against forced deoper attempts."""
target = args['target']
modes = args['modes']
if irc.is_internal_client(target) and not irc.is_internal_client(source):
if ('-o', None) in modes and (target == irc.pseudoclient.uid or not
irc.is_manipulatable_client(target)):
irc.mode(irc.sid, target, {('+o', None)})
|
audio.coders-4.0.2
|
audio.coders-4.0.2//.lib/pkg_resources.pyclass:IMetadataProvider/run_script
|
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
|
wt-superset-0.34.0
|
wt-superset-0.34.0//superset/db_engine_specs/presto.pyclass:PrestoEngineSpec/_expand_row_data
|
@classmethod
def _expand_row_data(cls, datum: dict, column: str, column_hierarchy: dict
) ->None:
"""
Separate out nested fields and its value in a row of data
:param datum: row of data
:param column: row column name
:param column_hierarchy: dictionary tracking structural columns and its
nested fields
"""
if column in datum:
row_data = datum[column]
row_children = column_hierarchy[column]['children']
if row_data and len(row_data) != len(row_children):
raise Exception(
                'The number of data values and the number of nested fields are not equal'
)
elif row_data:
for index, data_value in enumerate(row_data):
datum[row_children[index]] = data_value
else:
for row_child in row_children:
datum[row_child] = ''
|
dryer
|
dryer//_version.pyfile:/_version.py:function:render_git_describe/render_git_describe
|
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance']:
rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
else:
rendered = pieces['short']
if pieces['dirty']:
rendered += '-dirty'
return rendered
|
spawn-0.3.0
|
spawn-0.3.0//spawn/util/string.pyfile:/spawn/util/string.py:function:quote/quote
|
def quote(strpath):
"""Wrap the given string in quotes
:param strpath: A string representing a path
:type strpath: str
"""
if strpath[0] != '"' or strpath[-1] != '"':
return '"' + strpath + '"'
return strpath
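# Usage sketch (illustrative addition, not part of the original source):
#   quote('C:/path with spaces') -> '"C:/path with spaces"'
#   quote('"already quoted"')    -> '"already quoted"'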
|
jdcloud_cli-1.2.5
|
jdcloud_cli-1.2.5//jdcloud_cli/cement/core/arg.pyclass:IArgument/_setup
|
def _setup(app_obj):
"""
The _setup function is called during application initialization and
must 'setup' the handler object making it ready for the framework
or the application to make further calls to it.
:param app_obj: The application object
:returns: ``None``
"""
|
wagtailimporter-2.0.0
|
wagtailimporter-2.0.0//wagtailimporter/serializer.pyfile:/wagtailimporter/serializer.py:function:normalise/normalise
|
def normalise(url):
"""Normalize URL paths by appending a trailing slash."""
url = str(url)
if not url.endswith('/'):
url += '/'
return url
|
deepsleep
|
deepsleep//models/base_model.pyclass:BaseModel/_get_total_mapping
|
@staticmethod
def _get_total_mapping():
"""Inverse operation of :meth:`._map_preds`
This is called after reading back predictions from disk to create
confusion matrices. This must be overridden if :meth:`_map_preds`
is also overridden.
:return: A dictionary mapping each prediction in .csv to an integer
:rtype: dict
.. seealso:: :class:`deepsleep.models.SpindleModel`
"""
return {}
|
pydm-1.10.1
|
pydm-1.10.1//versioneer.pyfile:/versioneer.py:function:plus_or_dot/plus_or_dot
|
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if '+' in pieces.get('closest-tag', ''):
return '.'
return '+'
|
bpy
|
bpy//ops/gpencil.pyfile:/ops/gpencil.py:function:vertex_group_invert/vertex_group_invert
|
def vertex_group_invert():
"""Invert weights to the active vertex group
"""
pass
|
cli_code-28.1.0
|
cli_code-28.1.0//cli_code/util.pyfile:/cli_code/util.py:function:find/find
|
def find(item, vec):
"""return the index of the first occurence of item in vec"""
for i in range(len(vec)):
if item == vec[i]:
return i
return -1
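# Usage sketch (illustrative addition, not part of the original source):
#   find('b', ['a', 'b', 'c']) -> 1
#   find('z', ['a', 'b', 'c']) -> -1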
|
CGATReport-0.9.1
|
CGATReport-0.9.1//CGATReport/DataTree.pyfile:/CGATReport/DataTree.py:function:removeLeaf/removeLeaf
|
def removeLeaf(work, path):
"""remove leaf/branch at *path*.
raises KeyError if path is not found.
"""
if len(path) == 0:
work.clear()
else:
for x in path[:-1]:
work = work[x]
del work[path[-1]]
return work
|
superset-dywx-0.26.3
|
superset-dywx-0.26.3//superset/db_engine_specs.pyclass:HiveEngineSpec/modify_url_for_impersonation
|
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user, username):
"""
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
"""
pass
|
webscaff
|
webscaff//commands/sys/usr.pyfile:/commands/sys/usr.py:function:add_to_group/add_to_group
|
def add_to_group(ctx, user, group):
"""Adds a user into a group.
    Returns ``True`` if the user was just added (i.e. was not already in the group).
:rtype: bool
"""
result = ctx.sudo('adduser %s %s' % (user, group)).stdout.strip()
return 'Adding' in result
|
py42-1.1.1
|
py42-1.1.1//src/py42/util.pyfile:/src/py42/util.py:function:convert_datetime_to_timestamp_str/convert_datetime_to_timestamp_str
|
def convert_datetime_to_timestamp_str(date):
"""Converts the given datetime to a formatted date str. The format matches strftime
directives %Y-%m-%dT%H:%M:%S.%f.
Args:
date (datetime): The datetime object to convert.
Returns:
(str): A str representing the given date. Example output looks like
'2020-03-25T15:29:04.465Z'.
"""
prefix = date.strftime(u'%Y-%m-%dT%H:%M:%S.%f')[:-3]
return u'{0}Z'.format(prefix)
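# Usage sketch (illustrative addition, not part of the original source):
#   from datetime import datetime
#   convert_datetime_to_timestamp_str(datetime(2020, 3, 25, 15, 29, 4, 465000))
#   -> '2020-03-25T15:29:04.465Z'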
|
aiowamp
|
aiowamp//serializers/json.pyfile:/serializers/json.py:function:is_encoded_bytes/is_encoded_bytes
|
def is_encoded_bytes(s: str) ->bool:
"""Check if the given string contains encoded binary data.
Args:
s: String to check.
Returns:
Whether the given string holds encoded binary data.
"""
return s.startswith('\x00')
|
funtool-0.0.87
|
funtool-0.0.87//funtool/state_collection.pyfile:/funtool/state_collection.py:function:groups_in_grouping/groups_in_grouping
|
def groups_in_grouping(state_collection, grouping_name):
"""
Returns an iterable which goes over all the groups in a grouping
Use instead of direct access to future proof in case of changes in StateCollection
"""
return state_collection.groupings.get(grouping_name, {}).values()
|
transx2gtfs-0.4.1
|
transx2gtfs-0.4.1//transx2gtfs/routes.pyfile:/transx2gtfs/routes.py:function:get_mode/get_mode
|
def get_mode(mode):
"""Parse mode from TransXChange value"""
if mode in ['tram', 'trolleyBus']:
return 0
elif mode in ['underground', 'metro']:
return 1
elif mode == 'rail':
return 2
elif mode in ['bus', 'coach']:
return 3
elif mode == 'ferry':
return 4
|
wtfix
|
wtfix//protocol/spec.pyclass:AttributeValueMappingsMixin/get_value
|
@classmethod
def get_value(cls, name):
"""
Given a type name, retrieve the corresponding value from the FIX specification.
:param name: a type name
:return: the value associated with the type name.
"""
return cls.__dict__[name]
|
gerrit_mq-0.3.0
|
gerrit_mq-0.3.0//gerrit_mq/webfront.pyfile:/gerrit_mq/webfront.py:function:extract_common_args/extract_common_args
|
def extract_common_args(req_args):
"""
Pull common query parameter arguments out of a query string
"""
project_filter = req_args.get('project', None)
branch_filter = req_args.get('branch', None)
try:
offset = max(int(req_args['offset']), 0)
except (KeyError, ValueError):
offset = 0
try:
limit = min(int(req_args['limit']), 500)
except (KeyError, ValueError):
limit = 25
return project_filter, branch_filter, offset, limit
|
sfctl
|
sfctl//custom_cluster_upgrade.pyfile:/custom_cluster_upgrade.py:function:create_upgrade_health_policy/create_upgrade_health_policy
|
def create_upgrade_health_policy(delta_unhealthy_nodes,
ud_delta_unhealthy_nodes):
"""Create an upgrade node health policy"""
from azure.servicefabric.models import ClusterUpgradeHealthPolicyObject
if not any([delta_unhealthy_nodes, ud_delta_unhealthy_nodes]):
return None
    return ClusterUpgradeHealthPolicyObject(
        max_percent_delta_unhealthy_nodes=delta_unhealthy_nodes,
        max_percent_upgrade_domain_delta_unhealthy_nodes=ud_delta_unhealthy_nodes)
|
pydss-0.1a
|
pydss-0.1a//pydss/shared/MathUtil.pyclass:MathUtil/ETKInvert
|
@classmethod
def ETKInvert(cls, A, norder, error):
"""Matrix= reference to matrix of doulbes
Norder = order of matrix (assumed square)
Error = 0 if no error;
= 1 if not enough heap to alloc temp array
= 2 if matrix can't be inverted
This routine will invert a non-symmetric matrix. Index is assumed to
follow the FORTRAN standard, not the Pascal standard. That is the data
are ordered by first subscript first, then second subscript. This routine
computes its own indexing, leaving nothing to the whims of a cantankerous compiler.
It assumes that the matrix is dimensioned to exactly the number of elements
needed. Apologies to Fortran users who are accustomed to over dimensioning
stuff.
"""
cls.L = norder
error.setValue(0)
LT = [None] * cls.L
if len(LT) == 0:
error.setValue(1)
return
_0 = True
j = 0
while True:
if _0 is True:
_0 = False
else:
j += 1
if not j < cls.L:
break
LT[j] = 0
T1 = 0.0
k = 1
_1 = True
M = 0
while True:
if _1 is True:
_1 = False
else:
M += 1
if not M < cls.L:
break
_2 = True
LL = 0
while True:
if _2 is True:
_2 = False
else:
LL += 1
if not LL < cls.L:
break
if LT[LL] != 1:
RMY = cls.Math.abs(A[cls.index(LL, LL)]) - cls.Math.abs(T1)
if RMY > 0.0:
T1 = A[cls.index(LL, LL)]
k = LL
RMY = cls.Math.abs(T1)
if RMY == 0.0:
error.setValue(2)
return
T1 = 0.0
LT[k] = 1
_3 = True
i = 0
while True:
if _3 is True:
_3 = False
else:
i += 1
if not i < cls.L:
break
if i != k:
_4 = True
j = 0
while True:
if _4 is True:
_4 = False
else:
j += 1
if not j < cls.L:
break
if j != k:
A[cls.index(i, j)] = A[cls.index(i, j)] - A[cls.
index(i, k)] * A[cls.index(k, j)] / A[cls.index
(k, k)]
A[cls.index(k, k)] = -1.0 / A[cls.index(k, k)]
_5 = True
i = 0
while True:
if _5 is True:
_5 = False
else:
i += 1
if not i < cls.L:
break
if i != k:
A[cls.index(i, k)] = A[cls.index(i, k)] * A[cls.index(k, k)]
A[cls.index(k, i)] = A[cls.index(k, i)] * A[cls.index(k, k)]
_6 = True
j = 0
while True:
if _6 is True:
_6 = False
else:
j += 1
if not j < cls.L:
break
_7 = True
k = 0
while True:
if _7 is True:
_7 = False
else:
k += 1
if not k < cls.L:
break
A[cls.index(j, k)] = -A[cls.index(j, k)]
LT = None
|
pep8-1.7.1
|
pep8-1.7.1//pep8.pyfile:/pep8.py:function:trailing_blank_lines/trailing_blank_lines
|
def trailing_blank_lines(physical_line, lines, line_number, total_lines):
"""Trailing blank lines are superfluous.
Okay: spam(1)
W391: spam(1)\\n
However the last line should end with a new line (warning W292).
"""
if line_number == total_lines:
stripped_last_line = physical_line.rstrip()
if not stripped_last_line:
return 0, 'W391 blank line at end of file'
if stripped_last_line == physical_line:
return len(physical_line), 'W292 no newline at end of file'
|
geotecha
|
geotecha//mathematics/fourier.pyfile:/mathematics/fourier.py:function:func_mirror_for_odd_weight/func_mirror_for_odd_weight
|
def func_mirror_for_odd_weight(x, *myargs):
"""Rotate function by 180 degrees (or mirror about x and y-axis in turn).
Given a composite function f(x) * w(x) where w(x) is an odd weighting
function, return g(x) such that g(x)*w(x) gives same value as f(-x)*w(-x).
This can be useful in transforming a fourier sine integral
with negative integration limits to one with positive limits.
Parameters
----------
x : float
Value to evaluate function at.
func : function/callable
Function to mirror. Always the first argument after `x`.
myargs : optional
Any remaining arguments will be passed to `func`.
Returns
-------
out : ndarray
Value of -func(-x, *myargs)
See Also
--------
    func_mirror_for_even_weight : mirror for an even weight function
Examples
--------
>>> def f(x, a):
... return a*x+1
>>> func_mirror_for_odd_weight(5, f, 2)
9
>>> def ff(x, a):
... return a*x + 1.j
>>> func_mirror_for_odd_weight(3, real_func, ff, 4)
12.0
"""
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return -func(*myargs)
|
nutsml
|
nutsml//imageutil.pyfile:/imageutil.py:function:rerange/rerange
|
def rerange(image, old_min, old_max, new_min, new_max, dtype):
"""
Return image with values in new range.
Note: The default range of images is [0, 255] and most image
processing functions expect this range and will fail otherwise.
However, as input to neural networks re-ranged images, e.g [-1, +1]
are sometimes needed.
>>> import numpy as np
>>> image = np.array([[0, 255], [255, 0]])
>>> rerange(image, 0, 255, -1, +1, 'float32')
array([[-1., 1.],
[ 1., -1.]], dtype=float32)
:param numpy.array image: Should be a numpy array of an image.
:param int|float old_min: Current minimum value of image, e.g. 0
:param int|float old_max: Current maximum value of image, e.g. 255
:param int|float new_min: New minimum, e.g. -1.0
:param int|float new_max: New maximum, e.g. +1.0
:param numpy datatype dtype: Data type of output image,
e.g. float32' or np.uint8
:return: Image with values in new range.
"""
image = image.astype('float32')
old_range, new_range = old_max - old_min, new_max - new_min
image = (image - old_min) / old_range * new_range + new_min
return image.astype(dtype)
|
Pytzer-0.4.3
|
Pytzer-0.4.3//pytzer/parameters.pyfile:/pytzer/parameters.py:function:psi_H_Li_Br_PK74/psi_H_Li_Br_PK74
|
def psi_H_Li_Br_PK74(T, P):
"""c-c'-a: hydrogen lithium bromide [PK74]."""
psi = 0.0
valid = T == 298.15
return psi, valid
|
RunToolkit
|
RunToolkit//for_file/for_excel.pyfile:/for_file/for_excel.py:function:num2title/num2title
|
def num2title(n: 'int > 0') ->str:
"""
LeetCode 168: Excel Sheet Column Title
Given a positive integer, return its corresponding column title as appear in an Excel sheet.
"""
res = ''
while n > 26:
n, yu = divmod(n - 1, 26)
res = chr(65 + yu) + res
res = chr(64 + n) + res
return res
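# Usage sketch (illustrative addition, not part of the original source):
#   num2title(1)  -> 'A'
#   num2title(26) -> 'Z'
#   num2title(28) -> 'AB'
#   num2title(52) -> 'AZ'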
|
rich_base_provider-1.0.1
|
rich_base_provider-1.0.1//rich_base_provider/sysadmin/recharge/recharge_rule/models.pyclass:RechargeRule/get_recharge_rule_list_by_kwargs
|
@classmethod
def get_recharge_rule_list_by_kwargs(cls, **kwargs):
"""
        Get the recharge rule list according to the kwargs dict.
:param kwargs:
:return:
"""
org_id = kwargs.get('org_id')
rule_apply_type = kwargs.get('rule_apply_type')
if kwargs.get('rule_name'):
return cls.objects(org_id=org_id, rule_apply_type=rule_apply_type,
rule_name=kwargs.get('rule_name'), status__nin=[cls.get_dict_id
('recharge_status', '删除')]).all()
if kwargs.get('recharge_condition') and kwargs.get('giving_count'):
return cls.objects(org_id=org_id, rule_apply_type=rule_apply_type,
recharge_condition=kwargs.get('recharge_condition'),
giving_count=kwargs.get('giving_count'), status__nin=[cls.
get_dict_id('recharge_status', '删除')]).all()
|
messages-0.5.0
|
messages-0.5.0//messages/_config.pyfile:/messages/_config.py:function:retrieve_pwd_from_config/retrieve_pwd_from_config
|
def retrieve_pwd_from_config(msg, cfg):
"""
Retrieve auth from profile configuration and set in msg.auth attr.
Args:
:msg: (Message class) an instance of a message class.
:cfg: (jsonconfig.Config) config instance.
"""
msg_type = msg.__class__.__name__.lower()
key_fmt = msg.profile + '_' + msg_type
pwd = cfg.pwd[key_fmt].split(' :: ')
if len(pwd) == 1:
msg.auth = pwd[0]
else:
msg.auth = tuple(pwd)
|
dropbox-10.1.2
|
dropbox-10.1.2//dropbox/team.pyclass:ResendSecondaryEmailResult/success
|
@classmethod
def success(cls, val):
"""
Create an instance of this class set to the ``success`` tag with value
``val``.
:param str val:
:rtype: ResendSecondaryEmailResult
"""
return cls('success', val)
|
CNFgen-0.8.4.1
|
CNFgen-0.8.4.1//cnfformula/cnf.pyclass:CNF/greater_than_constraint
|
@classmethod
def greater_than_constraint(cls, variables, lowerbound):
"""Clauses encoding a "strictly greater than" constraint
E.g. X1 + X2 + X3 + X4 > 2
(X1 v X2 v X3)
(X1 v X2 v X4)
(X1 v X3 v X4)
(X2 v X3 v X4)
Parameters
----------
variables : list of variables
variables in the constraint
lowerbound: int
lower bound of the constraint
Returns
-------
a list of clauses
Examples
--------
>>> list(CNF.greater_than_constraint(['a','b','c'],2))
[[(True, 'a')], [(True, 'b')], [(True, 'c')]]
>>> list(CNF.greater_than_constraint(['a'],0))
[[(True, 'a')]]
>>> list(CNF.greater_than_constraint(['a','b','c'],-1))
[]
>>> list(CNF.greater_than_constraint(['a','b','c'],3))
[[]]
"""
return cls._inequality_constraint_builder(variables, lowerbound,
greater=True)
|
wpt-superset-1.0.1
|
wpt-superset-1.0.1//superset/dataframe.pyfile:/superset/dataframe.py:function:dedup/dedup
|
def dedup(l, suffix='__', case_sensitive=True):
"""De-duplicates a list of string by suffixing a counter
Always returns the same number of entries as provided, and always returns
unique values. Case sensitive comparison by default.
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'])))
foo,bar,bar__1,bar__2,Bar
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False)))
foo,bar,bar__1,bar__2,Bar__3
"""
new_l = []
seen = {}
for s in l:
s_fixed_case = s if case_sensitive else s.lower()
if s_fixed_case in seen:
seen[s_fixed_case] += 1
s += suffix + str(seen[s_fixed_case])
else:
seen[s_fixed_case] = 0
new_l.append(s)
return new_l
|
elist-0.4.64
|
elist-0.4.64//elist/elist.pyfile:/elist/elist.py:function:find_all/find_all
|
def find_all(ol, test_func, *args):
"""
from elist.elist import *
from elist.jprint import pobj
def test_func(ele,x):
cond = (ele > x)
return(cond)
ol = [1,2,3,4,5,6,7]
rslt = find_all(ol,test_func,3)
pobj(rslt)
"""
rslt = []
length = ol.__len__()
for i in range(0, length):
cond = test_func(ol[i], *args)
if cond:
rslt.append({'index': i, 'value': ol[i]})
else:
pass
return rslt
|
gtrain-0.4.0
|
gtrain-0.4.0//gtrain/utils.pyfile:/gtrain/utils.py:function:join_weights_and_biases/join_weights_and_biases
|
def join_weights_and_biases(weights, biases):
"""
joins two arrays into one
:param weights: list of numpy arrays.
:param biases: list of numpy arrays with same length as weights.
:return: list of list with two numpy arrays for weights and biases, respectively.
        - the first index defines the layer and the second selects the weight (0) or bias (1)
"""
out = list()
for i, _ in enumerate(weights):
out.append([weights[i], biases[i]])
return out
|
memcnn-1.3.2
|
memcnn-1.3.2//memcnn/utils/stats.pyfile:/memcnn/utils/stats.py:function:accuracy/accuracy
|
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
|
Bcfg2-1.3.4
|
Bcfg2-1.3.4//src/lib/Bcfg2/Server/Lint/RequiredAttrs.pyfile:/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py:function:is_filename/is_filename
|
def is_filename(val):
""" Return True if val is a string describing a valid full path
"""
return val.startswith('/') and len(val) > 1
|
pyipv8-2.1.0
|
pyipv8-2.1.0//ipv8/messaging/deprecated/encoding.pyfile:/ipv8/messaging/deprecated/encoding.py:function:_a_decode_long/_a_decode_long
|
def _a_decode_long(stream, offset, count, _):
"""
'a2J42',3,2 --> 5,42
"""
return offset + count, int(stream[offset:offset + count])
|
kin-base-1.4.0
|
kin-base-1.4.0//kin_base/stellarxdr/xdrgen.pyfile:/kin_base/stellarxdr/xdrgen.py:function:p_type_specifier_list/p_type_specifier_list
|
def p_type_specifier_list(t):
"""type_specifier_list : COMMA type_specifier type_specifier_list
| empty"""
|
alnitak-0.2.2
|
alnitak-0.2.2//alnitak/dane.pyfile:/alnitak/dane.py:function:relative_to/relative_to
|
def relative_to(path, target):
"""Return the relative path of the input to the target path.
For example:
path = /a/b/d/c/Y
target = /a/b/c/X
Then this function will return:
'../../c/X'
Args:
path (pathlib.Path):
target (pathlib.Path):
Returns:
str: the path of 'path' relative to 'target'.
"""
for n, p in enumerate(path.parents):
try:
q = target.relative_to(p)
except ValueError:
continue
return '../' * n + str(q)
return str(target)
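# Usage sketch (illustrative addition, not part of the original source):
#   from pathlib import Path
#   relative_to(Path('/a/b/d/c/Y'), Path('/a/b/c/X')) -> '../../c/X'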
|
snekchek
|
snekchek//config_gen.pyfile:/config_gen.py:function:ask_str/ask_str
|
def ask_str(question, default=None):
"""Asks for a simple string"""
    default_q = ' [default: {0}]'.format(default) if default is not None else ''
    answer = input('{0}{1}: '.format(question, default_q))
if answer == '':
return default
return answer
|
sqlalchemy
|
sqlalchemy//ext/automap.pyfile:/ext/automap.py:function:classname_for_table/classname_for_table
|
def classname_for_table(base, tablename, table):
"""Return the class name that should be used, given the name
of a table.
The default implementation is::
return str(tablename)
Alternate implementations can be specified using the
:paramref:`.AutomapBase.prepare.classname_for_table`
parameter.
:param base: the :class:`.AutomapBase` class doing the prepare.
:param tablename: string name of the :class:`_schema.Table`.
:param table: the :class:`_schema.Table` object itself.
:return: a string class name.
.. note::
In Python 2, the string used for the class name **must** be a
non-Unicode object, e.g. a ``str()`` object. The ``.name`` attribute
of :class:`_schema.Table` is typically a Python unicode subclass,
so the
``str()`` function should be applied to this name, after accounting for
any non-ASCII characters.
"""
return str(tablename)
|
tkquick
|
tkquick//gui/tools.pyfile:/gui/tools.py:function:unique_int/unique_int
|
def unique_int(values):
"""
    Return the smallest non-negative integer not present in ``values``.
    E.g. given [3, 6] it returns 0; if each result is appended to the list,
    repeated calls yield 1, 2, 4, 5, 7, 8, ...
"""
last = 0
for num in values:
if last not in values:
break
else:
last += 1
return last
|
distanceutils-0.5.1
|
distanceutils-0.5.1//distance/base.pyclass:default_fragments/get_sections
|
@staticmethod
def get_sections(target):
"""Get default fragment sections of the given class.
Parameters
----------
target : BaseObject class
The class of which to get the default fragment sections.
Returns
-------
sections : list of Section
The default fragment sections of `target`.
Raises
------
AttributeError
If default fragments for `target` were never specified.
"""
return target.__sections
|
pywinauto-0.6.8
|
pywinauto-0.6.8//pywinauto/actionlogger.pyclass:_CustomLogger/reset_level
|
@staticmethod
def reset_level():
"""Reset a logging level to a default"""
pass
|
iapws-1.4.1
|
iapws-1.4.1//iapws/iapws97.pyfile:/iapws/iapws97.py:function:_Backward2a_P_hs/_Backward2a_P_hs
|
def _Backward2a_P_hs(h, s):
"""Backward equation for region 2a, P=f(h,s)
Parameters
----------
h : float
Specific enthalpy, [kJ/kg]
s : float
Specific entropy, [kJ/kgK]
Returns
-------
P : float
Pressure, [MPa]
References
----------
IAPWS, Revised Supplementary Release on Backward Equations for Pressure
as a Function of Enthalpy and Entropy p(h,s) for Regions 1 and 2 of the
IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of
Water and Steam, http://www.iapws.org/relguide/Supp-PHS12-2014.pdf, Eq 3
Examples
--------
>>> _Backward2a_P_hs(2800,6.5)
1.371012767
>>> _Backward2a_P_hs(2800,9.5)
0.001879743844
>>> _Backward2a_P_hs(4100,9.5)
0.1024788997
"""
I = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3,
3, 3, 4, 5, 5, 6, 7]
J = [1, 3, 6, 16, 20, 22, 0, 1, 2, 3, 5, 6, 10, 16, 20, 22, 3, 16, 20,
0, 2, 3, 6, 16, 16, 3, 16, 3, 1]
    n = [-0.0182575361923032, -0.125229548799536, 0.592290437320145,
        6.04769706185122, 238.624965444474, -298.639090222922,
        0.051225081304075, -0.437266515606486, 0.413336902999504,
        -5.16468254574773, -5.57014838445711, 12.8555037824478,
        11.414410895329, -119.504225652714, -2847.7798596156,
        4317.57846408006, 1.1289404080265, 1974.09186206319,
        1516.12444706087, 0.0141324451421235, 0.585501282219601,
        -2.97258075863012, 5.94567314847319, -6236.56565798905,
        9659.86235133332, 6.81500934948134, -6332.07286824489,
        -5.5891922446576, 0.0400645798472063]
nu = h / 4200
sigma = s / 12
suma = 0
for i, j, ni in zip(I, J, n):
suma += ni * (nu - 0.5) ** i * (sigma - 1.2) ** j
return 4 * suma ** 4
|
stk-2020.5.5.0
|
stk-2020.5.5.0//src/stk/serialization/json/serializers/molecule/utilities.pyfile:/src/stk/serialization/json/serializers/molecule/utilities.py:function:atom_to_json/atom_to_json
|
def atom_to_json(atom):
"""
Return a JSON representation of `atom`.
Parameters
----------
atom : :class:`.Atom`
The atom to serialize.
Returns
-------
:class:`dict`
A JSON representation of `atom`.
"""
return atom.get_atomic_number(), atom.get_charge()
|
pywinauto
|
pywinauto//base_wrapper.pyclass:BaseWrapper/_create_wrapper
|
@staticmethod
def _create_wrapper(cls_spec, element_info, myself):
"""Create a wrapper object according to the specified element info"""
if cls_spec != myself:
obj = object.__new__(cls_spec)
obj.__init__(element_info)
return obj
new_class = cls_spec.find_wrapper(element_info)
obj = object.__new__(new_class)
obj.__init__(element_info)
return obj
|