repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
concord232-0.15
|
concord232-0.15//concord232/concord_helpers.pyfile:/concord232/concord_helpers.py:function:total_secs/total_secs
|
def total_secs(td):
    """Return the total number of seconds spanned by *td* as a float.

    :param td: a ``datetime.timedelta`` object
    :return: days + seconds + microseconds combined, in seconds
    """
    # timedelta.total_seconds() is the stdlib equivalent of the manual
    # days/seconds/microseconds arithmetic (available since Python 2.7).
    return td.total_seconds()
|
src
|
src//drel/drel_ast_yacc.pyfile:/drel/drel_ast_yacc.py:function:p_for_stmt/p_for_stmt
|
def p_for_stmt(p):
    """for_stmt : FOR id_list IN testlist_star_expr suite
    | FOR "[" id_list "]" IN testlist_star_expr suite """
    # NOTE: PLY reads the grammar rule from the docstring above -- it must
    # not be edited.  The two alternatives differ by the bracket tokens, so
    # the production length tells us which one matched.
    is_unbracketed = len(p) == 6
    if is_unbracketed:
        p[0] = ['FOR', p[2], p[4], p[5]]
    else:
        p[0] = ['FOR', p[3], p[6], p[7]]
|
zettels-0.6.0
|
zettels-0.6.0//zettels/zettelparser.pyclass:Zettelparser/_ignorify
|
@staticmethod
def _ignorify(patterns=['*~']):
"""
pathspec implements gitignore style pattern matching. However,
it doesn't ignore patterns, it *matches* patterns.
So every pattern needs to be reversed by adding a '!' (or removing)
it.
"""
patterns = list(patterns)
reversed_patterns = ['*']
for p in patterns:
if p.startswith('!'):
reversed_patterns.append(p.replace('!', '', 1))
else:
reversed_patterns.append('!' + p)
return reversed_patterns
|
helpers
|
helpers//helpers.pyfile:/helpers.py:function:view_as_windows/view_as_windows
|
def view_as_windows(padded, patch_size, stride=None):
    """Slice a padded image into sliding square windows.

    The first window covers rows/cols [0, patch_size); each subsequent
    window is shifted by ``stride`` pixels along each axis.

    :param padded: padded 2D image (sliceable, e.g. a numpy array),
        padded according to get_padded_patches
    :param patch_size: side length of each output patch
    :param stride: step between consecutive patches; defaults to
        ``patch_size`` (non-overlapping tiling)
    :return: list of patches in row-major order
    """
    if stride is None:
        stride = patch_size
    overlap = int(patch_size - stride)
    n_first = int((padded.shape[0] - overlap) / stride)
    n_second = int((padded.shape[1] - overlap) / stride)
    windows = []
    for i in range(n_first):
        top = i * stride
        for j in range(n_second):
            left = j * stride
            windows.append(padded[top:top + patch_size, left:left + patch_size])
    return windows
|
humilis-1.5.10
|
humilis-1.5.10//humilis/utils.pyfile:/humilis/utils.py:function:get_cf_name/get_cf_name
|
def get_cf_name(env_name, layer_name, stage=None):
    """Build the CloudFormation stack name for a layer.

    :param env_name: environment name
    :param layer_name: layer name
    :param stage: optional deployment stage, appended when given
    :return: '<env>-<layer>' or '<env>-<layer>-<stage>'
    """
    stack_name = f'{env_name}-{layer_name}'
    if stage is not None:
        stack_name = f'{stack_name}-{stage}'
    return stack_name
|
jupyter-require-0.6.1
|
jupyter-require-0.6.1//setupbase.pyfile:/setupbase.py:function:_join_translated/_join_translated
|
def _join_translated(translated_parts, os_sep_class):
"""Join translated glob pattern parts.
This is different from a simple join, as care need to be taken
to allow ** to match ZERO or more directories.
"""
res = ''
for part in translated_parts[:-1]:
if part == '.*':
res += part
else:
res += part + os_sep_class
if translated_parts[-1] == '.*':
res += '.+'
res += '({os_sep_class}?.*)?'.format(os_sep_class=os_sep_class)
else:
res += translated_parts[-1]
return res
|
il2ds-middleware-0.10.3
|
il2ds-middleware-0.10.3//il2ds_middleware/interface/service.pyclass:IPilotsService/crashed
|
def crashed(info):
    """
    Process 'user crashed' event.

    Interface method declaration -- implementations provide the body.

    Input:
    `info`    # An object with information about event's time, user's
              # callsign, aircraft and position on map.
    """
|
Cactus-3.3.3
|
Cactus-3.3.3//cactus/plugin/defaults.pyfile:/cactus/plugin/defaults.py:function:postBuildStatic/postBuildStatic
|
def postBuildStatic(static):
    """
    Plugin hook: called after building (copying to the build folder) a
    static file.  Default implementation does nothing.

    :param static: The static file that was just built.
    :returns: None
    """
    pass
|
renku
|
renku//core/models/provenance/qualified.pyclass:Usage/from_revision
|
@classmethod
def from_revision(cls, client, path, revision='HEAD', **kwargs):
    """Return dependency from given path and revision.

    Builds the underlying Entity from the repository state at *revision*
    (default 'HEAD') and wraps it in this class; extra keyword arguments
    are forwarded to the constructor.
    """
    # Local import -- presumably to avoid a circular import at module
    # load time; TODO confirm.
    from renku.core.models.entities import Entity
    return cls(entity=Entity.from_revision(client, path, revision), **kwargs)
|
byteArk-0.0.12
|
byteArk-0.0.12//byteArk/requests/urllib3/contrib/pyopenssl.pyfile:/byteArk/requests/urllib3/contrib/pyopenssl.py:function:_validate_dependencies_met/_validate_dependencies_met
|
def _validate_dependencies_met():
    """
    Verifies that PyOpenSSL's package-level dependencies have been met.
    Throws `ImportError` if they are not met.
    """
    # 'cryptography' must expose Extensions.get_extension_for_class
    # (added in v1.3.4).
    from cryptography.x509.extensions import Extensions
    if getattr(Extensions, 'get_extension_for_class', None) is None:
        raise ImportError(
            "'cryptography' module missing required functionality. Try upgrading to v1.3.4 or newer."
            )
    # 'pyOpenSSL' certificates must expose the private _x509 handle
    # (added in v0.14).
    from OpenSSL.crypto import X509
    probe_cert = X509()
    if getattr(probe_cert, '_x509', None) is None:
        raise ImportError(
            "'pyOpenSSL' module missing required functionality. Try upgrading to v0.14 or newer."
            )
|
dp4gp-1.01
|
dp4gp-1.01//dp4gp/utils.pyfile:/dp4gp/utils.py:function:dp_unnormalise/dp_unnormalise
|
def dp_unnormalise(y, normalisation_parameters):
    """
    new_y = dp_unnormalise(y, normalisation_parameters)
    Reverses normalisation of the data *y*: scales by the stored 'std'
    then shifts by the stored 'mean'.
    """
    scaled = y * normalisation_parameters['std']
    return scaled + normalisation_parameters['mean']
|
trytond
|
trytond//modules/sale_payment_gateway/payment.pyclass:Payment/search_company
|
@classmethod
def search_company(cls, name, clause):
    """
    Searcher for the company field: delegates the search clause to the
    related sale's company.
    """
    return [('sale.company', *clause[1:])]
|
Products.CMFCore-2.4.6
|
Products.CMFCore-2.4.6//Products/CMFCore/interfaces/_tools.pyclass:IWorkflowTool/getHistoryOf
|
def getHistoryOf(wf_id, ob):
    """ Get the history of an object for a given workflow.

    Interface declaration -- no implementation here.

    o 'wf_id' is the id of the selected workflow.
    o 'ob' is the content object.
    o Invoked by workflow definitions.
    o Permission: Private (Python only)
    """
|
bleach-3.1.5
|
bleach-3.1.5//bleach/_vendor/html5lib/filters/alphabeticalattributes.pyfile:/bleach/_vendor/html5lib/filters/alphabeticalattributes.py:function:_attr_key/_attr_key
|
def _attr_key(attr):
"""Return an appropriate key for an attribute for sorting
Attributes have a namespace that can be either ``None`` or a string. We
can't compare the two because they're different types, so we convert
``None`` to an empty string first.
"""
return attr[0][0] or '', attr[0][1]
|
parceqt-0.9.0
|
parceqt-0.9.0//parceqt/treemodel.pyclass:TreeModel/node_tooltip
|
@classmethod
def node_tooltip(cls, node):
    """Return text for a tooltip for the node.

    Builds the node's attribute dict via ``cls.node_dict``, adds the
    node's index within its parent and the parent's repr ('-' when there
    is no parent), then formats with TOKEN_TOOLTIP for tokens (with an
    empty group shown as '-') or CONTEXT_TOOLTIP otherwise.
    """
    d = cls.node_dict(node)
    d.update(index=node.parent_index() if node.parent else '-', parent=cls.
        node_repr(node.parent) if node.parent else '-')
    if node.is_token:
        # Tokens may have a falsy 'group'; display a dash instead.
        if not d['group']:
            d['group'] = '-'
    template = cls.TOKEN_TOOLTIP if node.is_token else cls.CONTEXT_TOOLTIP
    return template.format(**d).strip()
|
histogrammar
|
histogrammar//hgawk_grammar.pyfile:/hgawk_grammar.py:function:p_dictorsetmaker_star_1/p_dictorsetmaker_star_1
|
def p_dictorsetmaker_star_1(p):
    """dictorsetmaker_star : COMMA test COLON test"""
    # NOTE: PLY reads the grammar rule from the docstring above -- do not
    # edit it.  The production yields parallel key and value lists.
    keys = [p[2]]
    values = [p[4]]
    p[0] = keys, values
|
xalpha-0.8.10
|
xalpha-0.8.10//xalpha/indicator.pyfile:/xalpha/indicator.py:function:_upcount/_upcount
|
def _upcount(ls):
"""
count the ratio of upmove days by given a list
"""
count = 0
for i in range(len(ls) - 1):
if ls.iloc[i + 1] > ls.iloc[i]:
count += 1
return count / (len(ls) - 1)
|
code_parser
|
code_parser//core.pyfile:/core.py:function:extract_all_words_from_list_names/extract_all_words_from_list_names
|
def extract_all_words_from_list_names(names_list):
    """
    Split every snake_case name in *names_list* into its component words.
    :param names_list: list of snake_case name strings
    :return: flat list of words, preserving input order
    """
    return [word for name in names_list for word in name.split('_')]
|
pyraf-2.1.15
|
pyraf-2.1.15//required_pkgs/stsci.tools/lib/stsci/tools/fileutil.pyfile:/required_pkgs/stsci.tools/lib/stsci/tools/fileutil.py:function:parseExtn/parseExtn
|
def parseExtn(extn=None):
    """
    Parse a string representing a qualified fits extension name as in the
    output of `parseFilename` and return a tuple ``(str(extname),
    int(extver))``, which can be passed to `astropy.io.fits` functions using
    the 'ext' kw.
    Default return is the first extension in a fits file.
    Examples
    --------
    ::
    >>> parseExtn('sci, 2')
    ('sci', 2)
    >>> parseExtn('2')
    ('', 2)
    >>> parseExtn('sci')
    ('sci', 1)
    """
    if not extn:
        return '', 0
    try:
        lext = extn.split(',')
    except AttributeError:
        # Non-string input (no .split): fall back to the first extension.
        # (Was a bare `except:`, which also hid unrelated errors.)
        return '', 1
    if len(lext) == 1 and lext[0].isdigit():
        return '', int(lext[0])
    elif len(lext) == 2:
        return lext[0], int(lext[1])
    else:
        return lext[0], 1
|
filebridging-0.0.9
|
filebridging-0.0.9//filebridging/client.pyfile:/filebridging/client.py:function:get_action/get_action
|
def get_action(action):
    """Parse abbreviations for `action`.

    Any string starting with 'r'/'R' means 'receive', with 's'/'S' means
    'send'; anything else (including non-strings) yields None.
    """
    if not isinstance(action, str):
        return None
    lowered = action.lower()
    if lowered.startswith('r'):
        return 'receive'
    if lowered.startswith('s'):
        return 'send'
    return None
|
crmngr
|
crmngr//utils.pyfile:/utils.py:function:truncate/truncate
|
def truncate(string, max_len=1000):
    """Return *string* (via ``str()``) truncated to at most ``max_len``
    characters, with a '...TRUNCATED' marker appended when truncation
    occurred.
    """
    marker = '...TRUNCATED'
    string = str(string)
    if len(string) > max_len:
        # Reserve room for the marker so the result never exceeds max_len
        # (previously the marker was appended *after* max_len characters,
        # producing up to max_len + 12 chars, contradicting the docstring).
        return string[:max_len - len(marker)] + marker
    return string
|
skccm-0.2.dev
|
skccm-0.2.dev//skccm/utilities.pyfile:/skccm/utilities.py:function:train_test_split/train_test_split
|
def train_test_split(x1, x2, percent=0.75):
    """Split two embedded time series into training and testing portions.

    Parameters
    ----------
    x1 : 2D array
        Embed time series.
    x2 : 2D array
        Embed time series.
    percent : float
        Fraction of samples assigned to the training set.

    Returns
    -------
    x1tr, x1te, x2tr, x2te : 2D arrays
        Training and testing slices of x1 and x2.
    """
    if len(x1) != len(x2):
        # Best-effort warning only; the split below still uses len(x1).
        print('X1 and X2 are different lengths!')
    cut = int(len(x1) * percent)
    return x1[:cut], x1[cut:], x2[:cut], x2[cut:]
|
pynusmv-tools-1.0rc10
|
pynusmv-tools-1.0rc10//pynusmv_tools/fairctl/explain.pyfile:/pynusmv_tools/fairctl/explain.py:function:explain_au/explain_au
|
def explain_au(fsm, state, phi, psi, states=None):
    """
    Explain why state of fsm satisfies A phi U psi.
    fsm -- the fsm;
    state -- a state of fsm satisfying A phi U psi;
    phi -- the set of states of fsm satisfying phi;
    psi -- the set of states of fsm satisfying psi;
    states -- a dictionary of state->explanation pairs.
    """
    # Placeholder: not implemented.
    pass
|
mxnet-1.6.0.data
|
mxnet-1.6.0.data//purelib/mxnet/contrib/onnx/mx2onnx/_op_translations.pyfile:/purelib/mxnet/contrib/onnx/mx2onnx/_op_translations.py:function:get_boolean_attribute_value/get_boolean_attribute_value
|
def get_boolean_attribute_value(attrs, attr_name):
    """Convert a string-valued boolean attribute to an integer for ONNX.

    Looks up *attr_name* in the attribute dictionary *attrs*; the strings
    'True' and '1' map to 1, anything else (including a missing key)
    maps to 0.
    """
    value = attrs.get(attr_name, 0)
    if value in ('True', '1'):
        return 1
    return 0
|
elastalert-0.2.4
|
elastalert-0.2.4//elastalert/util.pyfile:/elastalert/util.py:function:hashable/hashable
|
def hashable(obj):
    """ Convert obj to a hashable obj.
    Values from Elasticsearch are used as dictionary keys; lists and dicts
    are unhashable, so anything without a __hash__ is stringified instead
    of being returned as-is."""
    return obj if obj.__hash__ else str(obj)
|
SQImFil-0.3.5
|
SQImFil-0.3.5//python/SQImFil/_imfil.pyfile:/python/SQImFil/_imfil.py:function:write_best_to_x/write_best_to_x
|
def write_best_to_x(iteration_data):
    """
    Makes the best point the current iterate.
    x, funs, fval = write_best_to_x(iteration_data)

    Reads the best point (xb), its function values (funsb) and objective
    value (fobjb) off *iteration_data* and returns them as a tuple.
    """
    return (iteration_data.xb, iteration_data.funsb, iteration_data.fobjb)
|
greenhouse_envmgmt-1.0
|
greenhouse_envmgmt-1.0//greenhouse_envmgmt/control.pyclass:ControlCluster/compile_instance_masks
|
@classmethod
def compile_instance_masks(cls):
    """ Compiles instance masks into a master mask that is usable by
        the IO expander. Also determines whether or not the pump
        should be on.
        Method is generalized to support multiple IO expanders
        for possible future expansion.
    """
    # Floor division: under Python 3, `/` yields a float and
    # `[0, 0] * float` raises TypeError; `//` restores the intended
    # integer expander count (one expander per 4 instances).
    number_IO_expanders = (len(cls._list) - 1) // 4 + 1
    cls.master_mask = [0, 0] * number_IO_expanders
    for ctrlobj in cls:
        cls.master_mask[ctrlobj.bank] |= ctrlobj.mask
        if ctrlobj.pump_request == 1:
            cls.master_mask[cls.pump_bank] |= 1 << cls.pump_pin
|
patentmodels
|
patentmodels//lib/utils.pyfile:/lib/utils.py:function:check_list/check_list
|
def check_list(listvar):
    """Wrap a non-list value into a single-element list; lists pass through."""
    return listvar if isinstance(listvar, list) else [listvar]
|
TwitchChatInterface
|
TwitchChatInterface//lib/twitchMessageHandler/messageHandler.pyclass:_Parser/badges
|
@staticmethod
def badges(tags) ->dict:
    """Parse the comma-separated 'badges' tag string into a dict.

    Mutates *tags* in place: replaces tags['badges'] with a
    {badge_name: version} dict and stashes the original value under
    'badges-raw'.  Returns the mutated dict, or None when any exception
    occurs (see the bare except below).
    """
    try:
        if 'badges' in tags and type(tags['badges']) == str:
            badges = {}
            explode = tags['badges'].split(',')
            for item in explode:
                # Each entry looks like 'name/version'.
                parts = item.split('/')
                # NOTE(review): str.split never yields None, so this
                # check can never fire; an entry without '/' raises
                # IndexError here, which the bare except below silently
                # turns into a None return -- TODO confirm intended.
                if parts[1] == None:
                    return
                badges[parts[0]] = parts[1]
            tags['badges-raw'] = tags['badges']
            tags['badges'] = badges
        if 'badges' in tags and type(tags['badges']) == bool:
            tags['badges-raw'] = None
        return tags
    except:
        # Swallows all errors (malformed badge strings, non-dict input)
        # and falls through to an implicit None return.
        pass
|
mxnet
|
mxnet//contrib/onnx/_import/translation_utils.pyfile:/contrib/onnx/_import/translation_utils.py:function:_remove_attributes/_remove_attributes
|
def _remove_attributes(attrs, remove_list):
"""
Removes attributes in the remove list from the input attribute dict
:param attrs : Dict of operator attributes
:param remove_list : list of attributes to be removed
:return new_attr : Dict of operator attributes without the listed attributes.
"""
new_attrs = {}
for attr in attrs.keys():
if attr not in remove_list:
new_attrs[attr] = attrs[attr]
return new_attrs
|
py-popgen-0.1.7
|
py-popgen-0.1.7//pgpipe/vcftools.pyfile:/pgpipe/vcftools.py:function:produce_vcftools_output/produce_vcftools_output
|
def produce_vcftools_output(output, filename, append_mode=False,
    strip_header=False):
    """
    Creates the vcftools output file
    This function will create an output file from the vcftools stdout.
    Please run `check_vcftools_for_errors` prior to check that vcftools
    finished without error.
    Parameters
    ----------
    output : str
        vcftools stdout
    filename : str
        Specifies the filename for the output file
    append_mode : bool
        Used to create a single output file from multiple calls
    strip_header : bool
        Used to remove the header if not needed
    Returns
    -------
    output : file
        vcftools output file
    """
    if strip_header:
        # Drop the first line; keepends=True preserves the remaining newlines.
        output = ''.join(output.splitlines(True)[1:])
    mode = 'a' if append_mode else 'w'
    # Context manager guarantees the file is closed even if write() raises
    # (the original open/close pair could leak the handle on error).
    with open(filename, mode) as vcftools_log_file:
        vcftools_log_file.write(str(output))
|
pedal-2.0.2
|
pedal-2.0.2//pedal/cait/stretchy_tree_matching.pyfile:/pedal/cait/stretchy_tree_matching.py:function:is_primitive/is_primitive
|
def is_primitive(item):
    """
    Determines if the given item is a primitive value (either an int, float,
    str, bool, or None).
    Args:
        item (any): Any value
    Returns:
        bool: Whether the item is a primitive value.
    """
    if item is None:
        return True
    return isinstance(item, (int, float, str, bool))
|
nio
|
nio//api.pyclass:Api/mimetype_to_msgtype
|
@staticmethod
def mimetype_to_msgtype(mimetype):
    """Turn a mimetype into a matrix message type.

    Known media prefixes map to their matrix type; everything else is a
    generic file.
    """
    for prefix, msgtype in (('image', 'm.image'), ('video', 'm.video'),
            ('audio', 'm.audio')):
        if mimetype.startswith(prefix):
            return msgtype
    return 'm.file'
|
pyidp3
|
pyidp3//typedIDP.pyclass:IDP/split_pred_name
|
@staticmethod
def split_pred_name(pred_name):
    """
    Static method to split a predicate expression into its name and the
    list of types it holds.
    :example:
        Foo(bar,baz)
    is split into "Foo" and ["bar", "baz"].  A predicate without
    parentheses gets empty ones appended, yielding a single ''.
    """
    cleaned = pred_name.strip()
    if not cleaned.endswith(')'):
        cleaned += '()'
    name, args = cleaned.split('(')
    arglist = args.strip(')').split(',')
    return name, arglist
|
slicerator-1.0.0
|
slicerator-1.0.0//versioneer.pyfile:/versioneer.py:function:render_pep440_pre/render_pep440_pre
|
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.
    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces['closest-tag']
    distance = pieces['distance']
    if not tag:
        return '0.post.dev%d' % distance
    if distance:
        return tag + '.post.dev%d' % distance
    return tag
|
clans-0.3.0
|
clans-0.3.0//clans/ext/example.pyfile:/clans/ext/example.py:function:post_search/post_search
|
def post_search(cs, results):
    """
    This hook is called after a (quicklove or regular) search, and is
    passed a list containing the results.
    Elements of this list are 3-tuples:
    - the name of the plan on which the term was found (str)
    - the number of instances found (int)
    - a list of snippets.
    Note that the snippet list may not be the same length as the
    number of instances found.
    Lists are mutable, so results may be filtered by modifying this
    list in-place.
    """
    # Example extension hook: intentionally does nothing.
    pass
|
springheel
|
springheel//parsetranscript.pyfile:/parsetranscript.py:function:readTranscript/readTranscript
|
def readTranscript(file_name):
    """
    Retrieve the contents of the transcript file.
    Parameters
    ----------
    file_name : str
        The path to the transcript file.
    Returns
    -------
    text_to_read : str
        The entire file contents as one string, or the literal string
        'No transcript file found.' when the file is empty or unreadable.
    """
    try:
        with open(file_name, 'r', encoding='utf-8') as f:
            text_to_read = f.read()
        if text_to_read != '':
            return text_to_read
        else:
            # Empty file is treated the same as a missing one.
            return 'No transcript file found.'
    except IOError:
        return 'No transcript file found.'
|
qecore
|
qecore//utility.pyfile:/utility.py:function:get_application/get_application
|
def get_application(context, application):
    """
    Get Application class instance of an application, based upon given name.
    :type context: <behave.runner.Context>
    :param context: Context object that is passed from common_steps.
    :type application: str
    :param application: String of application identification: name.
    :rtype: <qecore.application.Application>
    :return: Application class instance
    .. note::
        Do **NOT** call this by yourself. This function is called by :mod:`common_steps`.
    """
    app_class_to_return = None
    try:
        # Fast path: the application may be stored directly as a context
        # attribute under its name.
        app_class_to_return = getattr(context, application)
    except AttributeError:
        # Not a context attribute: search the sandbox's applications by
        # component name.
        for app in context.sandbox.applications:
            if app.component == application:
                app_class_to_return = app
                break
    except TypeError:
        # `application` was not a string (getattr requires str): fall back
        # to the sandbox's default application.
        app_class_to_return = context.sandbox.default_application
        assert context.sandbox.default_application is not None, 'Default application was not found. Check your environment file!'
    assert app_class_to_return is not None, 'Application was not found. Check your environment or feature file!'
    assert not isinstance(app_class_to_return, str
        ), 'Application class was not found. Usually indication of not installed application.'
    return app_class_to_return
|
mercurial-5.4
|
mercurial-5.4//hgext/histedit.pyfile:/hgext/histedit.py:function:addln/addln
|
def addln(win, y, x, line, color=None):
    """Add a line to the given window left padding but 100% filled with
    whitespace characters, so that the color appears on the whole line

    :param win: a curses window
    :param y: row; negative values count from the bottom edge
    :param x: column; negative values count from the right edge
    :param line: line content (bytes-compatible) to draw
    :param color: optional curses attribute applied to the whole line
    """
    maxy, maxx = win.getmaxyx()
    length = maxx - 1 - x
    # Pad with spaces to the full remaining width so the color attribute
    # covers the entire line, then clip to fit.
    line = bytes(line).ljust(length)[:length]
    if y < 0:
        y = maxy + y
    if x < 0:
        x = maxx + x
    if color:
        win.addstr(y, x, line, color)
    else:
        win.addstr(y, x, line)
|
pybinding
|
pybinding//utils/misc.pyfile:/utils/misc.py:function:with_defaults/with_defaults
|
def with_defaults(options: dict, defaults_dict: dict=None, **defaults_kwargs):
    """Return a dict where missing keys are filled in by defaults
    >>> options = dict(hello=0)
    >>> with_defaults(options, hello=4, world=5) == dict(hello=0, world=5)
    True
    >>> defaults = dict(hello=4, world=5)
    >>> with_defaults(options, defaults) == dict(hello=0, world=5)
    True
    >>> with_defaults(options, defaults, world=7, yes=3) == dict(hello=0, world=5, yes=3)
    True
    """
    # Precedence (lowest to highest): defaults_kwargs < defaults_dict < options.
    merged = dict(defaults_kwargs)
    if defaults_dict:
        merged.update(defaults_dict)
    if options:
        merged.update(options)
    return merged
|
soilapis
|
soilapis//summary_soil_property.pyfile:/summary_soil_property.py:function:compute_taw/compute_taw
|
def compute_taw(fc, pwp, depth, fraction):
    """
    Compute total available water (TAW).
    :param fc: Field capacity
    :param pwp: permanent wilting point
    :param depth: depth of soil in mm
    :param fraction: float multiplier applied to the water-holding range
    :return: a float value for TAW
    """
    water_holding_range = fc - pwp
    return depth * fraction * water_holding_range
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/cloudhsm.pyfile:/pyboto3/cloudhsm.py:function:create_hapg/create_hapg
|
def create_hapg(Label=None):
    """
    Creates a high-availability partition group. A high-availability partition group is a group of partitions that spans multiple physical HSMs.
    See also: AWS API Documentation
    :example: response = client.create_hapg(
        Label='string'
    )
    :type Label: string
    :param Label: [REQUIRED]
    The label of the new high-availability partition group.
    :rtype: dict
    :return: {
        'HapgArn': 'string'
    }
    """
    # Documentation stub generated from the AWS API docs; no implementation.
    pass
|
IPython
|
IPython//core/completer.pyfile:/core/completer.py:function:has_open_quotes/has_open_quotes
|
def has_open_quotes(s):
    """Return whether a string has open quotes.
    This simply counts whether the number of quote characters of either type in
    the string is odd.
    Returns
    -------
    If there is an open quote, the quote character is returned. Else, return
    False.
    """
    # Double quotes are checked first, matching the original precedence.
    for quote_char in ('"', "'"):
        if s.count(quote_char) % 2:
            return quote_char
    return False
|
kll-0.5.7.16
|
kll-0.5.7.16//kll/common/parse.pyclass:Make/debug
|
def debug(tokens):
    """
    Print the token stream for debugging and return it unchanged.
    """
    print(tokens)
    return tokens
|
itsxpress-1.8.0
|
itsxpress-1.8.0//itsxpress/main.pyfile:/itsxpress/main.py:function:_is_paired/_is_paired
|
def _is_paired(fastq, fastq2, single_end):
"""Determines the workflow based on file inputs.
Args:
"""
if fastq and fastq2:
paired_end = True
interleaved = False
elif single_end:
paired_end = False
interleaved = False
else:
paired_end = True
interleaved = True
return paired_end, interleaved
|
acitoolkit-0.4
|
acitoolkit-0.4//acitoolkit/acitoolkit.pyclass:BaseContract/_get_toolkit_to_apic_classmap
|
@classmethod
def _get_toolkit_to_apic_classmap(cls):
    """
    Gets the APIC class to an acitoolkit class mapping dictionary.
    Base-class default: no mapping (subclasses override to provide one).
    :returns: dict of APIC class names to acitoolkit classes
    """
    return {}
|
networkx
|
networkx//algorithms/isomorphism/matchhelpers.pyfile:/algorithms/isomorphism/matchhelpers.py:function:close/close
|
def close(x, y, rtol=1e-05, atol=1e-08):
    """Returns True if x and y are sufficiently close.
    Parameters
    ----------
    rtol : float
        The relative error tolerance.
    atol : float
        The absolute error tolerance.
    """
    # Combined tolerance is relative to |y| plus a fixed absolute floor.
    tolerance = atol + rtol * abs(y)
    return abs(x - y) <= tolerance
|
AdHoc-0.3.2
|
AdHoc-0.3.2//adhoc.pyclass:AdHoc/remove_sections
|
@classmethod
def remove_sections(cls, string, symbol_or_re, is_re=False):
    """Remove sections.

    Partitions *string* with ``cls.tag_partition`` using the section tag
    built from *symbol_or_re* (treated as a regular expression when
    *is_re* is True) and returns the retained parts joined back together;
    the removed parts are discarded.
    """
    ah_retained, ah_removed = cls.tag_partition(string, cls.section_tag(
        symbol_or_re, is_re), is_re)
    return ''.join(ah_retained)
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/dynamodb.pyfile:/pyboto3/dynamodb.py:function:untag_resource/untag_resource
|
def untag_resource(ResourceArn=None, TagKeys=None):
    """
    Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to 5 times per second, per account.
    For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide .
    See also: AWS API Documentation
    :example: response = client.untag_resource(
        ResourceArn='string',
        TagKeys=[
            'string',
        ]
    )
    :type ResourceArn: string
    :param ResourceArn: [REQUIRED]
    The Amazon DyanamoDB resource the tags will be removed from. This value is an Amazon Resource Name (ARN).
    :type TagKeys: list
    :param TagKeys: [REQUIRED]
    A list of tag keys. Existing tags of the resource whose keys are members of this list will be removed from the Amazon DynamoDB resource.
    (string) --
    """
    # Documentation stub generated from the AWS API docs; no implementation.
    pass
|
gistory
|
gistory//index.pyfile:/index.py:function:pretty_date/pretty_date
|
def pretty_date(time=False):
    """
    Get a datetime object or a int() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc

    :param time: int/float epoch timestamp, a datetime, or a falsy value
        (meaning "now"); unsupported values are treated as "now" instead
        of raising (the original left `diff` unbound for e.g. floats or
        True, causing UnboundLocalError).
    :return: human-readable relative-time string; '' for future times.
    """
    from datetime import datetime
    now = datetime.now()
    # bool is checked before int because True/False are ints in Python.
    if isinstance(time, bool):
        diff = now - now
    elif isinstance(time, (int, float)):
        diff = now - datetime.fromtimestamp(time)
    elif isinstance(time, datetime):
        diff = now - time
    else:
        # None, '', or any unsupported type: treat as "just now".
        diff = now - now
    second_diff = diff.seconds
    day_diff = diff.days
    if day_diff < 0:
        # Future timestamps produce an empty string.
        return ''
    if day_diff == 0:
        if second_diff < 10:
            return 'just now'
        if second_diff < 60:
            return str(second_diff) + ' seconds ago'
        if second_diff < 120:
            return 'a minute ago'
        if second_diff < 3600:
            return str(int(second_diff / 60)) + ' minutes ago'
        if second_diff < 7200:
            return 'an hour ago'
        if second_diff < 86400:
            return str(int(second_diff / 3600)) + ' hours ago'
    if day_diff == 1:
        return 'Yesterday'
    if day_diff < 7:
        return str(int(day_diff)) + ' days ago'
    if day_diff < 31:
        return str(int(day_diff / 7)) + ' weeks ago'
    if day_diff < 365:
        return str(int(day_diff / 30)) + ' months ago'
    return str(int(day_diff / 365)) + ' years ago'
|
doker
|
doker//rst2pdf/pygments_code_block_directive.pyfile:/rst2pdf/pygments_code_block_directive.py:function:string_bool/string_bool
|
def string_bool(argument):
    """
    Convert the strings 'True'/'False' (any letter case) into Python
    booleans; raise ValueError for None or any other value.
    """
    if argument is None:
        raise ValueError(
            'argument required but none supplied; choose from "True" or "False"'
            )
    lowered = argument.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise ValueError('"%s" unknown; choose from "True" or "False"' % argument)
|
inkamusic
|
inkamusic//algorithms.pyfile:/algorithms.py:function:get_tone_from_envelope_val/get_tone_from_envelope_val
|
def get_tone_from_envelope_val(envelope, instru_low, instru_high):
    """transforms envelope val (-1 ... +1) to tone height"""
    # Linear map: envelope -1 -> instru_low, +1 -> instru_high,
    # rounded to the nearest integer tone.
    span = instru_high - instru_low
    centre_sum = instru_high + instru_low
    return int((span * envelope + centre_sum) / 2 + 0.5)
|
uwsgiconf-0.20.1
|
uwsgiconf-0.20.1//uwsgiconf/uwsgi_stub.pyfile:/uwsgiconf/uwsgi_stub.py:function:call/call
|
def call(func_name, *args):
    """Performs an [RPC] function call with the given arguments.

    Stub declaration: the real implementation is provided by uWSGI at
    runtime; this placeholder returns None.

    .. warning:: Bytes are returned for Python 3.
    :param str|unicode func_name: Function name to call
        with optional address (if @-syntax is used).
    :param list[str|bytes] args:
    :rtype: bytes|str
    """
|
fake-bpy-module-2.80-20200428
|
fake-bpy-module-2.80-20200428//bpy/ops/render.pyfile:/bpy/ops/render.py:function:cycles_integrator_preset_add/cycles_integrator_preset_add
|
def cycles_integrator_preset_add(name: str='', remove_name: bool=False,
    remove_active: bool=False):
    """Add an Integrator Preset

    Stub mirroring the Blender operator signature; no implementation.

    :param name: Name, Name of the preset, used to make the path name
    :type name: str
    :param remove_name: remove_name
    :type remove_name: bool
    :param remove_active: remove_active
    :type remove_active: bool
    """
    pass
|
ghidra_bridge
|
ghidra_bridge//ghidra_bridge.pyfile:/ghidra_bridge.py:function:get_listing_panel/get_listing_panel
|
def get_listing_panel(tool, ghidra):
    """ Get the code listing UI element, so we can get up-to-date location/highlight/selection

    :param tool: a Ghidra plugin tool exposing getService()
    :param ghidra: the top-level ghidra module (provides the service class)
    :return: the listing panel of the CodeViewerService
    """
    cvs = tool.getService(ghidra.app.services.CodeViewerService)
    return cvs.getListingPanel()
|
rockset_sqlcli
|
rockset_sqlcli//rscli/packages/parseutils/meta.pyfile:/rscli/packages/parseutils/meta.py:function:parse_defaults/parse_defaults
|
def parse_defaults(defaults_string):
    """Yield the comma-separated default-value expressions of a function.

    Commas inside single- or double-quoted substrings do not split values;
    leading spaces of each value are skipped.
    """
    if not defaults_string:
        return
    token = ''
    quote_char = None
    for ch in defaults_string:
        if token == '' and ch == ' ':
            continue  # skip leading whitespace of each value
        if ch in ('"', "'"):
            # Track quoting state; mismatched quote chars are kept as-is.
            if quote_char == ch:
                quote_char = None
            elif quote_char is None:
                quote_char = ch
        elif ch == ',' and quote_char is None:
            yield token
            token = ''
            continue
        token += ch
    yield token
|
test-pycoin-0.90a0
|
test-pycoin-0.90a0//pycoin/ecdsa/encrypt.pyfile:/pycoin/ecdsa/encrypt.py:function:generate_shared_public_key/generate_shared_public_key
|
def generate_shared_public_key(my_private_key, their_public_pair, generator):
    """
    Generate a shared public pair. Two parties each generate a private key and share
    their public key with the other party over an insecure channel. The shared public
    key can be generated by either side, but not by eavesdroppers.

    :param my_private_key: this party's private key (scalar)
    :param their_public_pair: the other party's public key as an (x, y) pair
    :param generator: the curve generator providing the Point constructor
    :return: the shared point (scalar multiplication of their point)
    """
    p = generator.Point(*their_public_pair)
    return my_private_key * p
|
graphx-0.0.4
|
graphx-0.0.4//graphx/preprocessor.pyfile:/graphx/preprocessor.py:function:set_node_mat/set_node_mat
|
def set_node_mat(node_map, node, node_list):
    """
    Fill an adjacency matrix from an edge list (undirected, in place).
    :param node_map: square matrix (list of lists) to populate
    :param node: ordered node labels; positions give matrix indices
    :param node_list: iterable of (x, y, weight) edges
    :return: None -- node_map is mutated symmetrically
    """
    for src, dst, weight in node_list:
        row = node.index(src)
        col = node.index(dst)
        node_map[row][col] = weight
        node_map[col][row] = weight
|
lidtk
|
lidtk//data/language_utils.pyfile:/data/language_utils.py:function:get_characters/get_characters
|
def get_characters(lang_data):
    """
    Count the characters appearing in the language corpus.
    Parameters
    ----------
    lang_data : list of str
        A list of all paragraphs
    Returns
    -------
    characters : collections.Counter
        Mapping of character -> total occurrence count
    """
    from collections import Counter
    totals = Counter()
    for paragraph in lang_data:
        totals.update(paragraph)
    return totals
|
neuropredict
|
neuropredict//algorithms.pyfile:/algorithms.py:function:add_new_params/add_new_params
|
def add_new_params(old_grid, new_grid, old_name, new_name):
    """
    Merge *new_grid* into *old_grid* in place, refusing any key overlap --
    this prevents silently overwriting existing parameters.

    Raises ValueError when the two grids share any parameter name.
    """
    if not new_grid:
        return
    overlap = set(new_grid) & set(old_grid)
    if overlap:
        raise ValueError(
            'Overlap in parameters between {} and {} of the chosen pipeline.'
            .format(old_name, new_name))
    old_grid.update(new_grid)
|
wlauto-3.2.0
|
wlauto-3.2.0//wa/framework/plugin.pyclass:PluginMeta/_propagate_attributes
|
@classmethod
def _propagate_attributes(mcs, bases, attrs, clsname):
    """
    For attributes specified by to_propagate, their values will be a union of
    that specified for cls and its bases (cls values overriding those of bases
    in case of conflicts).

    Mutates *attrs* in place: each propagated attribute is replaced by a
    collector holding the base-class values followed by the class's own.
    """
    for prop_attr, attr_cls, attr_collector_cls in mcs.to_propagate:
        should_propagate = False
        propagated = attr_collector_cls(attr_cls)
        # Gather values from every base that defines the attribute.
        for base in bases:
            if hasattr(base, prop_attr):
                propagated += getattr(base, prop_attr) or []
                should_propagate = True
        # Append (and validate) the class's own values last so they win.
        if prop_attr in attrs:
            pattrs = attrs[prop_attr] or []
            for pa in pattrs:
                if not isinstance(pa, attr_cls):
                    msg = 'Invalid value "{}" for attribute "{}"; must be a {}'
                    raise ValueError(msg.format(pa, prop_attr, attr_cls))
                pa._owner = clsname
            propagated += pattrs
            should_propagate = True
        if should_propagate:
            for p in propagated:
                # An 'override' flag must match whether something was
                # actually overridden by the collector.
                override = bool(getattr(p, 'override', None))
                overridden = bool(getattr(p, '_overridden', None))
                if override != overridden:
                    msg = "Overriding non existing parameter '{}' inside '{}'"
                    raise ValueError(msg.format(p.name, p._owner))
            attrs[prop_attr] = propagated
|
gcsfs
|
gcsfs//core.pyclass:GCSFileSystem/split_path
|
@classmethod
def split_path(cls, path):
    """
    Normalise GCS path string into bucket and key.
    Parameters
    ----------
    path : string
        Input path, like `gcs://mybucket/path/to/file`.
        Path is of the form: '[gs|gcs://]bucket[/key]'
    Returns
    -------
    (bucket, key) tuple -- key is '' for a bucket-only path
    """
    path = cls._strip_protocol(path).lstrip('/')
    if '/' not in path:
        # Bucket only, no key component.
        return path, ''
    else:
        # Split on the first '/' only; the key may itself contain '/'.
        return path.split('/', 1)
|
fake-bpy-module-2.79-20200428
|
fake-bpy-module-2.79-20200428//bpy/ops/sculpt.pyfile:/bpy/ops/sculpt.py:function:sculptmode_toggle/sculptmode_toggle
|
def sculptmode_toggle():
    """Toggle sculpt mode in 3D view

    Stub mirroring the Blender operator; no implementation.
    """
    pass
|
alignak_backend-1.4.16
|
alignak_backend-1.4.16//alignak_backend/models/actionforcecheck.pyfile:/alignak_backend/models/actionforcecheck.py:function:get_name/get_name
|
def get_name(friendly=False):
    """Get name of this resource.

    :param friendly: when True, return the human-readable label
    :return: name of this resource
    :rtype: str
    """
    return 'Check request' if friendly else 'actionforcecheck'
|
pyltr
|
pyltr//util/group.pyfile:/util/group.py:function:get_groups/get_groups
|
def get_groups(qids):
    """Makes an iterator of query groups on the provided list of query ids.
    Parameters
    ----------
    qids : array_like of shape = [n_samples]
        List of query ids.
    Yields
    ------
    row : (qid, int, int)
        Tuple of query id, from, to.
        ``[i for i, q in enumerate(qids) if q == qid] == range(from, to)``
    """
    run_start = 0
    seen = 0
    current_qid = None
    for idx, qid in enumerate(qids):
        seen += 1
        if qid != current_qid:
            # Close the previous run (skipped when this is the very first id).
            if idx != run_start:
                yield current_qid, run_start, idx
            current_qid = qid
            run_start = idx
    # Flush the trailing run, if any ids were seen at all.
    if run_start != seen:
        yield current_qid, run_start, seen
|
microcosm-logging-1.7.0
|
microcosm-logging-1.7.0//microcosm_logging/factories.pyfile:/microcosm_logging/factories.py:function:make_extra_console_formatter/make_extra_console_formatter
|
def make_extra_console_formatter(graph):
    """Create the default console formatter configuration.

    Returns a dict in ``logging.config`` factory form pointing at the
    ExtraConsoleFormatter, using the graph's configured default format.
    """
    formatter = {'()': 'microcosm_logging.formatters.ExtraConsoleFormatter'}
    formatter['format_string'] = graph.config.logging.default_format
    return formatter
|
allmydata
|
allmydata//interfaces.pyclass:IDirectoryNode/set_metadata_for
|
def set_metadata_for(name, metadata):
    """Replace any existing metadata for the named child.

    The child name must be a unicode string. The metadata is stored in
    the *edge*, not in the child: it is attached to this parent dirnode
    rather than to the child node.

    :returns: a Deferred that fires with this dirnode when the
        operation is complete.
    :raises NoSuchChildError: if there is no child by that name.
    """
|
alchemtest-0.3.0
|
alchemtest-0.3.0//versioneer.pyfile:/versioneer.py:function:render_pep440_old/render_pep440_old
|
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]].

    The ".dev0" suffix means the working tree was dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces['closest-tag']:
        rendered = pieces['closest-tag']
        # Clean, exact tag: nothing to append.
        if not (pieces['distance'] or pieces['dirty']):
            return rendered
        rendered += '.post%d' % pieces['distance']
    else:
        # No tag at all: synthesize a 0-based post release.
        rendered = '0.post%d' % pieces['distance']
    if pieces['dirty']:
        rendered += '.dev0'
    return rendered
|
package_delivery_app
|
package_delivery_app//load.pyfile:/load.py:function:clean_distance_data/clean_distance_data
|
def clean_distance_data(csv_data):
    """Normalise distance CSV data in place.

    Removes the redundant second column, replaces location names with
    their row/column numbers, and converts the distance cells (a mix of
    ints and strings) to floats. Empty cells are left untouched.
    Mutates ``csv_data``; returns None.
    """
    for r, row in enumerate(csv_data):
        # The second column duplicates the first; discard it.
        del row[1]
        if r == 0:
            continue
        # Convert every non-empty distance cell of this data row.
        for c in range(1, len(row)):
            cell = row[c]
            if cell != '':
                row[c] = float(cell)
        # Replace the location name with its row number.
        row[0] = r
    # Header cells (except the corner) become their column numbers.
    header = csv_data[0]
    for c in range(1, len(header)):
        header[c] = c
|
fake-bpy-module-2.78-20200428
|
fake-bpy-module-2.78-20200428//bpy/ops/pose.pyfile:/bpy/ops/pose.py:function:armature_apply/armature_apply
|
def armature_apply():
    """Apply the current pose as the new rest pose (generated stub; no-op)."""
    return None
|
bpy
|
bpy//ops/object.pyfile:/ops/object.py:function:meshdeform_bind/meshdeform_bind
|
def meshdeform_bind(modifier: str=''):
    """Bind the mesh to its cage in a mesh-deform modifier (stub; no-op).

    :param modifier: Modifier, name of the modifier to edit
    :type modifier: str
    """
    return None
|
nunavut
|
nunavut//jinja/jinja2/filters.pyfile:/jinja/jinja2/filters.py:function:do_filesizeformat/do_filesizeformat
|
def do_filesizeformat(value, binary=False):
    """Format ``value`` like a human-readable file size (13 kB, 4.1 MB,
    102 Bytes, etc).

    Decimal prefixes (kB, MB, ...) are used by default; set ``binary``
    to True for binary prefixes (KiB, MiB, ...).

    :param value: the size in bytes (anything convertible to float)
    :param binary: use base-1024 binary prefixes instead of base-1000
    :return: the formatted string
    """
    # Renamed from ``bytes`` to avoid shadowing the builtin.
    size = float(value)
    base = 1024 if binary else 1000
    if binary:
        prefixes = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    else:
        prefixes = ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    if size == 1:
        return '1 Byte'
    if size < base:
        return '%d Bytes' % size
    for i, prefix in enumerate(prefixes):
        unit = base ** (i + 2)
        if size < unit:
            break
    # Falls through with the largest unit for astronomically large values,
    # matching the original's duplicated trailing return.
    return '%.1f %s' % (base * size / unit, prefix)
|
collective.baseid-1.0rc1
|
collective.baseid-1.0rc1//collective/baseid/interfaces.pyclass:IIdsManage/items
|
def items():
    """Return a list of ``(id, object)`` pairs.
    """
|
Vizuka-0.36.1
|
Vizuka-0.36.1//vizuka/frontier/deterministic.pyclass:NoneFrontiers/compare
|
def compare(dict0, dict1, force_inside01=True):
    """Draw no frontiers: the comparison always scores 0.

    All parameters are accepted for interface compatibility and ignored.
    """
    del dict0, dict1, force_inside01  # intentionally unused
    return 0
|
iaesdk
|
iaesdk//ibm_analytics_engine_api_v2.pyclass:AnalyticsEngineCustomActionScript/_from_dict
|
@classmethod
def _from_dict(cls, _dict):
    """Initialize a AnalyticsEngineCustomActionScript object from a json dictionary.

    Thin private alias that delegates to the public ``from_dict``.
    """
    return cls.from_dict(_dict)
|
ProcessOptimizer-0.4.7
|
ProcessOptimizer-0.4.7//ProcessOptimizer/benchmarks.pyfile:/ProcessOptimizer/benchmarks.py:function:bench1/bench1
|
def bench1(x):
    """A benchmark function for test purposes: f(x) = x[0] ** 2.

    It has a single minimum with f(x*) = 0 at x* = 0.
    """
    first_coordinate = x[0]
    return first_coordinate ** 2
|
python-tutum-0.21.2
|
python-tutum-0.21.2//tutum/api/tag.pyclass:Tag/create
|
@classmethod
def create(cls, **kwargs):
    """Build and return a new, unsaved instance of the model.

    Attributes are taken from ``kwargs``.

    :returns: tag -- a new local instance of the Tag
    """
    instance = cls(**kwargs)
    return instance
|
PySimpleAutomata-0.5.0
|
PySimpleAutomata-0.5.0//PySimpleAutomata/NFA.pyfile:/PySimpleAutomata/NFA.py:function:rename_nfa_states/rename_nfa_states
|
def rename_nfa_states(nfa: dict, suffix: str):
    """Side effect on input! Rename every NFA state by prepending **suffix**.

    Utility to keep two automata from sharing state names. Avoid
    suffixes that can lead to special names like "as", "and", ...

    Note: despite the parameter name, the string is added at the
    *beginning* of each state name.

    :param dict nfa: input NFA (mutated in place).
    :param str suffix: string added at the beginning of each state name.
    :return: the same NFA dict, renamed.
    """
    old_states = nfa['states']
    renamed = {s: suffix + s for s in old_states}
    # Initial/accepting sets are intersected with the known states,
    # matching the original's loop over nfa['states'].
    new_initial = {renamed[s] for s in old_states if s in nfa['initial_states']}
    new_accepting = {renamed[s] for s in old_states if s in nfa['accepting_states']}
    nfa['states'] = set(renamed.values())
    nfa['initial_states'] = new_initial
    nfa['accepting_states'] = new_accepting
    nfa['transitions'] = {
        (renamed[src], symbol): {renamed[dst] for dst in targets}
        for (src, symbol), targets in nfa['transitions'].items()
    }
    return nfa
|
plottwist-config-0.0.15
|
plottwist-config-0.0.15//versioneer.pyfile:/versioneer.py:function:render_pep440_old/render_pep440_old
|
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]].

    The ".dev0" suffix marks a dirty working tree.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces['closest-tag']
    distance = pieces['distance']
    dirty = pieces['dirty']
    parts = []
    if tag:
        parts.append(tag)
        if distance or dirty:
            parts.append('.post%d' % distance)
            if dirty:
                parts.append('.dev0')
    else:
        parts.append('0.post%d' % distance)
        if dirty:
            parts.append('.dev0')
    return ''.join(parts)
|
pero-0.14.0
|
pero-0.14.0//pero/formatters/utils.pyfile:/pero/formatters/utils.py:function:round_time/round_time
|
def round_time(seconds, units, rounding):
    """
    Rounds given time into specific units.

    NOTE(review): the body is an unimplemented stub — it always returns
    None, not the rounded time the docstring promises. Confirm whether a
    real implementation or an override is expected.

    Args:
        seconds: float or int
            Time in seconds.
        units: pero.TIME
            Specifies the rounding units as any item from the pero.TIME enum.
        rounding: pero.ROUNDING
            Specifies the rounding style as any item from the pero.ROUNDING
            enum.

    Returns:
        float
            Rounded time in seconds.
    """
    pass
|
auto_behave
|
auto_behave//gen_step_rst.pyfile:/gen_step_rst.py:function:_get_file_name/_get_file_name
|
def _get_file_name(name: str, is_step: bool) ->str:
    """Derive the generated doc file's name.

    Hyphenates the given folder/file name and appends ``-steps`` when it
    refers to a step file.

    :param name: Name of a folder or file.
    :param is_step: If name is related to a step file or not.
    :return: The file name for the doc file.
    """
    stem = name.replace('_', '-')
    suffix = '-steps.rst' if is_step else '.rst'
    return stem + suffix
|
completions-0.0.8
|
completions-0.0.8//completions/templates.pyfile:/completions/templates.py:function:_option_style/_option_style
|
def _option_style(option):
    """
    Classify the style of an option string.

    1. '-' or '--': naked       -> 'a'
    2. '--abc'    : long        -> 'l'
    3. 'abc'      : command     -> 'f -a'
    4. '-abc'     : oldlong     -> 'o'
    5. '-a'       : short       -> 's'
    """
    if option == '-' or option == '--':
        return 'a'
    if option[:2] == '--':
        return 'l'
    if option[:1] != '-':
        return 'f -a'
    return 'o' if len(option) > 2 else 's'
|
jhTAlib-20200412.0
|
jhTAlib-20200412.0//jhtalib/pattern_recognition/pattern_recognition.pyfile:/jhtalib/pattern_recognition/pattern_recognition.py:function:CDLXSIDEGAP3METHODS/CDLXSIDEGAP3METHODS
|
def CDLXSIDEGAP3METHODS(df):
    """
    Upside/Downside Gap Three Methods.

    NOTE(review): unimplemented stub — it accepts the input dataframe
    and always returns None; confirm whether pattern detection is still
    pending upstream.
    """
|
RsCmwBluetoothSig-3.7.80.4
|
RsCmwBluetoothSig-3.7.80.4//RsCmwBluetoothSig/Internal/Conversions.pyfile:/RsCmwBluetoothSig/Internal/Conversions.py:function:find_in_enum_members/find_in_enum_members
|
def find_in_enum_members(item, enum_members: list):
    """Match a string against the provided list of member strings.

    The item does not have to match fully. Rules:
    - A member matches if it starts with the item (item is a prefix).
    - A member starting with underscores also matches if the item is a
      prefix of the member with its underscores stripped.
    Returns the first matching member in list order, or None.
    """
    for member in enum_members:
        if member.startswith(item):
            return member
        if member.startswith('_') and member.strip('_').startswith(item):
            return member
    return None
|
pysnmpcollector
|
pysnmpcollector//client.pyfile:/client.py:function:_devcfg_url/_devcfg_url
|
def _devcfg_url(id_=None, runtime=False):
    """Generate the config API path for an SNMP device.

    :param id_: optional device id appended to the path (omitted when falsy)
    :param runtime: append '/runtime' when True
    :return: the path string
    """
    path = '/api/cfg/snmpdevice'
    if id_:
        path += '/{}'.format(id_)
    if runtime:
        path += '/runtime'
    return path
|
Django_504
|
Django_504//contrib/auth/hashers.pyfile:/contrib/auth/hashers.py:function:mask_hash/mask_hash
|
def mask_hash(hash, show=6, char='*'):
    """
    Return the given hash with only the first ``show`` characters
    visible; the rest are masked with ``char`` for security reasons.
    """
    visible, hidden = hash[:show], hash[show:]
    return visible + char * len(hidden)
|
pyramid_torque_engine-0.5.4
|
pyramid_torque_engine-0.5.4//src/pyramid_torque_engine/util.pyfile:/src/pyramid_torque_engine/util.py:function:get_var/get_var
|
def get_var(environ, keys, default=None):
    """Try each of the keys in turn before falling back on the default.

    :param environ: mapping to look keys up in
    :param keys: candidate keys, tried in order
    :param default: returned when none of the keys is present
    """
    for key in keys:
        # Bug fix: ``dict.has_key`` was removed in Python 3;
        # use the ``in`` operator instead.
        if key in environ:
            return environ.get(key)
    return default
|
plone.app.redirector-2.2.1
|
plone.app.redirector-2.2.1//plone/app/redirector/interfaces.pyclass:IRedirectionStorage/__iter__
|
def __iter__():
    """Iterate over all existing paths stored for redirection."""
|
bat-0.3.7
|
bat-0.3.7//bat/dataframe_to_matrix.pyclass:DataFrameToMatrix/sanity_check_categorical
|
@staticmethod
def sanity_check_categorical(df):
    """Sanity check for 'dimensionality explosion' on categorical types.

    Prints a warning for every categorical column with more than 20
    unique values, since such columns would expand into that many
    dimensions when one-hot encoded.

    Args:
        df (dataframe): The dataframe to check the categorical columns
    Returns:
        None
    """
    categorical_columns = df.select_dtypes(include='category').columns
    for col in categorical_columns:
        unique_count = df[col].nunique()
        if unique_count <= 20:
            continue
        message = ('WARNING: {:s} will expand into {:d} dimensions!'
                   ' Should not include in feature set!').format(col, unique_count)
        print(message)
|
ui
|
ui//modelvis/hda_ideal_VLE.pyclass:HDAParameterData/define_metadata
|
@classmethod
def define_metadata(cls, obj):
    """Define properties supported and units.

    Registers each supported property with the builder method used to
    construct it (``None`` means the property is built directly) and its
    units, then declares the default units for the base quantities.

    NOTE(review): 'energy_internal_mol_phase' maps to
    '_enenrgy_internal_mol_phase' — looks like a typo for
    '_energy_internal_mol_phase'; confirm against the property class
    before changing, as the string is looked up at runtime.
    """
    obj.add_properties({'flow_mol': {'method': None, 'units': 'mol/s'},
        'flow_mol_phase_comp': {'method': None, 'units': 'mol/s'},
        'mole_frac': {'method': None, 'units': 'none'}, 'temperature': {
        'method': None, 'units': 'K'}, 'pressure': {'method': None, 'units':
        'Pa'}, 'flow_mol_phase': {'method': None, 'units': 'mol/s'},
        'dens_mol_phase': {'method': '_dens_mol_phase', 'units': 'mol/m^3'},
        'pressure_sat': {'method': '_pressure_sat', 'units': 'Pa'},
        'mole_frac_phase': {'method': '_mole_frac_phase', 'units':
        'no unit'}, 'energy_internal_mol_phase_comp': {'method':
        '_energy_internal_mol_phase_comp', 'units': 'J/mol'},
        'energy_internal_mol_phase': {'method':
        '_enenrgy_internal_mol_phase', 'units': 'J/mol'},
        'enth_mol_phase_comp': {'method': '_enth_mol_phase_comp', 'units':
        'J/mol'}, 'enth_mol_phase': {'method': '_enth_mol_phase', 'units':
        'J/mol'}, 'entr_mol_phase_comp': {'method': '_entr_mol_phase_comp',
        'units': 'J/mol'}, 'entr_mol_phase': {'method': '_entr_mol_phase',
        'units': 'J/mol'}, 'temperature_bubble': {'method':
        '_temperature_bubble', 'units': 'K'}, 'temperature_dew': {'method':
        '_temperature_dew', 'units': 'K'}, 'pressure_bubble': {'method':
        '_pressure_bubble', 'units': 'Pa'}, 'pressure_dew': {'method':
        '_pressure_dew', 'units': 'Pa'}, 'fug_vap': {'method': '_fug_vap',
        'units': 'Pa'}, 'fug_liq': {'method': '_fug_liq', 'units': 'Pa'},
        'dh_vap': {'method': '_dh_vap', 'units': 'J/mol'}, 'ds_vap': {
        'method': '_ds_vap', 'units': 'J/mol.K'}})
    obj.add_default_units({'time': 's', 'length': 'm', 'mass': 'g',
        'amount': 'mol', 'temperature': 'K', 'energy': 'J', 'holdup': 'mol'})
|
icemac.ab.calendar-3.6.1
|
icemac.ab.calendar-3.6.1//src/icemac/ab/calendar/browser/renderer/table.pyfile:/src/icemac/ab/calendar/browser/renderer/table.py:function:render_event_time/render_event_time
|
def render_event_time(datetime, whole_day, request, no_time=''):
    """Render the event time, human readable and localised to the user.

    Whole-day events render as ``no_time``. German locales get an
    appended ' Uhr'.
    """
    if whole_day:
        return no_time
    short_time_formatter = request.locale.dates.getFormatter('time', 'short')
    rendered = short_time_formatter.format(datetime)
    if request.locale.id.language == 'de':
        rendered = rendered + ' Uhr'
    return rendered
|
moca-0.4.3
|
moca-0.4.3//moca/helpers/seqstats.pyfile:/moca/helpers/seqstats.py:function:remove_flanking_scores/remove_flanking_scores
|
def remove_flanking_scores(all_scores, flank_length):
    """Return the center scores, removing the flanking ones.

    Parameters
    ----------
    all_scores: array_like
        An array containing all scores.
    flank_length: int
        Number of flanking sites on each side; must be positive.
    """
    assert flank_length > 0
    start, stop = flank_length, -flank_length
    return all_scores[start:stop]
|
PyEIS
|
PyEIS//PyEIS_Lin_KK.pyfile:/PyEIS_Lin_KK.py:function:KK_RC75/KK_RC75
|
def KK_RC75(w, Rs, R_values, t_values):
    """
    Kramers-Kronig Function: -RC-

    Series resistance plus 75 parallel RC elements evaluated at angular
    frequency ``w``:  Rs + sum_k R_k / (1 + j*w*t_k)  for k = 0..74.

    Kristian B. Knudsen ([email protected] / [email protected])

    Parameters
    ----------
    w : float or array_like
        Angular frequency.
    Rs : float
        Series resistance.
    R_values, t_values : sequence
        Resistances and time constants of the RC elements; only the
        first 75 entries of each are used, as in the original.
    """
    # Replaces the original 75-term hand-written expression with an
    # equivalent left-to-right summation — identical result (same
    # floating-point accumulation order), far less error-prone.
    total = Rs
    for R, t in zip(R_values[:75], t_values[:75]):
        total = total + R / (1 + w * 1.0j * t)
    return total
|
traitar3-0.1.1
|
traitar3-0.1.1//traitar/evaluation.pyclass:evaluate/recall_pos_conf
|
@staticmethod
def recall_pos_conf(conf):
    """Compute recall of the positive class: TP / (TP + FN).

    :param conf: confusion counts as a ``(TN, FP, FN, TP)`` tuple
    :return: recall, or NaN when there are no positive samples
    """
    TN, FP, FN, TP = conf
    if TP + FN == 0:
        # Bug fix: the original computed float('nan') but discarded it,
        # then fell through to a division by zero. Return NaN instead.
        return float('nan')
    return TP / float(TP + FN)
|
pymesync-0.2.0
|
pymesync-0.2.0//pymesync/mock_pymesync.pyfile:/pymesync/mock_pymesync.py:function:delete_object/delete_object
|
def delete_object():
    """Mock deleting an object from TimeSync; always reports success."""
    response = {'status': 200}
    return [response]
|
enablebanking
|
enablebanking//models/party_identification.pyclass:PartyIdentification/__ne__
|
def __ne__(A, other):
    """Return True if both objects are not equal."""
    equal = A == other
    return not equal
|
pysysbot-0.3.0
|
pysysbot-0.3.0//pysysbot/replies.pyfile:/pysysbot/replies.py:function:bytes2human/bytes2human
|
def bytes2human(n):
    """Convert a byte count to a human readable string (e.g. '1.0K')."""
    symbols = 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'
    # Walk from the largest unit down to 'K'; each unit is 2**((i+1)*10).
    for exponent in range(len(symbols) - 1, -1, -1):
        threshold = 1 << (exponent + 1) * 10
        if n >= threshold:
            return '%.1f%s' % (float(n) / threshold, symbols[exponent])
    # Below 1 KiB: report raw bytes.
    return '%sB' % n
|
yaplon-1.1.0
|
yaplon-1.1.0//yaplon/file_strip/comments.pyclass:Comments/add_style
|
@classmethod
def add_style(cls, style, fn):
    """Register a comment style handler on the class.

    Sets ``fn`` as the attribute named ``style`` and records the style
    name, unless an attribute of that name already exists directly on
    the class.
    """
    if style in cls.__dict__:
        return
    setattr(cls, style, fn)
    cls.styles.append(style)
|
drizzlepac-3.1.6
|
drizzlepac-3.1.6//drizzlepac/mapreg.pyfile:/drizzlepac/mapreg.py:function:extension_from_filename/extension_from_filename
|
def extension_from_filename(filename):
    """
    Parse out the filename from any specified extension.

    Returns ``(rootname, extname)`` where ``extname`` is the stripped
    string inside ``[...]``, or None when no extension is specified.

    Raises RuntimeError when ']' appears before '['.
    """
    open_idx = filename.find('[')
    if open_idx <= 0:
        # No bracketed extension (or the name starts with '[').
        return filename, None
    close_idx = filename.find(']')
    if close_idx < open_idx:
        raise RuntimeError(
            "Incorrect extension specification in file name '%s'." %
            filename)
    root = filename[:open_idx]
    extension = filename[open_idx + 1:close_idx].strip()
    return root, extension
|
OASYS1-XOPPY-1.0.75
|
OASYS1-XOPPY-1.0.75//orangecontrib/xoppy/util/script/variable.pyclass:StringVariable/is_primitive
|
@staticmethod
def is_primitive():
    """Return `False`: string variables are not stored as floats.

    Primitive variables are those whose values map directly onto
    floats; string values do not, so this is a constant False.
    """
    return False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.