repo | path | code
---|---|---|
pybc_1-1.0
|
pybc_1-1.0//pybc_1/miscellaneous.pyfile:/pybc_1/miscellaneous.py:function:__help_date/__help_date
|
def __help_date():
"""Function gives description and usage about 'date' command
"""
print('Description: This prints the current date and time.')
print('Usage: date')
|
taskcluster
|
taskcluster//utils.pyfile:/utils.py:function:scopeMatch/scopeMatch
|
def scopeMatch(assumedScopes, requiredScopeSets):
"""
    Take a list of assumed scopes and a list of required scope sets in
    disjunctive normal form, and check if any of the required scope sets are
    satisfied.
Example:
requiredScopeSets = [
["scopeA", "scopeB"],
["scopeC"]
]
In this case assumed_scopes must contain, either:
"scopeA" AND "scopeB", OR just "scopeC".
"""
for scopeSet in requiredScopeSets:
for requiredScope in scopeSet:
for scope in assumedScopes:
if scope == requiredScope:
break
                if scope.endswith('*') and requiredScope.startswith(scope[:-1]):
break
else:
break
else:
return True
return False
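# Usage sketch (hypothetical scope names, mirroring the docstring example):
# the second required set is satisfied exactly, the first by a trailing-'*' wildcard.
assert scopeMatch(['scopeC'], [['scopeA', 'scopeB'], ['scopeC']])
assert scopeMatch(['scope*'], [['scopeA', 'scopeB']])
assert not scopeMatch(['scopeA'], [['scopeA', 'scopeB'], ['scopeC']])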
|
modestpy
|
modestpy//utilities/figures.pyfile:/utilities/figures.py:function:get_figure/get_figure
|
def get_figure(ax):
"""
Retrieves figure from axes. Axes can be either an instance
of Matplotlib.Axes or a 1D/2D array of Matplotlib.Axes.
:param ax: Axes or vector/array of Axes
:return: Matplotlib.Figure
"""
fig = None
try:
fig = ax.get_figure()
except AttributeError:
try:
fig = ax[0].get_figure()
except AttributeError:
fig = ax[0][0].get_figure()
return fig
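# Usage sketch (assumes matplotlib is installed; not part of modestpy itself):
# works for a single Axes as well as for the 2D Axes array returned by subplots().
import matplotlib.pyplot as plt
fig1, ax1 = plt.subplots()
assert get_figure(ax1) is fig1
fig2, axes2 = plt.subplots(2, 2)
assert get_figure(axes2) is fig2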
|
lifelib
|
lifelib//projects/solvency2/projection.pyfile:/projects/solvency2/projection.py:function:SizeBenefitMat/SizeBenefitMat
|
def SizeBenefitMat(t):
"""Maturity benefit per policy"""
return 0
|
crestdsl-0.5.2
|
crestdsl-0.5.2//crestdsl/sourcehelper.pyfile:/crestdsl/sourcehelper.py:function:is_descendant_of_type/is_descendant_of_type
|
def is_descendant_of_type(ast_node, reference_type):
"""check if one of the ancestors is an instance of a type"""
tmp = ast_node
try:
while tmp is not None:
tmp = tmp.parent
if isinstance(tmp, reference_type):
return True
    except AttributeError:
        return False
    return False  # reached the root without finding an ancestor of the given type
|
nbsite
|
nbsite//examples/sites/holoviews/holoviews/core/options.pyclass:StoreOptions/record_skipped_option
|
@classmethod
def record_skipped_option(cls, error):
"""
Record the OptionError associated with a skipped option if
currently recording
"""
if cls._errors_recorded is not None:
cls._errors_recorded.append(error)
|
pcu_nlp
|
pcu_nlp//pcu_nlp.pyfile:/pcu_nlp.py:function:getSentiment/getSentiment
|
def getSentiment(doc):
"""Get document global sentiment.
Parameter :
doc -- Spacy document
Return :
sentiment -- document global sentiment
"""
return doc.sentiment
|
nabu
|
nabu//utils.pyfile:/utils.py:function:convert_index/convert_index
|
def convert_index(idx, idx_max, default_val):
"""
Convert an index (possibly negative or None) to a non-negative integer.
Parameters
----------
idx: int or None
Index
idx_max: int
Maximum value (upper bound) for the index.
default_val: int
Default value if idx is None
Examples
---------
Given an integer `M`, `J = convert_index(i, M, XX)` returns an integer in the
mathematical range [0, M] (or Python `range(0, M)`). `J` can then be used
to define an upper bound of a range.
"""
if idx is None:
return default_val
if idx > idx_max:
return idx_max
if idx < 0:
return idx % idx_max
return idx
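# Usage sketch (hypothetical values): slice-style index normalisation.
assert convert_index(None, 10, 10) == 10  # None falls back to the default
assert convert_index(3, 10, 10) == 3      # in-range index is returned unchanged
assert convert_index(15, 10, 10) == 10    # clipped to the upper bound
assert convert_index(-2, 10, 10) == 8     # negative indices wrap around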
|
FIDL
|
FIDL//decompiler_utils.pyfile:/decompiler_utils.py:function:citem2higher/citem2higher
|
def citem2higher(citem):
"""This gets the higher representation of a given :class:``citem``, that is, a :class:``cinsn_t`` or :class:``cexpr_t``
:param citem: a :class:``citem`` object
:type citem: :class:``citem``
"""
if citem.is_expr():
return citem.cexpr
return citem.cinsn
|
siggen-1.0.2
|
siggen-1.0.2//siggen/utils.pyfile:/siggen/utils.py:function:drop_prefix_and_return_type/drop_prefix_and_return_type
|
def drop_prefix_and_return_type(function):
"""Takes the function value from a frame and drops prefix and return type
For example::
static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
^ ^^^^^^ return type
prefix
    This gets changed to this::
Allocator<MozJemallocBase>::malloc(unsigned __int64)
This tokenizes on space, but takes into account types, generics, traits,
function arguments, and other parts of the function signature delimited by
things like `', <>, {}, [], and () for both C/C++ and Rust.
After tokenizing, this returns the last token since that's comprised of the
function name and its arguments.
:arg function: the function value in a frame to drop bits from
:returns: adjusted function value
"""
DELIMITERS = {'(': ')', '{': '}', '[': ']', '<': '>', '`': "'"}
OPEN = DELIMITERS.keys()
CLOSE = DELIMITERS.values()
tokens = []
levels = []
current = []
for i, char in enumerate(function):
if char in OPEN:
levels.append(char)
current.append(char)
elif char in CLOSE:
if levels and DELIMITERS[levels[-1]] == char:
levels.pop()
current.append(char)
else:
current.append(char)
elif levels:
current.append(char)
elif char == ' ':
tokens.append(''.join(current))
current = []
else:
current.append(char)
if current:
tokens.append(''.join(current))
while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):
tokens = tokens[:-2] + [' '.join(tokens[-2:])]
return tokens[-1]
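# Usage sketch, using the signature from the docstring above:
sig = 'static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)'
assert drop_prefix_and_return_type(sig) == 'Allocator<MozJemallocBase>::malloc(unsigned __int64)'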
|
kur
|
kur//containers/operators/for_loop.pyclass:ForLoop/get_container_name
|
@classmethod
def get_container_name(cls):
""" Returns the name of the container class.
Obviously, "for" is an overloaded word in programming. So rather
than risk problems with statements like `import for`, it is better
to just have a different name that is used in parsing containers.
"""
return 'for'
|
seaborn-flask-server-0.0.4
|
seaborn-flask-server-0.0.4//seaborn/flask_server/decorators.pyfile:/seaborn/flask_server/decorators.py:function:register/register
|
def register(database, debug, relative_path=''):
"""
This will store flask global variables the decorators need
:param database: SQLAlchemy database object
:param debug: bool if debug is True
:param relative_path: str of the relative path for
reporting api call functions
:return: None
"""
global db, DEBUG, RELATIVE_PATH
db = database
DEBUG = debug
RELATIVE_PATH = relative_path
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/databasemigrationservice.pyfile:/pyboto3/databasemigrationservice.py:function:describe_refresh_schemas_status/describe_refresh_schemas_status
|
def describe_refresh_schemas_status(EndpointArn=None):
"""
Returns the status of the RefreshSchemas operation.
See also: AWS API Documentation
:example: response = client.describe_refresh_schemas_status(
EndpointArn='string'
)
:type EndpointArn: string
:param EndpointArn: [REQUIRED]
The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.
:rtype: dict
:return: {
'RefreshSchemasStatus': {
'EndpointArn': 'string',
'ReplicationInstanceArn': 'string',
'Status': 'successful'|'failed'|'refreshing',
'LastRefreshDate': datetime(2015, 1, 1),
'LastFailureMessage': 'string'
}
}
"""
pass
|
weblate
|
weblate//trans/util.pyfile:/trans/util.py:function:get_state_css/get_state_css
|
def get_state_css(unit):
"""Return state flags."""
flags = []
if unit.fuzzy:
flags.append('state-need-edit')
elif not unit.translated:
flags.append('state-empty')
elif unit.readonly:
flags.append('state-readonly')
elif unit.approved:
flags.append('state-approved')
elif unit.translated:
flags.append('state-translated')
if unit.has_failing_check:
flags.append('state-alert')
if unit.has_comment:
flags.append('state-comment')
if unit.has_suggestion:
flags.append('state-suggest')
return flags
|
braulio
|
braulio//cli.pyfile:/cli.py:function:tag_pattern_option_validator/tag_pattern_option_validator
|
def tag_pattern_option_validator(ctx, param, value):
"""The provided string must contain **{version}** placeholder in order to
be valid. Otherwise :class:`click.UsageError` is raised.
"""
if not value or '{version}' not in value:
ctx.fail('Missing {version} placeholder in tag_pattern.')
return value
|
sif-0.0.11
|
sif-0.0.11//sif/utils/fileio.pyfile:/sif/utils/fileio.py:function:read_file/read_file
|
def read_file(filename, mode='r', readlines=True):
"""write_file will open a file, "filename" and write content, "content"
and properly close the file
"""
with open(filename, mode) as filey:
if readlines is True:
content = filey.readlines()
else:
content = filey.read()
return content
|
lifelib
|
lifelib//projects/ifrs17sim/projection.pyfile:/projects/ifrs17sim/projection.py:function:ReserveUernPremEnd/ReserveUernPremEnd
|
def ReserveUernPremEnd(t):
"""Unearned Premium: End of period"""
return 0
|
qcl-0.0.3
|
qcl-0.0.3//qcl/parse.pyfile:/qcl/parse.py:function:parse/parse
|
def parse(source):
""" Guess the identity of a particular file or data type and parse it.
:source: Single file or data structure with some molecular data contained within
:returns: ccData object with the molecular data parsed from source
TODO
"""
pass
|
ensae_teaching_cs
|
ensae_teaching_cs//td_1a/construction_classique.pyfile:/td_1a/construction_classique.py:function:construit_matrice_carree/construit_matrice_carree
|
def construit_matrice_carree(n):
"""
    This function builds a square matrix filled with zeros,
    represented as a list of lists.
    @param n dimension of the square matrix
    """
    return [[0 for i in range(n)] for j in range(n)]
|
deepcpg
|
deepcpg//models/utils.pyfile:/models/utils.py:function:encode_replicate_names/encode_replicate_names
|
def encode_replicate_names(replicate_names):
"""Encode list of replicate names as single string.
.. note:: Deprecated
This function is used to support legacy models and will be removed in
the future.
"""
return '--'.join(replicate_names)
|
pytzer
|
pytzer//parameters.pyfile:/parameters.py:function:psi_Na_AsO4_Cl_M83/psi_Na_AsO4_Cl_M83
|
def psi_Na_AsO4_Cl_M83(T, P):
"""c-a-a': sodium arsenate chloride [M83]."""
psi = 0
valid = T == 298.15
return psi, valid
|
manuscripts-0.2.20
|
manuscripts-0.2.20//manuscripts2/report.pyfile:/manuscripts2/report.py:function:create_csv/create_csv
|
def create_csv(filename, csv_data, mode='w'):
"""
Create a CSV file with the given data and store it in the
file with the given name.
:param filename: name of the file to store the data in
    :param csv_data: the data to be stored in the file
:param mode: the mode in which we have to open the file. It can
be 'w', 'a', etc. Default is 'w'
"""
    with open(filename, mode) as f:
        # str.replace returns a new string, so reassign to keep the escaping
        csv_data = csv_data.replace('_', '\\_')
        f.write(csv_data)
|
pya2l-0.0.1
|
pya2l-0.0.1//pya2l/parser/grammar/parser.pyclass:A2lParser/p_sample_rate
|
@staticmethod
def p_sample_rate(p):
"""sample_rate : SAMPLE_RATE SINGLE
| SAMPLE_RATE TRIPLE"""
p[0] = p[2]
|
pytileproj-0.0.12
|
pytileproj-0.0.12//pytileproj/geometry.pyfile:/pytileproj/geometry.py:function:intersect_geometry/intersect_geometry
|
def intersect_geometry(geometry1, geometry2):
"""
returns the intersection of two point or polygon geometries
Parameters
----------
geometry1, geometry2 : OGRGeometry
geometry objects
Returns
-------
intersection : OGRGeometry
a geometry representing the intersection area
"""
geometry1c = geometry1.Clone()
geometry2c = geometry2.Clone()
geometry1 = None
geometry2 = None
intersection = geometry1c.Intersection(geometry2c)
return intersection
|
ncluster
|
ncluster//aws_util.pyfile:/aws_util.py:function:extract_attr_for_match/extract_attr_for_match
|
def extract_attr_for_match(items, **kwargs):
"""Helper method to get attribute value for an item matching some criterion.
Specify target criteria value as dict, with target attribute having value -1
Example:
to extract state of vpc matching given vpc id
response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'}]
extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c') #=> 'available'"""
query_arg = None
for arg, value in kwargs.items():
if value == -1:
assert query_arg is None, 'Only single query arg (-1 valued) is allowed'
query_arg = arg
result = []
filterset = set(kwargs.keys())
for item in items:
match = True
        assert filterset.issubset(item.keys()), (
            'Filter set contained %s which was not in record %s' % (
                filterset.difference(item.keys()), item))
for arg in item:
if arg == query_arg:
continue
if arg in kwargs:
if item[arg] != kwargs[arg]:
match = False
break
if match:
result.append(item[query_arg])
    assert len(result) <= 1, '%d values matched %s, only allow 1' % (
        len(result), kwargs)
if result:
return result[0]
return None
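# Usage sketch (hypothetical AWS-style records): the -1 valued kwarg marks the
# attribute to extract, the remaining kwargs are equality filters.
response = [{'State': 'available', 'VpcId': 'vpc-2bb1584c'},
            {'State': 'pending', 'VpcId': 'vpc-11111111'}]
assert extract_attr_for_match(response, State=-1, VpcId='vpc-2bb1584c') == 'available'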
|
libensemble-0.6.0
|
libensemble-0.6.0//examples/sim_funcs/job_control_hworld.pyfile:/examples/sim_funcs/job_control_hworld.py:function:six_hump_camel_func/six_hump_camel_func
|
def six_hump_camel_func(x):
"""
Definition of the six-hump camel
"""
x1 = x[0]
x2 = x[1]
term1 = (4 - 2.1 * x1 ** 2 + x1 ** 4 / 3) * x1 ** 2
term2 = x1 * x2
term3 = (-4 + 4 * x2 ** 2) * x2 ** 2
return term1 + term2 + term3
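# Usage sketch: the six-hump camel function has a global minimum of roughly
# -1.0316 near (0.0898, -0.7126) (and at the point mirrored through the origin).
import math
assert math.isclose(six_hump_camel_func([0.0898, -0.7126]), -1.0316, abs_tol=1e-3)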
|
p4swamp-0.5
|
p4swamp-0.5//p4swamp/p4swamp.pyfile:/p4swamp/p4swamp.py:function:_parse_bin_arg/_parse_bin_arg
|
def _parse_bin_arg(kwargs):
"""
    Returns the value if the 'binary' argument is found, otherwise returns
    False. Raises a TypeError if the argument is not a boolean.
"""
if 'binary' in kwargs:
binary_arg = kwargs['binary']
if not isinstance(binary_arg, bool):
raise TypeError("'binary' input argument must be of type bool!")
return binary_arg
else:
return False
|
pyocd-0.26.0
|
pyocd-0.26.0//pyocd/utility/mask.pyfile:/pyocd/utility/mask.py:function:same/same
|
def same(d1, d2):
"""! @brief Test whether two sequences contain the same values.
Unlike a simple equality comparison, this function works as expected when the two sequences
are of different types, such as a list and bytearray. The sequences must return
compatible types from indexing.
"""
if len(d1) != len(d2):
return False
for i in range(len(d1)):
if d1[i] != d2[i]:
return False
return True
|
applaunchservices
|
applaunchservices//_dummy.pyfile:/_dummy.py:function:get_UTI_handler/get_UTI_handler
|
def get_UTI_handler(uniform_type_identifier, role):
"""Get handler for given uniform type identifier and role."""
return
|
mxnet-1.6.0.data
|
mxnet-1.6.0.data//purelib/mxnet/contrib/onnx/onnx2mx/_op_translations.pyfile:/purelib/mxnet/contrib/onnx/onnx2mx/_op_translations.py:function:reciprocal/reciprocal
|
def reciprocal(attrs, inputs, proto_obj):
"""Returns the reciprocal of the argument, element-wise."""
return 'reciprocal', attrs, inputs
|
sasmodels-1.0.2
|
sasmodels-1.0.2//sasmodels/generate.pyfile:/sasmodels/generate.py:function:_split_translation/_split_translation
|
def _split_translation(translation):
"""
Process the *translation* string, which is a sequence of assignments.
Blanks and comments (c-style and python-style) are stripped.
Conditional expressions should use C syntax (! || && ? :) not python.
"""
assignments = []
for line in translation.split('\n'):
code = line.split('#', 1)[0].split('//', 1)[0].strip()
if not code:
continue
parts = code.split('=', 1)
        if len(parts) == 1:
            raise ValueError("translation expected 'var=expr' but got %r" % line)
var, expr = parts
assignments.append((var.strip(), expr.strip()))
return assignments
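# Usage sketch (hypothetical translation string with comments and a blank line):
text = """
radius = r_core      # python-style comment
length = 2*r_core    // c-style comment
"""
assert _split_translation(text) == [('radius', 'r_core'), ('length', '2*r_core')]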
|
dgl_cu90-0.4.3.post2.data
|
dgl_cu90-0.4.3.post2.data//purelib/dgl/backend/backend.pyfile:/purelib/dgl/backend/backend.py:function:unsorted_1d_segment_mean/unsorted_1d_segment_mean
|
def unsorted_1d_segment_mean(input, seg_id, n_segs, dim):
"""Computes the mean along segments of a tensor.
Equivalent to tf.unsorted_segment_mean, but seg_id is required to be a
1D tensor.
    Note that segments that never appear in seg_id will have a result of 0.
Parameters
----------
input : Tensor
The input tensor
seg_id : 1D Tensor
The segment IDs whose values are between 0 and n_segs - 1. Should
have the same length as input.
n_segs : int
Number of distinct segments
dim : int
Dimension to average on
Returns
-------
Tensor
The result
"""
pass
|
punx
|
punx//nxdl_schema.pyfile:/nxdl_schema.py:function:get_xml_namespace_dictionary/get_xml_namespace_dictionary
|
def get_xml_namespace_dictionary():
"""return the NeXus XML namespace dictionary"""
    return dict(nx='http://definition.nexusformat.org/nxdl/3.1',
                xs='http://www.w3.org/2001/XMLSchema')
|
thoraxe
|
thoraxe//transcript_info/transcript_info.pyfile:/transcript_info/transcript_info.py:function:_is_incomplete_cds/_is_incomplete_cds
|
def _is_incomplete_cds(row, start_exon, end_exon):
"""
Return True if there are signals of an incomplete CDS coming from the exon.
In particular, if the start or end phase is different from 0 or -1.
"""
if end_exon:
return row['EndPhase'] in {1, 2}
if start_exon:
return row['StartPhase'] in {1, 2}
return False
|
geneblocks-1.2.1
|
geneblocks-1.2.1//geneblocks/sequence_modification_utils.pyfile:/geneblocks/sequence_modification_utils.py:function:insert/insert
|
def insert(seq, pos, inserted):
"""Return the sequence with ``inserted`` inserted, starting at index 'pos'
"""
return seq[:pos] + inserted + seq[pos:]
|
astromodels-1.2.1
|
astromodels-1.2.1//versioneer.pyfile:/versioneer.py:function:render_git_describe_long/render_git_describe_long
|
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces['closest-tag']:
rendered = pieces['closest-tag']
rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
else:
rendered = pieces['short']
if pieces['dirty']:
rendered += '-dirty'
return rendered
|
gocats
|
gocats//gocats.pyfile:/gocats.py:function:json_format_graph/json_format_graph
|
def json_format_graph(graph_object, graph_identifier):
"""Creates a dictionary representing the edges in the graph and formats it in such a way that it can be encoded into JSON for comparing the graph objects between versions of GOcats."""
json_dict = dict()
for edge in graph_object.edge_list:
        json_dict[str(graph_identifier) + '_' + edge.json_edge[0]] = edge.json_edge[1]
return json_dict
|
astwro
|
astwro//starlist/daofiles.pyfile:/starlist/daofiles.py:function:parse_dao_hdr/parse_dao_hdr
|
def parse_dao_hdr(hdr, val, line_prefix=''):
"""
    Creates a DAO header dict from two lines of a file header.
:param str hdr: first line
:param str val: second line
:param line_prefix: expected line prefix
:return: dict with dao header compatible with StarList.DAO_header
"""
hdr = hdr[len(line_prefix):]
val = val[len(line_prefix):]
return dict(zip(hdr.split(), val.split()))
|
pg_jts-0.0.1
|
pg_jts-0.0.1//pg_jts/pg_database.pyfile:/pg_jts/pg_database.py:function:get_sequences/get_sequences
|
def get_sequences(schema_name):
"""
Return a list of sequences within a schema with given name.
NOT IMPLEMENTED; TODO:
SELECT * FROM information_schema.sequences;
"""
pass
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/cognitoidentityprovider.pyfile:/pyboto3/cognitoidentityprovider.py:function:delete_user/delete_user
|
def delete_user(AccessToken=None):
"""
Allows a user to delete one's self.
See also: AWS API Documentation
:example: response = client.delete_user(
AccessToken='string'
)
:type AccessToken: string
:param AccessToken: [REQUIRED]
The access token from a request to delete a user.
"""
pass
|
delphixpy
|
delphixpy//v1_10_6/common.pyfile:/v1_10_6/common.py:function:validate_format/validate_format
|
def validate_format(*_arg):
"""
This method can be overridden with format validation logic.
"""
return True
|
mudicom-0.1.2
|
mudicom-0.1.2//mudicom/lookup.pyfile:/mudicom/lookup.py:function:VR/VR
|
def VR(VR=None, description=None):
""" Value Representation (VR) <-> Description lookup.
:param VR: Takes the VR and returns its description
:param description: Take the description of a VR and returns the VR
"""
    value_repr = {
        'AE': 'Application Entity', 'AS': 'Age String', 'AT': 'Attribute Tag',
        'CS': 'Code String', 'DA': 'Date', 'DS': 'Decimal String',
        'DT': 'Date/Time', 'FL': 'Floating Point Single (4 bytes)',
        'FD': 'Floating Point Double (8 bytes)', 'IS': 'Integer String',
        'LO': 'Long String', 'LT': 'Long Text', 'OB': 'Other Byte',
        'OF': 'Other Float', 'OW': 'Other Word', 'PN': 'Person Name',
        'SH': 'Short String', 'SL': 'Signed Long', 'SQ': 'Sequence of Items',
        'SS': 'Signed Short', 'ST': 'Short Text', 'TM': 'Time',
        'UI': 'Unique Identifier', 'UL': 'Unsigned Long', 'UN': 'Unknown',
        'US': 'Unsigned Short', 'UT': 'Unlimited Text'}
assert VR or description, 'Either VR or description required to map VR'
if VR is not None:
VR = VR.upper()
if VR in value_repr:
return value_repr[VR]
    for key, value in value_repr.items():
if description == value:
return key
return None
|
pynogram
|
pynogram//utils/other.pyfile:/utils/other.py:function:from_two_powers/from_two_powers
|
def from_two_powers(numbers):
"""
Construct a number from the powers of 2
"""
result = 0
for num in numbers:
result |= num
return result
|
wmagent-1.1.19.2
|
wmagent-1.1.19.2//src/python/WMCore/MicroService/Unified/Common.pyfile:/src/python/WMCore/MicroService/Unified/Common.py:function:teraBytes/teraBytes
|
def teraBytes(size):
"""Return size in TB"""
return float(size) / float(1024 ** 4)
|
vos
|
vos//vos.pyclass:Node/get_prop_value
|
@staticmethod
def get_prop_value(prop):
"""Pull out the value part of PROPERTY Element.
:param prop: an XML Element that represents a Node PROPERTY.
"""
return prop.text
|
golem
|
golem//core/utils.pyfile:/core/utils.py:function:choose_browser_by_precedence/choose_browser_by_precedence
|
def choose_browser_by_precedence(cli_browsers=None, suite_browsers=None,
settings_default_browser=None):
""" Defines which browser(s) to use by order of precedence
The order is the following:
1. browsers defined by CLI
2. browsers defined inside a suite
3. 'default_driver' setting
4. chrome
"""
if cli_browsers:
browsers = cli_browsers
elif suite_browsers:
browsers = suite_browsers
elif settings_default_browser:
browsers = [settings_default_browser]
else:
browsers = ['chrome']
return browsers
|
MAnorm-1.3.0
|
MAnorm-1.3.0//manorm/stats.pyfile:/manorm/stats.py:function:ma_to_xy/ma_to_xy
|
def ma_to_xy(m, a):
"""Convert (M, A) value back to read counts/densities of two samples.
Parameters
----------
m : float
M value.
a : float
        A value.
Returns
-------
x : float
Converted read count/density in sample 1.
y : float
Converted read count/density in sample 2.
"""
x = 2 ** (a + m / 2)
y = 2 ** (a - m / 2)
return x, y
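# Usage sketch (hypothetical M/A values): inverse of m = log2(x) - log2(y),
# a = (log2(x) + log2(y)) / 2.
x, y = ma_to_xy(2.0, 5.0)
assert (x, y) == (64.0, 16.0)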
|
selinon-1.1.0
|
selinon-1.1.0//selinon/selective_run_function.pyclass:SelectiveRunFunction/construct_import_name
|
@staticmethod
def construct_import_name(name, import_path):
"""Construct import name that will be used in generated config.
:param name: name of the function that will be imported
:param import_path: import that should be used to import function
:return: string representation of function that will be used in generated config
"""
    return '_{import_path}_{name}'.format(
        import_path=import_path.replace('.', '_'), name=name)
|
fireworks-ml-0.3.9
|
fireworks-ml-0.3.9//examples/mnist.pyfile:/examples/mnist.py:function:normalize_batch/normalize_batch
|
def normalize_batch(batch, column='examples'):
""" Normalizes pixel intensities to fall between 0 and 1. """
batch[column] /= 255.0
return batch
|
fake-bpy-module-2.80-20200428
|
fake-bpy-module-2.80-20200428//bpy/ops/curve.pyfile:/bpy/ops/curve.py:function:decimate/decimate
|
def decimate(ratio: float=1.0):
"""Simplify selected curves
:param ratio: Ratio
:type ratio: float
"""
pass
|
nucypher-2.1.0b5
|
nucypher-2.1.0b5//nucypher/config/node.pyclass:CharacterConfiguration/generate
|
@classmethod
def generate(cls, password: str, *args, **kwargs):
"""Shortcut: Hook-up a new initial installation and write configuration file to the disk"""
node_config = cls(*args, dev_mode=False, **kwargs)
node_config.initialize(password=password)
node_config.to_configuration_file()
return node_config
|
lifx-photons-core-0.25.0
|
lifx-photons-core-0.25.0//photons_app/helpers.pyfile:/photons_app/helpers.py:function:silent_reporter/silent_reporter
|
def silent_reporter(res):
"""
A generic reporter for asyncio tasks that doesn't log errors.
For example:
.. code-block:: python
t = loop.create_task(coroutine())
t.add_done_callback(hp.silent_reporter)
This means that exceptions are *not* logged to the terminal and you won't
get warnings about tasks not being looked at when they finish.
This method will return True if there was no exception and None otherwise.
It also handles and silences CancelledError.
"""
if not res.cancelled():
exc = res.exception()
if not exc:
res.result()
return True
|
GSAS-II-WONDER_linux-1.0.1
|
GSAS-II-WONDER_linux-1.0.1//GSAS-II-WONDER/GSASIIlattice.pyfile:/GSAS-II-WONDER/GSASIIlattice.py:function:Hx2Rh/Hx2Rh
|
def Hx2Rh(Hx):
"""needs doc string"""
Rh = [0, 0, 0]
itk = -Hx[0] + Hx[1] + Hx[2]
if itk % 3 != 0:
return 0
else:
Rh[1] = itk // 3
Rh[0] = Rh[1] + Hx[0]
Rh[2] = Rh[1] - Hx[1]
if Rh[0] < 0:
for i in range(3):
Rh[i] = -Rh[i]
return Rh
|
PyCO2SYS
|
PyCO2SYS//salts.pyfile:/salts.py:function:borate_C65/borate_C65
|
def borate_C65(Sal):
"""Total borate in mol/kg-sw following C65."""
return 0.0004106 * Sal / 35
|
dropbox-10.1.2
|
dropbox-10.1.2//dropbox/team_log.pyclass:EventType/account_capture_change_availability
|
@classmethod
def account_capture_change_availability(cls, val):
"""
Create an instance of this class set to the
``account_capture_change_availability`` tag with value ``val``.
:param AccountCaptureChangeAvailabilityType val:
:rtype: EventType
"""
return cls('account_capture_change_availability', val)
|
large-image-1.1.0
|
large-image-1.1.0//large_image/cache_util/cache.pyfile:/large_image/cache_util/cache.py:function:strhash/strhash
|
def strhash(*args, **kwargs):
"""
Generate a string hash value for an arbitrary set of args and kwargs. This
relies on the repr of each element.
:param args: arbitrary tuple of args.
:param kwargs: arbitrary dictionary of kwargs.
:returns: hashed string of the arguments.
"""
if kwargs:
return '%r,%r' % (args, sorted(kwargs.items()))
return '%r' % (args,)
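# Usage sketch: the hash is just the repr of the arguments, so keyword order
# does not matter but value types do.
assert strhash(1, 'a', x=2, y=3) == strhash(1, 'a', y=3, x=2)
assert strhash(1) != strhash('1')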
|
buildok-0.4.2
|
buildok-0.4.2//buildok/report.pyclass:Report/set_total_steps
|
@classmethod
def set_total_steps(cls, total_steps):
"""Set total steps value.
"""
cls.total_steps = total_steps
|
pandas
|
pandas//io/excel/_openpyxl.pyclass:_OpenpyxlWriter/_convert_to_side
|
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object.
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {'border_style': 'style'}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
|
bpaingest
|
bpaingest//projects/sepsis/ingest.pyclass:BaseSepsisAnalysedMetadata/google_drive_track_to_object
|
@classmethod
def google_drive_track_to_object(cls, trk, exclude=[]):
"""copy over the relevant bits of a sepsis google drive track object, to a package object"""
    obj = {'facility': trk.facility,
           'data_type': trk.data_type_pre_pilot_pilot_or_main_dataset,
           'ticket': trk.ccg_jira_ticket}
for field in ('date_of_transfer', 'taxon_or_organism',
'strain_or_isolate', 'growth_media', 'folder_name',
'date_of_transfer_to_archive', 'file_count'):
if field in exclude:
continue
if hasattr(trk, field):
obj[field] = getattr(trk, field)
return obj
|
dorthrithil-networkx-1.11
|
dorthrithil-networkx-1.11//networkx/algorithms/minors.pyfile:/networkx/algorithms/minors.py:function:peek/peek
|
def peek(iterable):
"""Returns an arbitrary element of ``iterable`` without removing it.
This is most useful for peeking at an arbitrary element of a set::
>>> peek({3, 2, 1})
1
>>> peek('hello')
'h'
"""
return next(iter(iterable))
|
thug-1.6.1
|
thug-1.6.1//thug/ThugAPI/IThugAPI.pyclass:IThugAPI/set_elasticsearch_logging
|
def set_elasticsearch_logging():
"""
set_elasticsearch_logging
Enable ElasticSearch logging mode
@return: None
"""
|
regraph
|
regraph//backends/neo4j/cypher_utils/generic.pyfile:/backends/neo4j/cypher_utils/generic.py:function:get_edges/get_edges
|
def get_edges(source_label, target_label, edge_label, data=False):
"""Generate query for getting all the edges of the graph.
Parameters
----------
source_label : optional
Label of the source nodes to match
target_label : optional
Label of the target nodes to match
edge_label : iterable, optional
Label of the edges to match
"""
if data:
query = (
"""MATCH (n:{})-[r:{}]->(m:{})
RETURN n.id as source_id, m.id as target_id, properties(r) as attrs
"""
.format(source_label, edge_label, target_label))
else:
query = (
'MATCH (n:{})-[r:{}]->(m:{})\nRETURN n.id as source_id, m.id as target_id\n'
.format(source_label, edge_label, target_label))
return query
|
invenio_explicit_acls
|
invenio_explicit_acls//actors/mixins.pyclass:UserMixin/get_elasticsearch_schema
|
@classmethod
def get_elasticsearch_schema(clz, _es_version):
"""
Returns the elasticsearch schema for the _invenio_explicit_acls property.
The property looks like::
_invenio_explicit_acls [{
"timestamp": "...when the ACL has been applied to the resource",
"acl": <id of the acl>,
"operation": name of the operation
user: [1, 2, 3]
}]
:return:
"""
return {'type': 'integer'}
|
monero_glue
|
monero_glue//xmr/sub/tsx_helper.pyfile:/xmr/sub/tsx_helper.py:function:has_payment_id/has_payment_id
|
def has_payment_id(extra_nonce):
"""
Returns true if payment id is present
:param extra_nonce:
:return:
"""
return len(extra_nonce) == 33 and extra_nonce[0] == 0
|
forgi-2.0.2
|
forgi-2.0.2//forgi/visual/fornac.pyfile:/forgi/visual/fornac.py:function:nucleotide_colors_to_fornac_color_string/nucleotide_colors_to_fornac_color_string
|
def nucleotide_colors_to_fornac_color_string(nucleotide_colors):
"""
Convert a dictionary of per nucleotide colors to a fornac
color string.
:param nucleotide_colors: A dictionary with nucleotide numbers as keys and colors as values.
(e.g. {1: (255,0,0), 2: (255,255,0)})
    :return: A color string (e.g. "1:rgb(255,0,0) 2:rgb(255,255,0)")
"""
color_string = ''
for key in nucleotide_colors:
        color_string += '{}:rgb({},{},{}) '.format(
            key, nucleotide_colors[key][0], nucleotide_colors[key][1],
            nucleotide_colors[key][2])
return color_string
|
scarf
|
scarf//io.pyfile:/io.py:function:load_excel/load_excel
|
def load_excel(filename):
"""Obtain matching instance from excel."""
raise NotImplementedError('Developer has been lazy.')
|
Pillow-7.1.2
|
Pillow-7.1.2//src/PIL/Image.pyfile:/src/PIL/Image.py:function:composite/composite
|
def composite(image1, image2, mask):
"""
Create composite image by blending images using a transparency mask.
:param image1: The first image.
:param image2: The second image. Must have the same mode and
size as the first image.
:param mask: A mask image. This image can have mode
"1", "L", or "RGBA", and must have the same size as the
other two images.
"""
image = image2.copy()
image.paste(image1, None, mask)
return image
|
PyLTSpice-1.0.post4
|
PyLTSpice-1.0.post4//PyLTSpice/LTSpiceBatch.pyfile:/PyLTSpice/LTSpiceBatch.py:function:sweep/sweep
|
def sweep(start, stop, step=1):
"""Generator function to be used in sweeps.
    Advantages over the built-in range function:
- Supports floating point arguments
- Supports both up and down sweeps
- Less memory footprint for large sweeps"""
inc = 0
val = start
if start < stop:
while val <= stop:
yield val
inc += 1
val = start + inc * step
    elif start > stop:
        if step > 0:
            step = -step  # added guard: a down sweep needs a decreasing step
        while val >= stop:
            yield val
            inc += 1
            val = start + inc * step
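# Usage sketch: supports floating-point steps (and, with a negative step, down sweeps).
assert list(sweep(1, 3)) == [1, 2, 3]
assert list(sweep(0, 1, 0.25)) == [0, 0.25, 0.5, 0.75, 1.0]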
|
openfisca_france
|
openfisca_france//model/prelevements_obligatoires/impot_revenu/reductions_impot.pyclass:rehab/formula_2017_01_01
|
def formula_2017_01_01(foyer_fiscal, period, parameters):
"""
    Renovation work on tourist residences ("travaux de réhabilitation des résidences de tourisme")
2017
"""
depenses_2017 = foyer_fiscal('f7xx', period)
P = parameters(period).impot_revenu.reductions_impots.rehab
return P.taux * depenses_2017
|
bacula_scripts-1.1.4
|
bacula_scripts-1.1.4//bacula_scripts/bacula_del_scatter.pyfile:/bacula_scripts/bacula_del_scatter.py:function:backuplevel/backuplevel
|
def backuplevel(x):
"""Returns backup level by looking for the words full, incremental and differential in the string"""
x = x.lower()
if 'full' in x:
return 'f'
elif 'diff' in x:
return 'd'
elif 'inc' in x:
return 'i'
else:
return None
|
Cython
|
Cython//Tempita/_tempita.pyfile:/Tempita/_tempita.py:function:find_position/find_position
|
def find_position(string, index, last_index, last_pos):
"""Given a string and index, return (line, column)"""
lines = string.count('\n', last_index, index)
if lines > 0:
column = index - string.rfind('\n', last_index, index)
else:
column = last_pos[1] + (index - last_index)
return last_pos[0] + lines, column
|
dijitso
|
dijitso//cache.pyfile:/cache.py:function:extract_function/extract_function
|
def extract_function(lines):
"""Extract function code starting at first line of lines."""
n = len(lines)
begin = 0
body_begin = begin
body_end = n
for i in range(begin, n):
if '{' in lines[i]:
body_begin = i
break
braces = 0
for i in range(body_begin, n):
if '{' in lines[i]:
braces += 1
if '}' in lines[i]:
braces -= 1
if braces == 0:
body_end = i
break
end = body_end + 1
sublines = lines[begin:end]
return ''.join(sublines)
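# Usage sketch (hypothetical C snippet, split into lines as readlines() would return):
lines = ['int add(int a, int b)\n',
         '{\n',
         '    return a + b;\n',
         '}\n',
         'int unused;\n']
assert extract_function(lines) == 'int add(int a, int b)\n{\n    return a + b;\n}\n'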
|
zoo
|
zoo//util/tf_graph_util.pyfile:/util/tf_graph_util.py:function:_bfs_for_reachable_nodes/_bfs_for_reachable_nodes
|
def _bfs_for_reachable_nodes(target_nodes, name_to_input_name):
"""Breadth first search for reachable nodes from target nodes."""
nodes_to_keep = set()
next_to_visit = target_nodes[:]
while next_to_visit:
node = next_to_visit[0]
del next_to_visit[0]
if node in nodes_to_keep:
continue
nodes_to_keep.add(node)
if node in name_to_input_name:
next_to_visit += name_to_input_name[node]
return nodes_to_keep
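# Usage sketch (hypothetical graph): each node name maps to its input node names.
name_to_input_name = {'out': ['mul'], 'mul': ['a', 'b'], 'a': [], 'b': [], 'dead': ['a']}
assert _bfs_for_reachable_nodes(['out'], name_to_input_name) == {'out', 'mul', 'a', 'b'}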
|
ScopeFoundry-1.1.1
|
ScopeFoundry-1.1.1//build/lib/ScopeFoundry/h5_io.pyfile:/build/lib/ScopeFoundry/h5_io.py:function:h5_save_lqcoll_to_attrs/h5_save_lqcoll_to_attrs
|
def h5_save_lqcoll_to_attrs(settings, h5group):
"""
take a LQCollection
and create attributes inside h5group
    :param settings: the LQCollection of logged quantities
:param h5group:
:return: None
"""
unit_group = h5group.create_group('units')
for lqname, lq in settings.as_dict().items():
try:
h5group.attrs[lqname] = lq.val
except:
h5group.attrs[lqname] = lq.ini_string_value()
if lq.unit:
unit_group.attrs[lqname] = lq.unit
|
tinflask-0.0.2
|
tinflask-0.0.2//tinflask/handlers.pyfile:/tinflask/handlers.py:function:ping/ping
|
def ping():
"""Handler that simply returns `pong` from a GET.
"""
return 'pong'
|
opal
|
opal//core/patient_lists.pyclass:PatientList/get_display_name
|
@classmethod
def get_display_name(klass):
"""
Default getter function - returns the `display_name` property
"""
return klass.display_name
|
fake-blender-api-2.79-0.3.1
|
fake-blender-api-2.79-0.3.1//bpy/ops/text.pyfile:/bpy/ops/text.py:function:jump/jump
|
def jump(line: int=1):
"""Jump cursor to line
:param line: Line, Line number to jump to
:type line: int
"""
pass
|
yass-algorithm-0.9
|
yass-algorithm-0.9//src/yass/neuralnetwork/get_nn_output.pyfile:/src/yass/neuralnetwork/get_nn_output.py:function:fix_indexes/fix_indexes
|
def fix_indexes(res, idx_local, idx, buffer_size):
"""Fixes indexes from detected spikes in batches
Parameters
----------
res: tuple
A result from the butterworth
idx_local: slice
A slice object indicating the indices for the data (excluding buffer)
idx: slice
A slice object indicating the absolute location of the data
buffer_size: int
Buffer size
"""
data_start = idx_local[0].start
data_end = idx_local[0].stop
return res[data_start:data_end]
|
kodiswift-0.0.8
|
kodiswift-0.0.8//kodiswift/constants.pyclass:SortMethod/from_string
|
@classmethod
def from_string(cls, sort_method):
"""Returns the sort method specified. sort_method is case insensitive.
Will raise an AttributeError if the provided sort_method does not
exist.
>>> SortMethod.from_string('title')
"""
return getattr(cls, sort_method.upper())
|
dgl_cu100-0.4.3.post2.data
|
dgl_cu100-0.4.3.post2.data//purelib/dgl/backend/backend.pyfile:/purelib/dgl/backend/backend.py:function:dtype/dtype
|
def dtype(input):
"""Return the data type of the tensor.
Parameters
----------
input : Tensor
The input tensor.
Returns
-------
data type
It should be one of the values in the data type dict.
"""
pass
|
ceph-deploy-2.0.1
|
ceph-deploy-2.0.1//ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_socket.pyfile:/ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway_socket.py:function:start_via/start_via
|
def start_via(gateway, hostport=None):
""" return a host, port tuple,
    after instantiating a socketserver on the given gateway
"""
if hostport is None:
host, port = 'localhost', 0
else:
host, port = hostport
from execnet.script import socketserver
channel = gateway.remote_exec(socketserver)
channel.send((host, port))
realhost, realport = channel.receive()
if not realhost or realhost == '0.0.0.0':
realhost = 'localhost'
return realhost, realport
|
superset
|
superset//db_engine_specs/presto.pyclass:PrestoEngineSpec/_is_column_name_quoted
|
@classmethod
def _is_column_name_quoted(cls, column_name: str) ->bool:
"""
Check if column name is in quotes
:param column_name: column name
:return: boolean
"""
return column_name.startswith('"') and column_name.endswith('"')
|
apitest
|
apitest//util.pyfile:/util.py:function:value_from_dict/value_from_dict
|
def value_from_dict(dict_, key):
"""Return the value for the key.
Parameters
----------
dict_: dict
Dict containing the key
key: str
Key to lookup in the dict
"""
return dict_[key]
|
M-LOOP-2.2.0
|
M-LOOP-2.2.0//mloop/utilities.pyfile:/mloop/utilities.py:function:datetime_to_string/datetime_to_string
|
def datetime_to_string(datetime):
"""
Method for changing a datetime into a standard string format used by all packages.
"""
return datetime.strftime('%Y-%m-%d_%H-%M')
|
chaoscloud
|
chaoscloud//api/urls.pyfile:/api/urls.py:function:experiment/experiment
|
def experiment(base_url: str, experiment_id: str=None) ->str:
"""
Build the URL for an experiment to be published to.
"""
if not experiment_id:
return '/'.join([base_url, 'experiments'])
return '/'.join([base_url, 'experiments', experiment_id])
|
sos-vcs-2019.1220.3154
|
sos-vcs-2019.1220.3154//sos/pure.pyfile:/sos/pure.py:function:median/median
|
def median(values: 'List[Union[int, float]]', inplace: 'bool'=False):
""" TODO Use doctest here.
>>> print(median([1, 2, 3]))
2
>>> print(median([1, 2]))
1.5
"""
assert isinstance(values, list)
n = len(values)
assert n > 0
if n == 1:
return values[0]
if inplace:
values.sort()
else:
values = list(sorted(values))
    return values[n >> 1] if (n >> 1 << 1) != n else (
        values[(n >> 1) - 1] + values[n >> 1]) / 2.0
|
utils_plus
|
utils_plus//router.pyclass:url/var
|
@classmethod
def var(cls, var_name, view=None, name=None, dtype=None, **kwargs) ->'url':
"""Implements having url-arguments. dtype is the casting argument.
the default cast-type is str as Django."""
route = f'{dtype}:{var_name}' if dtype else str(var_name)
return cls(f'<{route}>', view, name, **kwargs)
|
MongoFrames-1.3.5
|
MongoFrames-1.3.5//mongoframes/frames.pyclass:_BaseFrame/_remove_keys
|
@classmethod
def _remove_keys(cls, parent_dict, paths):
"""
Remove a list of keys from a dictionary.
Keys are specified as a series of `.` separated paths for keys in child
dictionaries, e.g 'parent_key.child_key.grandchild_key'.
"""
for path in paths:
keys = cls._path_to_keys(path)
child_dict = parent_dict
for key in keys[:-1]:
child_dict = child_dict.get(key)
if child_dict is None:
break
if child_dict is None:
continue
if keys[-1] in child_dict:
child_dict.pop(keys[-1])
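# Usage sketch (assumes _BaseFrame._path_to_keys splits the dotted path on '.',
# e.g. 'a.b.c' -> ['a', 'b', 'c'], which is how the loop above consumes it):
doc = {'name': 'x', 'meta': {'owner': {'id': 1, 'email': 'a@b.c'}}}
_BaseFrame._remove_keys(doc, ['meta.owner.email', 'missing.key'])
assert doc == {'name': 'x', 'meta': {'owner': {'id': 1}}}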
|
synamic-0.7.1.dev7
|
synamic-0.7.1.dev7//src/synamic/core/services/filesystem/path_tree.pyclass:PathTree/to_path_comps
|
@classmethod
def to_path_comps(cls, *path_comps):
"""The one returned by CPath.path_comps"""
ccomps = cls.to_cpath_ccomps(*path_comps)
if len(ccomps) > 1 and ccomps[-1] == '':
ccomps = ccomps[:-1]
if len(ccomps) > 1 and ccomps[0] == '':
ccomps = ccomps[1:]
comps = ccomps
return comps
|
camtasia-6.1.1
|
camtasia-6.1.1//src/camtasia/media_bin/media_bin.pyfile:/src/camtasia/media_bin/media_bin.py:function:_datetime_to_str/_datetime_to_str
|
def _datetime_to_str(dt):
"""Convert datetime object to camtasia lastMod format.
<year><month><day>T<hour><minute><second>, e.g. 20190606T103830
"""
return (
f'{dt.year}{dt.month:02}{dt.day:02}T{dt.hour:02}{dt.minute:02}{dt.second:02}'
)
|
operun.contactform-1.0.0a4
|
operun.contactform-1.0.0a4//src/operun/contactform/setuphandlers.pyfile:/src/operun/contactform/setuphandlers.py:function:uninstall/uninstall
|
def uninstall(context):
"""Uninstall script"""
|
taurus-citrine-0.6.0
|
taurus-citrine-0.6.0//taurus/demo/cake.pyfile:/taurus/demo/cake.py:function:import_toothpick_picture/import_toothpick_picture
|
def import_toothpick_picture():
"""Return the stream of the toothpick picture."""
import pkg_resources
resource = pkg_resources.resource_stream('taurus.demo', 'toothpick.jpg')
return resource
|
pysyte-0.7.43
|
pysyte-0.7.43//pysyte/types/paths.pyfile:/pysyte/types/paths.py:function:add_star/add_star
|
def add_star(string):
"""Add '*' to string
>>> assert add_star('fred') == 'fred*'
"""
return f'{string}*'
|
quantum-pecos-0.1.2
|
quantum-pecos-0.1.2//pecos/simulators/sparsesim/cmd_one_qubit.pyfile:/pecos/simulators/sparsesim/cmd_one_qubit.py:function:Sd/Sd
|
def Sd(state, qubit):
"""
Applies a Hermitian adjoint phase gate (S^{\\dagger}) rotation to stabilizers and destabilizers
S = S
Z = S^2
S^{\\dagger} = S^3
I = S^4
X -> -iW = -Y
Z -> Z
W -> -iX
Y -> X
"""
stabs = state.stabs
stabs.signs_minus ^= stabs.col_x[qubit]
gens_common = stabs.signs_i & stabs.col_x[qubit]
gens_only_x = stabs.col_x[qubit] - stabs.signs_i
stabs.signs_minus ^= gens_common
stabs.signs_i -= gens_common
stabs.signs_i |= gens_only_x
for gens in state.gen_list:
gens.col_z[qubit] ^= gens.col_x[qubit]
for i in gens.col_x[qubit]:
gens.row_z[i] ^= {qubit}
|
dropbox-10.1.2
|
dropbox-10.1.2//dropbox/team_log.pyclass:EventDetails/enterprise_settings_locking_details
|
@classmethod
def enterprise_settings_locking_details(cls, val):
"""
Create an instance of this class set to the
``enterprise_settings_locking_details`` tag with value ``val``.
:param EnterpriseSettingsLockingDetails val:
:rtype: EventDetails
"""
return cls('enterprise_settings_locking_details', val)
|
okra-2.0.5
|
okra-2.0.5//okra/assn4.pyfile:/okra/assn4.py:function:smallest_owner_set/smallest_owner_set
|
def smallest_owner_set(authors, total, size=0.5):
""" Smallest set of authors owning more than half of project files.
:param authors: author_number_of_files_owned() output
:param total: total_number_of_files_by_project() output
:return: (number of members in smallest set, smallest set)
:rtype: tuple
"""
items = sorted(authors.items(), key=lambda k_v: k_v[1], reverse=True)
threshold = round(total * size)
agg = 0
members = []
member_count = 0
for k, v in items:
if agg <= threshold:
members.append((k, v))
member_count += 1
agg += v
else:
break
return member_count, members
|
BioDendro-0.0.2
|
BioDendro-0.0.2//BioDendro/preprocess.pyclass:MGFRecord/_get_retention
|
@classmethod
def _get_retention(cls, string, key='RTINSECONDS'):
""" Get the retention time in seconds back. """
rtinseconds = cls._split_kvline(key, string)
return float(rtinseconds)
|
minty_pyramid
|
minty_pyramid//code_generation/generate.pyfile:/code_generation/generate.py:function:group_routes_by/group_routes_by
|
def group_routes_by(routes: list, param: str) ->dict:
"""Group routes by parameter.
    Create different groupings to accommodate different templates
:param routes: routes
:type routes: list
:param param: parameter to group by
:type param: str
:return: grouped routes
:rtype: dict
"""
grouped_routes = {}
for route in routes:
grouped_by_param = grouped_routes.get(route[param], [])
grouped_by_param.append(route)
grouped_routes[route[param]] = grouped_by_param
return grouped_routes
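# Usage sketch (hypothetical route dicts grouped by HTTP method):
routes = [{'path': '/a', 'method': 'get'},
          {'path': '/b', 'method': 'get'},
          {'path': '/c', 'method': 'post'}]
grouped = group_routes_by(routes, 'method')
assert sorted(grouped) == ['get', 'post'] and len(grouped['get']) == 2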
|
rucio-1.22.4
|
rucio-1.22.4//lib/rucio/core/permission/generic.pyfile:/lib/rucio/core/permission/generic.py:function:perm_get_ssh_challenge_token/perm_get_ssh_challenge_token
|
def perm_get_ssh_challenge_token(issuer, kwargs):
"""
Checks if an account can request a challenge token.
:param issuer: Account identifier which issues the command.
:returns: True if account is allowed to call the API call, otherwise False
"""
return True
|
GrandFatherSon-1.3
|
GrandFatherSon-1.3//grandfatherson/filters.pyclass:Days/mask
|
@classmethod
def mask(cls, dt, **options):
"""
Return a datetime with the same value as ``dt``, to a
resolution of days.
"""
return dt.replace(hour=0, minute=0, second=0, microsecond=0)
|