repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
stoq-4.7.0.post1
|
stoq-4.7.0.post1//stoqlib/lib/interfaces.pyclass:ICookieFile/get
|
def get():
    """Fetch the cookie or raise CookieError if a problem occurred.

    Interface stub (no implementation here): concrete ICookieFile
    implementations return the stored credentials.

    :returns: (username, password)
    :rtype: tuple
    """
|
ironic-15.0.0
|
ironic-15.0.0//ironic/objects/bios.pyclass:BIOSSettingList/delete
|
@classmethod
def delete(cls, context, node_id, names):
    """Delete BIOS Settings based on node_id and names.
    :param context: Security context.
    :param node_id: The node id.
    :param names: List of BIOS setting names to be deleted.
    :raises: NodeNotFound if the node id is not found.
    :raises: BIOSSettingNotFound if any of BIOS setting fails to delete.
    """
    # Deletion is delegated entirely to the DB API layer; the listed
    # exceptions are raised there, not in this method.
    cls.dbapi.delete_bios_setting_list(node_id, names)
|
scons
|
scons//SCons/Util.pyfile:/SCons/Util.py:function:unique/unique
|
def unique(s):
    """Return the elements of *s* with duplicates removed.

    The result is some permutation of the distinct elements, e.g.
    unique([1, 2, 3, 1, 2, 3]) is a permutation of [1, 2, 3] and
    unique("abcabc") a permutation of ["a", "b", "c"].

    Three strategies are tried in order of decreasing speed:
    1. hashing (linear time) when all elements are hashable;
    2. sorting (O(N log N)) when the elements have a total ordering;
    3. pairwise equality testing (quadratic) as a last resort.
    """
    if len(s) == 0:
        return []
    # Strategy 1: hash-based dedup, keeps first-occurrence order.
    try:
        return list(dict.fromkeys(s))
    except TypeError:
        pass
    # Strategy 2: sort, then drop adjacent duplicates (result is sorted).
    try:
        ordered = sorted(s)
    except TypeError:
        pass
    else:
        deduped = [ordered[0]]
        for item in ordered[1:]:
            if item != deduped[-1]:
                deduped.append(item)
        return deduped
    # Strategy 3: brute-force membership scan, keeps first-occurrence order.
    result = []
    for item in s:
        if item not in result:
            result.append(item)
    return result
|
sunpy-1.1.3
|
sunpy-1.1.3//sunpy/timeseries/sources/eve.pyclass:EVESpWxTimeSeries/is_datasource_for
|
@classmethod
def is_datasource_for(cls, **kwargs):
    """
    Determine whether the supplied keyword arguments describe EVE data.

    When a non-empty ``source`` kwarg is given, return the result of a
    case-insensitive prefix match against ``cls._source``; otherwise
    return None (no decision).
    """
    source = kwargs.get('source', '')
    if source:
        return source.lower().startswith(cls._source)
|
elastic_lib-19.4.23
|
elastic_lib-19.4.23//elastic_lib/elastic.pyclass:ElasticModel/build_aggregation
|
@classmethod
def build_aggregation(cls, fields, size, filters=None):
    """Build the aggregation for a field or a list of fields.
    /!\\ The fields MUST be multifield with an untouched sub-field
    field: list or str
    size: int
    filters: list
    return: aggs
    """
    # Pass 1: normalise the requested fields into a flat mapping of
    # field name -> spec, always targeting the '.untouched' sub-field.
    aggs_list = {}
    aggs = {}
    if isinstance(fields, str):
        # A bare string is treated as a one-element tuple of fields.
        fields = fields,
    for field in fields:
        if isinstance(field, (tuple, list)):
            # (parent, child1, child2, ...): every element after the
            # first becomes a nested terms aggregation of the parent.
            nested_aggs = []
            for idx, sub_field in enumerate(field):
                if idx == 0:
                    continue
                nested_aggs.append({'aggregations': {sub_field: {'terms': {
                    'field': '%s.untouched' % sub_field}}}})
            agg = {field[0]: {'field': '%s.untouched' % field[0], 'size':
                size, 'nested_aggs': nested_aggs}}
        else:
            agg = {field: {'field': '%s.untouched' % field, 'size': size}}
        if aggs_list:
            aggs_list.update(agg)
        else:
            aggs_list = agg
    # Pass 2: translate each spec into an Elasticsearch 'terms'
    # aggregation, optionally wrapped inside the given filter.
    for key, agg in aggs_list.items():
        aggs_name = key.replace('.', '_')
        aggregation = {'terms': {'field': agg['field'], 'size': agg['size'],
            'order': {'_count': 'desc'}}}
        if filters:
            aggs['%s_filtered' % aggs_name] = {'filter': filters}
            aggs['%s_filtered' % aggs_name]['aggregations'] = {aggs_name:
                aggregation}
        else:
            aggs[aggs_name] = aggregation
        if agg.get('nested_aggs'):
            # NOTE(review): when both ``filters`` and nested_aggs are
            # present, ``aggs[aggs_name]`` was never assigned above (only
            # the '_filtered' key was), so this line would raise KeyError
            # — confirm intended behavior for that combination.
            aggs[aggs_name] = {'terms': aggs[aggs_name]['terms'],
                'aggregations': agg['nested_aggs'][0]['aggregations']}
    return aggs
|
dropbox
|
dropbox//files.pyclass:GetMetadataError/path
|
@classmethod
def path(cls, val):
    """
    Create an instance of this class set to the ``path`` tag with value
    ``val``.
    :param LookupError val:
    :rtype: GetMetadataError
    """
    # Tagged-union constructor: the first argument selects the variant.
    return cls('path', val)
|
nictools
|
nictools//tfbutil.pyfile:/tfbutil.py:function:setNoclean/setNoclean
|
def setNoclean(noclean_value):
    """Copy noclean_value to a variable that is global for this file.

    Parameters
    ----------
    noclean_value : string
        string that is either "True" or "False"
    """
    # Stored in the module-global ``noclean`` for later use by this file.
    global noclean
    noclean = noclean_value
|
seedbox
|
seedbox//torrent/loader.pyfile:/torrent/loader.py:function:_is_parsing_required/_is_parsing_required
|
def _is_parsing_required(torrent):
"""Determines if parsing is required.
Checks the following attributes to determine if parsing is needed:
- if torrent.invalid is True, skip parsing
- if torrent.purged is True, skip parsing
- if len(torrent.media_files) > 0, skip parsing
"""
parse = True
if torrent.invalid:
parse = False
if torrent.purged:
parse = False
if torrent.media_files:
parse = False
return parse
|
mne
|
mne//viz/evoked.pyfile:/viz/evoked.py:function:_plot_compare_evokeds/_plot_compare_evokeds
|
def _plot_compare_evokeds(ax, data_dict, conditions, times, ci_dict, styles,
    title, all_positive, topo):
    """Plot evokeds (to compare them; with CIs) based on a data_dict.

    :param ax: matplotlib axes drawn into (mutated in place).
    :param data_dict: condition -> array; ``.T`` is plotted against ``times``.
    :param conditions: iterable of condition keys to draw.
    :param times: x-axis values shared by all traces.
    :param ci_dict: condition -> (lower, upper) CI arrays, or missing/None
        to skip the shaded band for that condition.
    :param styles: condition -> dict of matplotlib line kwargs; must
        contain at least 'color' when a CI band is drawn.
    :param title: text placed as axes title (or as text when ``topo``).
    :param all_positive: accepted but not used in this function.
    :param topo: if True, write the title as text inside the axes instead
        of using ``set_title`` (topo-map layout).
    """
    for condition in conditions:
        dat = data_dict[condition].T
        # zorder=1000 keeps traces above the CI bands drawn at zorder=9.
        ax.plot(times, dat, zorder=1000, label=condition, clip_on=False, **
            styles[condition])
        if ci_dict.get(condition, None) is not None:
            # Shaded confidence band between the lower and upper CI curves.
            ci_ = ci_dict[condition]
            ax.fill_between(times, ci_[0].flatten(), ci_[1].flatten(),
                zorder=9, color=styles[condition]['color'], alpha=0.3,
                clip_on=False)
    if topo:
        ax.text(-0.1, 1, title, transform=ax.transAxes)
    else:
        ax.set_title(title)
|
mando
|
mando//utils.pyfile:/utils.py:function:purify_kwargs/purify_kwargs
|
def purify_kwargs(kwargs):
    """Remove 'type' and 'metavar' entries whose value is None.

    The dict is mutated in place and the same object is returned.
    """
    removable = ('type', 'metavar')
    for key in list(kwargs):
        if key in removable and kwargs[key] is None:
            del kwargs[key]
    return kwargs
|
autofront
|
autofront//utilities.pyfile:/utilities.py:function:add_args_to_title/add_args_to_title
|
def add_args_to_title(func_name, arg_list, script=False):
    """Add fixed args to function name for display in browser.

    For non-script callables the result looks like ``"name ( a, b,"``
    (the closing parenthesis is appended elsewhere); scripts get the
    bare name back.
    """
    title = func_name
    if script:
        return title
    title += ' ('
    if arg_list:
        title += ' ' + ', '.join(arg_list)
    title += ','
    return title
|
fantastico-0.7.1
|
fantastico-0.7.1//virtual_env/libs/mysql-connector/python3/mysql/connector/errors.pyfile:/virtual_env/libs/mysql-connector/python3/mysql/connector/errors.py:function:custom_error_exception/custom_error_exception
|
def custom_error_exception(error=None, exception=None):
    """Define custom exceptions for MySQL server errors
    This function defines custom exceptions for MySQL server errors and
    returns the current set customizations.
    If error is a MySQL Server error number, then you have to pass also the
    exception class.
    The error argument can also be a dictionary in which case the key is
    the server error number, and value the exception to be raised.
    If none of the arguments are given, then custom_error_exception() will
    simply return the current set customizations.
    To reset the customizations, simply supply an empty dictionary.
    Examples:
    import mysql.connector
    from mysql.connector import errorcode
    # Server error 1028 should raise a DatabaseError
    mysql.connector.custom_error_exception(
    1028, mysql.connector.DatabaseError)
    # Or using a dictionary:
    mysql.connector.custom_error_exception({
    1028: mysql.connector.DatabaseError,
    1029: mysql.connector.OperationalError,
    })
    # Reset
    mysql.connector.custom_error_exception({})
    Returns a dictionary.
    """
    # Customizations live in a module-level dict defined elsewhere in
    # this file.
    global _CUSTOM_ERROR_EXCEPTIONS
    # Empty dict argument: reset all customizations.
    if isinstance(error, dict) and not len(error):
        _CUSTOM_ERROR_EXCEPTIONS = {}
        return _CUSTOM_ERROR_EXCEPTIONS
    # No arguments: just report the current customizations.
    if not error and not exception:
        return _CUSTOM_ERROR_EXCEPTIONS
    if not isinstance(error, (int, dict)):
        raise ValueError(
            'The error argument should be either an integer or dictionary')
    # Normalise the single errno/exception form into a dict.
    if isinstance(error, int):
        error = {error: exception}
    # Note: the loop variable deliberately rebinds the ``exception``
    # parameter; each dict value is validated and registered in turn.
    for errno, exception in error.items():
        if not isinstance(errno, int):
            raise ValueError('error number should be an integer')
        try:
            # issubclass raises TypeError for non-class values; a class
            # that is not an Exception subclass raises it explicitly so
            # both paths land in the same handler.
            if not issubclass(exception, Exception):
                raise TypeError
        except TypeError:
            raise ValueError('exception should be subclass of Exception')
        _CUSTOM_ERROR_EXCEPTIONS[errno] = exception
    return _CUSTOM_ERROR_EXCEPTIONS
|
exoedge_simulator
|
exoedge_simulator//sys_info.pyfile:/sys_info.py:function:net_io/net_io
|
def net_io():
    """
    Get usage statistics on all system net interfaces.
    config_io:
    {
    "channels": {
    "net_io": {
    "display_name": "NetIO",
    "description": "Network statistics of the gateway.",
    "properties": {
    "data_type": "STRING"
    },
    "protocol_config": {
    "application": "Simulator",
    "report_on_change": false,
    "sample_rate": 30000,
    "report_rate": 30000,
    "app_specific_config": {
    "function": "net_io",
    "parameters": {},
    "positionals": []
    }
    }
    }
    }
    }

    :returns: dict mapping interface name -> {counter name: value} built
        from psutil's per-NIC I/O counters.
    :raises ImportError: when psutil is not installed.
    """
    try:
        import psutil as _psutil
    except ImportError:
        error_msg = (
            'Unable to import psutil. Please install in order to use the sys_info function.'
        )
        raise ImportError(error_msg)
    payload = {}
    ioc = _psutil.net_io_counters(pernic=True)
    # Copy every public, non-callable attribute of each per-interface
    # counters object (bytes_sent, packets_recv, ...).
    for iface in ioc:
        if not payload.get(iface):
            payload[iface] = {}
        for elem in dir(ioc[iface]):
            if not elem.startswith('_'):
                if hasattr(ioc[iface], elem):
                    attr = getattr(ioc[iface], elem)
                    if callable(attr):
                        # Skip namedtuple methods such as count()/index().
                        pass
                    else:
                        payload[iface][elem] = attr
                elif hasattr(ioc[iface], '__dict__') and ioc[iface
                    ].__dict__.get(elem):
                    # Fallback for attributes only present in __dict__.
                    payload[iface][elem] = ioc[iface].__dict__[elem]
    return payload
|
asyncssh
|
asyncssh//auth.pyclass:_ServerKbdIntAuth/supported
|
@classmethod
def supported(cls, conn):
    """Return whether keyboard interactive authentication is supported

    :param conn: server connection object; the decision is fully
        delegated to its ``kbdint_auth_supported()`` method.
    """
    return conn.kbdint_auth_supported()
|
squyrrel-0.2.1
|
squyrrel-0.2.1//squyrrel/core/utils/paths.pyfile:/squyrrel/core/utils/paths.py:function:convert_path_to_import_string/convert_path_to_import_string
|
def convert_path_to_import_string(path):
    """Convert a filesystem path into a dotted import string.

    Backslashes are first normalised to '/', then every '/' becomes '.'.
    A path starting with './' yields one leading dot; each '..' component
    contributes one extra leading dot. Components starting with '.' are
    dropped from the dotted body.
    """
    path = path.replace('\\\\', '/')
    path = path.replace('\\', '/')
    if '/' not in path:
        return path
    parts = path.strip().split('/')
    # Drop a trailing empty component produced by a trailing slash.
    if not parts[-1]:
        parts.pop()
    num_parents = sum(1 for part in parts if part == '..')
    is_relative = parts[0] == '.'
    kept = [part for part in parts if not part.startswith('.')]
    if num_parents:
        dotted = '.'.join([num_parents * '.', '.'.join(kept)])
    else:
        dotted = '.'.join(kept)
    if is_relative and num_parents == 0:
        return '.' + dotted
    return dotted
|
owslib
|
owslib//util.pyfile:/util.py:function:getTypedValue/getTypedValue
|
def getTypedValue(data_type, value):
    """Utility function to cast a string value to the appropriate XSD type.

    'boolean' compares case-insensitively against 'true'; 'integer',
    'float' and 'string' apply the corresponding builtin; any other
    type name returns the value unchanged.
    """
    if data_type == 'boolean':
        return value.lower() == 'true'
    casts = {'integer': int, 'float': float, 'string': str}
    cast = casts.get(data_type)
    return cast(value) if cast is not None else value
|
dgl-0.4.3.post2.data
|
dgl-0.4.3.post2.data//purelib/dgl/backend/backend.pyfile:/purelib/dgl/backend/backend.py:function:zeros/zeros
|
def zeros(shape, dtype, ctx):
    """Create a zero tensor.

    Abstract backend-interface stub: each concrete DGL backend supplies
    the real implementation; this placeholder does nothing.

    Parameters
    ----------
    shape : tuple of int
        The tensor shape.
    dtype : data type
        It should be one of the values in the data type dict.
    ctx : context
        The device of the result tensor.
    Returns
    -------
    Tensor
        The zero tensor.
    """
    pass
|
aiida_fleur
|
aiida_fleur//tools/StructureData_util.pyfile:/tools/StructureData_util.py:function:rescale_xyz/rescale_xyz
|
def rescale_xyz(inp_structure, scalevec):
    """
    rescales a structure a certain way...

    Unimplemented stub: accepts a structure and a scaling vector and
    currently does nothing (returns None).
    """
    pass
|
pyeagle-0.1.1
|
pyeagle-0.1.1//pyeagle/geometry.pyclass:SMD/from_xml
|
@classmethod
def from_xml(cls, node):
    """
    Construct an SMD from an EAGLE XML ``smd`` node.

    Reads the 'name', 'x'/'y' (position), 'dx'/'dy' (size) and 'layer'
    attributes; coordinates and sizes are cast to float, layer to int.
    """
    return cls(name=node.attrib['name'], pos=(float(node.attrib['x']),
        float(node.attrib['y'])), size=(float(node.attrib['dx']), float(
        node.attrib['dy'])), layer=int(node.attrib['layer']))
|
nova-20.2.0
|
nova-20.2.0//nova/virt/libvirt/designer.pyfile:/nova/virt/libvirt/designer.py:function:set_numa_memnode/set_numa_memnode
|
def set_numa_memnode(conf, guest_node_id, host_cell_id):
    """Prepares numa memory node config for the guest.

    Mutates ``conf`` in place: the guest cell id, a one-element host
    nodeset, and the 'strict' placement mode.
    """
    conf.mode = 'strict'
    conf.nodeset = [host_cell_id]
    conf.cellid = guest_node_id
|
pymprog-1.1.2
|
pymprog-1.1.2//pymprog.pyclass:model/set_mat_row
|
def set_mat_row(me, rid, n, cidx, cval):
    """set_mat_row with transparent index mapping.

    Maps the public row id through the internal row map and delegates to
    ``_set_mat_row``; a falsy mapped id means the row no longer exists,
    in which case nothing is done (returns None).
    """
    rid = me._rowmap.map(rid)
    if rid:
        return me._set_mat_row(rid, n, cidx, cval)
|
dfvfs-20200429
|
dfvfs-20200429//dfvfs/resolver_helpers/manager.pyclass:ResolverHelperManager/DeregisterHelper
|
@classmethod
def DeregisterHelper(cls, resolver_helper):
    """Deregisters a path specification resolver helper.

    Args:
        resolver_helper (ResolverHelper): resolver helper.

    Raises:
        KeyError: if resolver helper object is not set for the corresponding
            type indicator.
    """
    type_indicator = resolver_helper.type_indicator
    if type_indicator not in cls._resolver_helpers:
        raise KeyError(
            'Resolver helper object not set for type indicator: {0:s}.'.
            format(type_indicator))
    del cls._resolver_helpers[type_indicator]
|
astropy
|
astropy//modeling/functional_models.pyclass:Shift/sum_of_implicit_terms
|
@staticmethod
def sum_of_implicit_terms(x):
    """Evaluate the implicit term (x) of one dimensional Shift model

    The Shift model's implicit contribution is the input itself, so the
    argument is returned unchanged.
    """
    return x
|
aws-orgs-0.3.4
|
aws-orgs-0.3.4//awsorgs/orgs.pyfile:/awsorgs/orgs.py:function:get_parent_id/get_parent_id
|
def get_parent_id(org_client, account_id):
    """
    Query the deployed AWS organization for 'account_id' and return the
    'Id' of its parent root/OrganizationalUnit.

    :param org_client: boto3 Organizations client (uses ``list_parents``).
    :param account_id: id of the account to look up.
    :raises RuntimeError: if the account does not have exactly one parent.
    """
    parents = org_client.list_parents(ChildId=account_id)['Parents']
    # Original code compared len(parents) == 1 and discarded the result,
    # then raised from a bare except with a broken format string; check
    # the invariant explicitly instead.
    if len(parents) != 1:
        raise RuntimeError(
            "API Error: account '%s' does not have exactly one parent: %s"
            % (account_id, parents))
    return parents[0]['Id']
|
vtkplotter-2020.3.0
|
vtkplotter-2020.3.0//vtkplotter/colors.pyfile:/vtkplotter/colors.py:function:rgb2int/rgb2int
|
def rgb2int(rgb_tuple):
    """Return the int number of a color from (r,g,b), with 0<r<1 etc."""
    r, g, b = (int(channel * 255) for channel in rgb_tuple[:3])
    # Pack as 0xRRGGBB: shifts are equivalent to *65536 / *256.
    return (r << 16) + (g << 8) + b
|
repoze.who-2.3
|
repoze.who-2.3//repoze/who/interfaces.pyclass:IAPI/login
|
def login(credentials, identifier_name=None):
    """ -> (identity, headers)

    Interface stub: API for browser-based application login forms.

    o If 'identifier_name' is passed, use it to look up the identifier;
      otherwise, use the first configured identifier.
    o Attempt to authenticate 'credentials' as though the identifier
      had extracted them.
    o On success, 'identity' will be the authenticated mapping, and
      'headers' will be "remember" headers.
    o On failure, 'identity' will be None, and the headers will be
      "forget" headers.
    """
|
mailman-3.3.1
|
mailman-3.3.1//src/mailman/interfaces/subscriptions.pyclass:ISubscriptionService/find_member
|
def find_member(subscriber=None, list_id=None, role=None):
    """Search for a member matching some criteria.

    Interface stub: like find_members() but guaranteed to return exactly
    one member.

    :param subscriber: The email address or user id of the user getting
        subscribed.
    :type subscriber: string or int
    :param list_id: The list id of the mailing list to search for the
        subscriber's memberships on.
    :type list_id: string
    :param role: The member role.
    :type role: `MemberRole`
    :return: The member matching the given criteria or None if no
        members match the criteria.
    :rtype: `IMember` or None
    :raises TooManyMembersError: when the given criteria matches
        more than one membership.
    """
|
OpenTimelineIO-0.12.1
|
OpenTimelineIO-0.12.1//contrib/opentimelineio_contrib/adapters/kdenlive.pyfile:/contrib/opentimelineio_contrib/adapters/kdenlive.py:function:read_property/read_property
|
def read_property(element, name):
    """Decode an MLT item property.

    The value lives in a child "property" XML element whose "name"
    attribute matches *name*; returns '' when no such element exists.
    """
    xpath = "property[@name='{}']".format(name)
    return element.findtext(xpath, '')
|
flit_core
|
flit_core//common.pyfile:/common.py:function:parse_entry_point/parse_entry_point
|
def parse_entry_point(ep):
    """Check and parse a 'package.module:func' style entry point specification.

    Returns (modulename, funcname).

    :raises ValueError: if the specification is malformed (missing ':',
        non-identifier function name, or invalid module path).
    """
    if ':' not in ep:
        raise ValueError("Invalid entry point (no ':'): %r" % ep)
    # partition() instead of split(): an input with several colons now
    # produces the informative "not an identifier" error below rather
    # than an opaque tuple-unpacking ValueError.
    mod, _, func = ep.partition(':')
    if not func.isidentifier():
        raise ValueError('Invalid entry point: %r is not an identifier' % func)
    for piece in mod.split('.'):
        if not piece.isidentifier():
            raise ValueError('Invalid entry point: %r is not a module path' %
                piece)
    return mod, func
|
pymdict-0.0.4
|
pymdict-0.0.4//pymdict/mongo_query_parser.pyclass:MongoQueryParser/_do_encapsulated_split
|
@staticmethod
def _do_encapsulated_split(text: str, sep_init='(', sep_end=')',
include_separators=False):
"""
Splits by text between tokens.
:param text: text to split
:param sep_init: token that specifies the start of the segment
:param sep_end: token that specifies the end of the segment
:param include_separators: boolean flag that specifies if the token separators should be included in the
segment or not.
:return: split segments
"""
segment = ''
open = False
ESCAPE_CHAR = '\\'
previous = ''
for index, l in enumerate(text):
if l == sep_init and not open and previous != ESCAPE_CHAR:
open = True
elif l == sep_end and previous != ESCAPE_CHAR:
break
elif open:
segment += l
previous = l
return segment if not include_separators else sep_init + segment + sep_end
|
completion_aggregator
|
completion_aggregator//compat.pyfile:/compat.py:function:get_children/get_children
|
def get_children(course_blocks, block_key):
    """
    Return a list of blocks that are direct children of the specified block.

    ``course_blocks`` is not imported here, but it is hard to replicate
    without access to edx-platform, so tests will want to stub it out.
    The call is delegated verbatim to ``course_blocks.get_children``.
    """
    return course_blocks.get_children(block_key)
|
geminicassandra-0.1.15.39
|
geminicassandra-0.1.15.39//geminicassandra/gemini_amend.pyfile:/geminicassandra/gemini_amend.py:function:add_columns/add_columns
|
def add_columns(header, c):
    """
    add any missing columns to the samples table

    :param header: iterable of column names to ensure exist
    :param c: database cursor/session exposing ``execute``
    """
    for column in header:
        try:
            # NOTE(review): the column name is interpolated unescaped into
            # DDL — callers must supply trusted names only.
            c.execute('ALTER TABLE samples ADD COLUMN {0}'.format(column))
        except:
            # Deliberate best-effort: the column may already exist. The
            # bare except also hides real errors, though.
            pass
|
pattern
|
pattern//metrics.pyfile:/metrics.py:function:cumsum/cumsum
|
def cumsum(iterable):
    """Yield the running (cumulative) sum of the values in *iterable*,
    starting from 0.
    """
    total = 0
    for value in iterable:
        total = total + value
        yield total
|
confetti-2.5.3
|
confetti-2.5.3//confetti/config.pyclass:Config/from_filename
|
@classmethod
def from_filename(cls, filename, namespace=None):
    """
    Initializes the config from a file named ``filename``. The file is expected to contain a variable named ``CONFIG``.

    NOTE(review): the ``namespace`` argument is accepted but never
    forwarded to ``from_file`` — confirm whether it should be passed on.
    """
    with open(filename, 'rb') as f:
        return cls.from_file(f, filename)
|
deepomatic
|
deepomatic//oef/utils/serializer.pyclass:Serializer/_parameter_helper
|
@staticmethod
def _parameter_helper(**kwargs):
    """To be overriden for custom input parameters.

    Default behavior: pass the keyword arguments through unchanged.
    """
    return kwargs
|
panya-social-0.0.4
|
panya-social-0.0.4//social/models.pyfile:/social/models.py:function:is_member_nobody/is_member_nobody
|
def is_member_nobody(target_user, requesting_user):
    """
    Always return False. No users are part of 'Nobody'.

    Both arguments are accepted for interface compatibility and ignored.
    """
    return False
|
django-lfs-0.11
|
django-lfs-0.11//lfs/core/utils.pyfile:/lfs/core/utils.py:function:getLOL/getLOL
|
def getLOL(objects, objects_per_row=3):
    """Return a list of lists ("rows") of the passed objects, with at
    most ``objects_per_row`` objects per row; the last row may be
    shorter.
    """
    rows = []
    current = []
    for count, item in enumerate(objects, start=1):
        current.append(item)
        if count % objects_per_row == 0:
            rows.append(current)
            current = []
    if current:
        rows.append(current)
    return rows
|
kwonly_args
|
kwonly_args//utils.pyfile:/utils.py:function:update_wrapper/update_wrapper
|
def update_wrapper(wrapper, wrapped):
    """Minimal stand-in for functools.update_wrapper() on python2.4
    (functools gained it only in python2.5+).

    Copies ``__module__``/``__name__``/``__doc__`` (when present) and the
    instance ``__dict__`` from *wrapped* onto *wrapper*, then returns
    *wrapper*.
    """
    copied_attrs = ('__module__', '__name__', '__doc__')
    for name in copied_attrs:
        value = getattr(wrapped, name, None)
        if value is not None:
            setattr(wrapper, name, value)
    wrapper.__dict__.update(getattr(wrapped, '__dict__', {}))
    return wrapper
|
pyocd
|
pyocd//coresight/cortex_m.pyfile:/coresight/cortex_m.py:function:is_single_float_register/is_single_float_register
|
def is_single_float_register(index):
    """! @brief Returns true for registers holding single-precision float values

    The single-precision registers occupy the index range 64-95 inclusive.
    """
    return 95 >= index >= 64
|
kivy
|
kivy//modules/console.pyfile:/modules/console.py:function:stop/stop
|
def stop(win, ctx):
    """Stop and unload any active Inspectors for the given *ctx*.

    If a console was attached to *ctx*, its window bindings are removed,
    the widget is detached from *win*, and the reference is deleted; a
    *ctx* without a console is a no-op.
    """
    if hasattr(ctx, 'console'):
        win.unbind(children=ctx.console.on_window_children, on_keyboard=ctx
            .console.keyboard_shortcut)
        win.remove_widget(ctx.console)
        del ctx.console
|
brew_tools-0.2.7
|
brew_tools-0.2.7//brew_tools/brew_maths.pyfile:/brew_tools/brew_maths.py:function:c_to_f/c_to_f
|
def c_to_f(c):
    """
    Convert a temperature from degrees Celsius to degrees Fahrenheit.

    :param c: temperature in Celsius
    :return: temperature in Fahrenheit (c * 1.8 + 32)
    """
    scaled = c * 1.8
    return scaled + 32.0
|
nitpick
|
nitpick//app.pyclass:Nitpick/current_app
|
@classmethod
def current_app(cls):
    """Get the current app from the stack.

    Simply returns the class-level ``_current_app`` reference (may be
    None when no app is active).
    """
    return cls._current_app
|
talos-0.6.6
|
talos-0.6.6//talos/model/network_shape.pyfile:/talos/model/network_shape.py:function:network_shape/network_shape
|
def network_shape(params, last_neuron):
    """Provides the ability to include network shape in experiments. If params
    dictionary for the round contains float value for params['shapes'] then
    a linear contraction towards the last_neuron value. The higher the value,
    the fewer layers it takes to reach lesser than last_neuron.
    Supports three inbuilt shapes 'brick', 'funnel', and 'triangle'.
    params : dict
        Scan() params for a single round.
    last_neuron : int
        Number of neurons on the output layer in the Keras model.

    Returns a list with one neuron count per hidden layer.
    Raises TalosParamsError for an unrecognised 'shapes' value.
    """
    import numpy as np
    from ..utils.exceptions import TalosParamsError
    layers = params['hidden_layers']
    shape = params['shapes']
    first_neuron = params['first_neuron']
    out = []
    n = first_neuron
    if layers == 0:
        # No hidden layers requested.
        return [0]
    if isinstance(shape, float):
        # Geometric contraction: shrink by (1 - shape) per layer, but
        # never go below last_neuron.
        for i in range(layers):
            n *= 1 - shape
            if n > last_neuron:
                out.append(int(n))
            else:
                out.append(last_neuron)
    elif shape == 'brick':
        # Constant width across all hidden layers.
        out = [first_neuron] * layers
    elif shape == 'funnel':
        # Linear decrease from first_neuron towards last_neuron.
        for i in range(layers + 1):
            n -= int((first_neuron - last_neuron) / layers)
            out.append(n)
        out.pop(-1)
    elif shape == 'triangle':
        # Linear ramp computed by numpy; endpoints removed, then reversed
        # so widths grow towards the output layer.
        out = np.linspace(first_neuron, last_neuron, layers + 2, dtype=int
            ).tolist()
        out.pop(0)
        out.pop(-1)
        out.reverse()
    else:
        message = (
            "'shapes' must be float or in ['funnel', 'brick', 'triangle']")
        raise TalosParamsError(message)
    return out
|
silva.core.references-3.0.4
|
silva.core.references-3.0.4//src/silva/core/references/interfaces.pyclass:IReferenceValue/set_target_id
|
def set_target_id(target_id):
    """Set the target_id of the relation. It must be an ID
    given by get_content_id or an IntID.

    Interface stub: implemented by concrete IReferenceValue objects.
    """
|
coneyeye-0.1.post21
|
coneyeye-0.1.post21//versioneer.pyfile:/versioneer.py:function:render_pep440_old/render_pep440_old
|
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = tag
        # Only append a .post segment when there is distance or dirt.
        if pieces['distance'] or pieces['dirty']:
            rendered += '.post%d' % pieces['distance']
            if pieces['dirty']:
                rendered += '.dev0'
        return rendered
    # No tag at all: synthesise a 0.postN version.
    rendered = '0.post%d' % pieces['distance']
    if pieces['dirty']:
        rendered += '.dev0'
    return rendered
|
strct-0.0.30
|
strct-0.0.30//strct/sortedlists/sortedlist.pyfile:/strct/sortedlists/sortedlist.py:function:find_range_ix_in_point_list/find_range_ix_in_point_list
|
def find_range_ix_in_point_list(start, end, point_list):
    """Return the index range of all points inside the given range.

    Parameters
    ----------
    start : float
        The start of the desired range.
    end : float
        The end of the desired range.
    point_list : sortedcontainers.SortedList
        A list of points.

    Returns
    -------
    iterable
        ``[lo, hi]`` such that ``point_list[lo:hi]`` is exactly the
        points with ``start <= p <= end``. For example with points
        [5, 8, 15], the range (3, 8) yields [0, 2].
    """
    lo = point_list.bisect_left(start)
    hi = point_list.bisect_right(end)
    return [lo, hi]
|
hgx
|
hgx//config.pyclass:_NamedListMeta/__len__
|
def __len__(cls):
    """ Use the number of _fields for the class length.

    Metaclass hook: makes ``len(SomeNamedList)`` report the field count
    of the class itself.
    """
    return len(cls._fields)
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/clouddirectory.pyfile:/pyboto3/clouddirectory.py:function:create_facet/create_facet
|
def create_facet(SchemaArn=None, Name=None, Attributes=None, ObjectType=None):
    """
    Creates a new Facet in a schema. Facet creation is allowed only in
    development or applied schemas.

    See also: AWS API Documentation (CloudDirectory ``CreateFacet``).

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The schema ARN in which the new Facet
        will be created. For more information, see arns.
    :type Name: string
    :param Name: [REQUIRED] The name of the Facet, which is unique for a
        given schema.
    :type Attributes: list
    :param Attributes: The attributes associated with the Facet. Each
        entry is a dict with a required ``Name`` plus either an
        ``AttributeDefinition`` (``Type`` of
        'STRING'|'BINARY'|'BOOLEAN'|'NUMBER'|'DATETIME', optional
        ``DefaultValue``, ``IsImmutable`` flag and validation ``Rules``)
        or an ``AttributeReference`` (``TargetFacetName`` and
        ``TargetAttributeName``), and an optional ``RequiredBehavior``
        ('REQUIRED_ALWAYS'|'NOT_REQUIRED').
    :type ObjectType: string
    :param ObjectType: [REQUIRED] Whether objects created from this facet
        are of type 'NODE', 'LEAF_NODE', 'POLICY' or 'INDEX'.
    :rtype: dict
    :return: {}
    """
    # Documentation stub only (pyboto3): the real request is issued by
    # botocore at runtime.
    pass
|
fake-blender-api-2.79-0.3.1
|
fake-blender-api-2.79-0.3.1//bpy/ops/mball.pyfile:/bpy/ops/mball.py:function:delete_metaelems/delete_metaelems
|
def delete_metaelems():
    """Delete selected metaelement(s)

    Fake-API documentation stub for ``bpy.ops.mball.delete_metaelems``;
    performs no action.
    """
    pass
|
parselcli
|
parselcli//embed.pyfile:/embed.py:function:embed_bpython_shell/embed_bpython_shell
|
def embed_bpython_shell(namespace=None, history_filename=None):
    """Start a bpython shell.

    :param namespace: dict of names exposed as locals inside the shell.
    :param history_filename: accepted for API symmetry with the other
        embed_* helpers but not forwarded to bpython here
        (NOTE(review): confirm whether it should be).
    """
    import bpython
    bpython.embed(locals_=namespace)
|
mingus-0.6.0
|
mingus-0.6.0//mingus/midi/pyfluidsynth.pyfile:/mingus/midi/pyfluidsynth.py:function:raw_audio_string/raw_audio_string
|
def raw_audio_string(data):
    """Return a string of bytes to send to soundcard.

    Input is a numpy array of samples. Default output format is 16-bit
    signed (other formats not currently supported).

    :param data: numpy array of samples; cast to int16 before packing.
    :returns: raw bytes of the int16 samples.
    """
    import numpy
    # ndarray.tostring() was deprecated and removed in NumPy 2.0;
    # tobytes() is the exact, byte-identical replacement.
    return data.astype(numpy.int16).tobytes()
|
mu_repo
|
mu_repo//umsgpack_s_conn.pyfile:/umsgpack_s_conn.py:function:wait_for_condition/wait_for_condition
|
def wait_for_condition(condition, timeout=2.0):
    """
    Helper to wait for a condition with a timeout.

    :param condition:
        Zero-argument callable polled (every ~10 ms) until it returns a
        truthy value.
    :param float timeout:
        Maximum time to wait for the condition (in seconds).
    :return bool:
        True if the condition was satisfied, False if the timeout was
        reached first.
    """
    import time
    deadline = time.time() + timeout
    while not condition():
        if time.time() > deadline:
            return False
        time.sleep(0.01)
    return True
|
siki
|
siki//basics/Convert.pyfile:/basics/Convert.py:function:string_to_binary/string_to_binary
|
def string_to_binary(strdata):
    """
    Encode the given string as UTF-8 and return the resulting bytes.
    """
    encoded = strdata.encode('UTF8')
    return encoded
|
control-0.8.3
|
control-0.8.3//control/modelsimp.pyfile:/control/modelsimp.py:function:era/era
|
def era(YY, m, n, nin, nout, r):
    """
    Calculate an ERA model of order `r` based on the impulse-response data `YY`.

    .. note:: This function is not implemented yet; calling it always
       raises NotImplementedError.

    Parameters
    ----------
    YY: array
        `nout` x `nin` dimensional impulse-response data
    m: integer
        Number of rows in Hankel matrix
    n: integer
        Number of columns in Hankel matrix
    nin: integer
        Number of input variables
    nout: integer
        Number of output variables
    r: integer
        Order of model

    Returns
    -------
    sys: StateSpace
        A reduced order model sys=ss(Ar,Br,Cr,Dr)

    Examples
    --------
    >>> rsys = era(YY, m, n, nin, nout, r)
    """
    raise NotImplementedError('This function is not implemented yet.')
|
wmcore-1.1.19.2
|
wmcore-1.1.19.2//src/python/WMComponent/AnalyticsDataCollector/DataCollectAPI.pyfile:/src/python/WMComponent/AnalyticsDataCollector/DataCollectAPI.py:function:_setMultiLevelStatus/_setMultiLevelStatus
|
def _setMultiLevelStatus(statusData, status, value):
"""
handle the sub status structure
(i.e. submitted_pending, submitted_running -> {submitted: {pending: , running:}})
prerequisite: status structure is seperated by '_'
Currently handle only upto 2 level stucture but can be extended
"""
statusStruct = status.split('_')
if len(statusStruct) == 1:
statusData.setdefault(status, 0)
statusData[status] += value
else:
statusData.setdefault(statusStruct[0], {})
statusData[statusStruct[0]].setdefault(statusStruct[1], 0)
statusData[statusStruct[0]][statusStruct[1]] += value
return
|
anyblok_pyramid_rest_api
|
anyblok_pyramid_rest_api//pyramid_config.pyfile:/pyramid_config.py:function:pyramid_cornice/pyramid_cornice
|
def pyramid_cornice(config):
    """Add cornice includeme in pyramid configuration
    :param config: Pyramid configurator instance
    """
    # Straight delegation to Pyramid's include mechanism.
    config.include('cornice')
|
pymatch-0.3.4
|
pymatch-0.3.4//pymatch/functions.pyfile:/pymatch/functions.py:function:is_continuous/is_continuous
|
def is_continuous(colname, dmatrix):
    """
    Check if the colname was treated as continuous in the patsy.dmatrix
    Would look like colname[<factor_value>] otherwise

    A continuous column keeps its bare name, so membership of the exact
    name in ``dmatrix.columns`` is the test.
    """
    return colname in dmatrix.columns
|
bumblebee
|
bumblebee//modules/cpu2.pyclass:Module/add_color
|
@staticmethod
def add_color(bar):
    """add color as pango markup to a bar"""
    # NOTE(review): the bar glyph literals below appear mangled by lossy
    # encoding (upstream uses Unicode block characters such as ▁▂▃▄▅▆▇█);
    # every branch compares against identical 'β' strings and one literal
    # spans a line break — confirm against the original source before
    # relying on this mapping.
    if bar in ['β', 'β']:
        color = 'green'
    elif bar in ['β', 'β']:
        color = 'yellow'
    elif bar in ['β
        ', 'β']:
        color = 'orange'
    elif bar in ['β', 'β']:
        color = 'red'
    colored_bar = "<span foreground='{}'>{}</span>".format(color, bar)
    return colored_bar
|
pants
|
pants//help/help_info_extracter.pyclass:HelpInfoExtracter/get_option_scope_help_info_from_parser
|
@classmethod
def get_option_scope_help_info_from_parser(cls, parser):
    """Returns a dict of help information for the options registered on the given parser.
    Callers can format this dict into cmd-line help, HTML or whatever.

    Builds an extracter for the parser's scope and feeds it the parser's
    registered option iterator.
    """
    return cls(parser.scope).get_option_scope_help_info(parser.
        option_registrations_iter())
|
awlsim-0.73.0
|
awlsim-0.73.0//awlsim/common/env.pyclass:AwlSimEnv/getSched
|
@classmethod
def getSched(cls):
    """Parse the AWLSIM_SCHED environment variable.

    Returns one of the SCHED_... class constants, or None for an
    unrecognised value. A trailing '-if-multicore' suffix demotes the
    realtime policies (fifo/rr/deadline) to SCHED_NORMAL on single-CPU
    hosts.
    """
    raw = cls.__getVar('SCHED', '').lower().strip()
    suffix = '-if-multicore'
    onlyIfMulticore = raw.endswith(suffix)
    if onlyIfMulticore:
        raw = raw[:-len(suffix)]

    def demote(policy):
        # Realtime scheduling is only requested when the host has more
        # than one CPU and the '-if-multicore' suffix was given.
        if onlyIfMulticore and cls.__getCpuCount() <= 1:
            return cls.SCHED_NORMAL
        return policy
    if raw == cls.SCHED_DEFAULT:
        return cls.SCHED_DEFAULT
    if raw in (cls.SCHED_NORMAL, 'other'):
        return cls.SCHED_NORMAL
    if raw in (cls.SCHED_FIFO, 'realtime'):
        return demote(cls.SCHED_FIFO)
    if raw == cls.SCHED_RR:
        return demote(cls.SCHED_RR)
    if raw == cls.SCHED_DEADLINE:
        return demote(cls.SCHED_DEADLINE)
    return None
|
Products.ATSchemaEditorNG-0.6
|
Products.ATSchemaEditorNG-0.6//Products/ATSchemaEditorNG/interfaces.pyclass:ISchemaEditor/atse_formatVocabulary
|
def atse_formatVocabulary(field):
    """Format the DisplayList of *field* for display within a textarea.

    Interface declaration (ISchemaEditor): implementations, not this
    declaration, provide the behaviour.
    """
|
text2vec
|
text2vec//utils/ngram.pyclass:NgramUtil/unigrams
|
@staticmethod
def unigrams(words):
"""
Input: a list of words, e.g., ["I", "am", "Denny"]
Output: a list of unigram
"""
assert type(words) == list
return words
|
predator
|
predator//utils.pyfile:/utils.py:function:remove_terminal/remove_terminal
|
def remove_terminal(node, dag: dict, parents: set=None):
    """Delete *node* from *dag* in place, as if it were a terminal vertex.

    Every predecessor's successor collection loses *node*, and the node's
    own entry (if any) is dropped. *parents* may pre-seed the set of known
    predecessors; predecessors found in *dag* are added to it.
    """
    preds = parents if parents else set()
    # Collect every vertex whose successor collection mentions the node.
    preds.update(pred for pred, succs in dag.items() if node in succs)
    for pred in preds:
        dag[pred].remove(node)
    dag.pop(node, None)
|
python-msp430-tools-0.9.2
|
python-msp430-tools-0.9.2//msp430/memory/hexdump.pyfile:/msp430/memory/hexdump.py:function:sixteen/sixteen
|
def sixteen(address, sequence):
    """Yield ``(address, chunk)`` pairs of up to 16 consecutive elements.

    *address* advances by 16 per full chunk; a final partial chunk (if
    any) is yielded last. NOTE: the same list object is reused (and
    cleared) between yields, so consumers must copy a chunk before
    advancing the generator.
    """
    chunk = []
    for item in sequence:
        chunk.append(item)
        if len(chunk) == 16:
            yield address, chunk
            address += 16
            del chunk[:]
    if chunk:
        yield address, chunk
|
cloudmarker
|
cloudmarker//util.pyfile:/util.py:function:friendly_string/friendly_string
|
def friendly_string(technical_string):
    """Translate a technical string to a human-friendly phrase.

    Succinct internal identifiers (e.g. ``'gcp'``) are mapped to phrases
    suitable for messages read by end users (such as security analysts);
    strings without a known translation pass through unchanged.

    Examples:
        Here are a few example usages of this function:
        >>> from cloudmarker import util
        >>> util.friendly_string('azure')
        'Azure'
        >>> util.friendly_string('gcp')
        'Google Cloud Platform (GCP)'

    Arguments:
        technical_string (str): A technical string.
    Returns:
        str: Human-friendly string if a translation from a technical
        string to friendly string exists; the same string otherwise.
    """
    translations = {'azure': 'Azure', 'gcp': 'Google Cloud Platform (GCP)'}
    try:
        return translations[technical_string]
    except KeyError:
        return technical_string
|
firms-0.0.11
|
firms-0.0.11//firms/graders.pyfile:/firms/graders.py:function:update_with/update_with
|
def update_with(d1, d2, aggregator, zero):
    """Fold *d2* into *d1* in place and return *d1*.

    For every key of *d2*, ``d1[k]`` becomes ``aggregator(d1[k], d2[k])``;
    a key missing from *d1* is first initialised with ``zero()``.

    :param d1: Dictionary to fold into (mutated)
    :param d2: Dictionary to draw updates from
    :param aggregator: binary function combining existing and new values
    :param zero: zero-argument factory for the initial value of new keys
    """
    for key, incoming in d2.items():
        # Call zero() lazily -- only when the key is genuinely new.
        existing = d1[key] if key in d1 else zero()
        d1[key] = aggregator(existing, incoming)
    return d1
|
lxml
|
lxml//html/diff.pyfile:/html/diff.py:function:expand_tokens/expand_tokens
|
def expand_tokens(tokens, equal=False):
    """Yield the chunks of text carried by *tokens*, in document order.

    Each token contributes its ``pre_tags``, then its own ``html()``
    (suppressed when *equal* is true and the token is marked
    ``hide_when_equal``), then its ``post_tags``. Trailing whitespace,
    when present, is appended to the html chunk.
    """
    for tok in tokens:
        yield from tok.pre_tags
        if not (equal and tok.hide_when_equal):
            body = tok.html()
            if tok.trailing_whitespace:
                body += tok.trailing_whitespace
            yield body
        yield from tok.post_tags
|
metaknowledge-3.3.2
|
metaknowledge-3.3.2//metaknowledge/medline/tagProcessing/specialFunctions.pyfile:/metaknowledge/medline/tagProcessing/specialFunctions.py:function:address/address
|
def address(R):
    """Return the first affiliation (AD) recorded for the record's first author (AU)."""
    first_author = R['AU'][0]
    return R['AD'][first_author][0]
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/athena.pyfile:/pyboto3/athena.py:function:get_named_query/get_named_query
|
def get_named_query(NamedQueryId=None):
    """
    Returns information about a single Athena named query.

    Documentation stub generated from the AWS API reference; the body is
    intentionally empty (the real call is issued by the boto3 client).

    See also: AWS API Documentation
    :example: response = client.get_named_query(
        NamedQueryId='string'
    )
    :type NamedQueryId: string
    :param NamedQueryId: [REQUIRED]
        The unique ID of the query. Use ListNamedQueries to get query IDs.
    :rtype: dict
    :return: {
        'NamedQuery': {
            'Name': 'string',
            'Description': 'string',
            'Database': 'string',
            'QueryString': 'string',
            'NamedQueryId': 'string'
        }
    }
    """
    pass
|
bpy
|
bpy//ops/clip.pyfile:/ops/clip.py:function:dopesheet_select_channel/dopesheet_select_channel
|
def dopesheet_select_channel(location: tuple=(0.0, 0.0), extend: bool=False):
    """Select movie tracking channel (stub of ``bpy.ops.clip``; no-op here).

    :param location: Location, mouse location (x, y) used to pick the channel
    :type location: tuple of two floats
    :param extend: Extend, extend selection rather than clearing the existing selection
    :type extend: bool
    """
    pass
|
torchscan
|
torchscan//modules/flops.pyfile:/modules/flops.py:function:flops_elu/flops_elu
|
def flops_elu(module, input, output):
    """FLOPs estimate for ``torch.nn.ELU``: ~6 ops per input element.

    *module* and *output* are unused but kept for the common hook signature.
    """
    element_count = input.numel()
    return 6 * element_count
|
asyncssh
|
asyncssh//misc.pyfile:/misc.py:function:hide_empty/hide_empty
|
def hide_empty(value, prefix=', '):
    """Return ``str(value)`` preceded by *prefix*, or '' when the string form is empty."""
    text = str(value)
    if not text:
        return ''
    return prefix + text
|
idact
|
idact//detail/deployment/get_command_to_append_local_bin.pyfile:/detail/deployment/get_command_to_append_local_bin.py:function:get_command_to_append_local_bin/get_command_to_append_local_bin
|
def get_command_to_append_local_bin() ->str:
    """Return a shell command that appends pip's user-base bin dir to PATH.

    The directory (usually ``~/.local/bin``) may be missing from ``PATH``
    in a non-login shell, so the command tacks it on at the end.
    """
    return 'export PATH="$PATH:$(python -m site --user-base)/bin"'
|
fake-blender-api-2.79-0.3.1
|
fake-blender-api-2.79-0.3.1//freestyle/utils/ContextFunctions.pyfile:/freestyle/utils/ContextFunctions.py:function:get_canvas_height/get_canvas_height
|
def get_canvas_height() ->int:
    """Returns the canvas height.

    Stub of the Freestyle ``ContextFunctions`` API: outside Blender this
    implementation is a no-op and returns None despite the annotation.
    :return: The canvas height.
    """
    pass
|
oop-ext-0.6.0
|
oop-ext-0.6.0//src/oop_ext/foundation/singleton.pyclass:Singleton/_UsingDefaultSingleton
|
@classmethod
def _UsingDefaultSingleton(cls):
    """
    Tell whether the active singleton is the default-created instance.

    :rtype: bool
    :returns:
        True only when a singleton instance exists AND nothing has been
        pushed onto the singleton stack; False when a pushed singleton is
        active or when no instance is currently set.
    """
    pushed = len(cls._ObtainStack()) != cls.__singleton_stack_start_index
    return cls.HasSingleton() and not pushed
|
eric6
|
eric6//Utilities/uic.pyfile:/Utilities/uic.py:function:__pyName/__pyName
|
def __pyName(py_dir, py_file):
    """
    Local function to build the directory/file name pair for the Python
    module compiled from a .ui file: the file name gains a 'Ui_' prefix,
    the directory passes through unchanged.
    @param py_dir suggested name of the directory (string)
    @param py_file suggested name for the compiled source file (string)
    @return tuple of directory name (string) and source file name (string)
    """
    return py_dir, 'Ui_%s' % py_file
|
hackedit-1.0a2
|
hackedit-1.0a2//hackedit/api/plugins.pyclass:EditorPlugin/apply_specific_preferences
|
@classmethod
def apply_specific_preferences(cls, editor):
    """
    Apply the plugin's editor-specific preferences to *editor*.

    Default implementation does nothing; plugin subclasses are expected
    to override it.
    :param editor: the editor instance to configure
    """
    pass
|
audio.wave-4.0.2
|
audio.wave-4.0.2//.lib/setuptools/extension.pyfile:/.lib/setuptools/extension.py:function:have_pyrex/have_pyrex
|
def have_pyrex():
    """
    Return True if Cython or Pyrex can be imported.

    Tries the Cython build_ext first, then the legacy Pyrex one; any
    import/attribute failure is treated as "not available".
    """
    for module_path in ('Cython.Distutils.build_ext', 'Pyrex.Distutils.build_ext'):
        try:
            module = __import__(module_path, fromlist=['build_ext'])
            module.build_ext
        except Exception:
            continue
        return True
    return False
|
radssh-1.1.2
|
radssh-1.1.2//radssh/console.pyfile:/radssh/console.py:function:colorizer/colorizer
|
def colorizer(tag, text):
    """Yield ANSI-colorized output lines.

    The label's hash picks one of a 7-color palette; *hilight* (used for
    stderr) renders the label inverse and the text bold.
    """
    label, hilight = tag
    palette_index = 1 + hash(label) % 7
    lines = text.split('\n')
    if hilight:
        template = '\x1b[30;4%dm[%s]\x1b[0;1;3%dm %s\x1b[0m\n'
        for line in lines:
            yield template % (palette_index, label, palette_index, line)
    else:
        template = '\x1b[3%dm[%s] %s\x1b[0m\n'
        for line in lines:
            yield template % (palette_index, label, line)
|
ftk
|
ftk//utils.pyfile:/utils.py:function:get_os/get_os
|
def get_os():
    """
    Return the operating system of the host machine.
    :return:
        One of these values: "windows", "linux", and "others".
    """
    import sys
    platform = sys.platform
    if platform.startswith('win'):
        return 'windows'
    if platform == 'linux':
        return 'linux'
    return 'others'
|
noworkflow
|
noworkflow//now/cmd/cmd_diff.pyfile:/now/cmd/cmd_diff.py:function:hide_timestamp/hide_timestamp
|
def hide_timestamp(elements):
    """Flag every element in *elements* by setting ``hide_timestamp`` to True."""
    for elem in elements:
        elem.hide_timestamp = True
|
depository-0.1
|
depository-0.1//versioneer.pyfile:/versioneer.py:function:scan_setup_py/scan_setup_py
|
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Scans ./setup.py for the required versioneer hooks and for obsolete
    setter assignments; prints advice for each problem found and returns
    the number of problems (0 when everything looks right).
    """
    wanted = {'import versioneer': 'import',
              'versioneer.get_cmdclass()': 'cmdclass',
              'versioneer.get_version()': 'get_version'}
    setter_markers = ('versioneer.VCS', 'versioneer.versionfile_source')
    found = set()
    setters = False
    errors = 0
    with open('setup.py', 'r') as handle:
        for line in handle:
            for marker, tag in wanted.items():
                if marker in line:
                    found.add(tag)
            if any(marker in line for marker in setter_markers):
                setters = True
    if len(found) != 3:
        for msg in ('',
                    'Your setup.py appears to be missing some important items',
                    '(but I might be wrong). Please make sure it has something',
                    'roughly like the following:',
                    '',
                    ' import versioneer',
                    ' setup( version=versioneer.get_version(),',
                    ' cmdclass=versioneer.get_cmdclass(), ...)',
                    ''):
            print(msg)
        errors += 1
    if setters:
        for msg in ("You should remove lines like 'versioneer.VCS = ' and",
                    "'versioneer.versionfile_source = ' . This configuration",
                    'now lives in setup.cfg, and should be removed from setup.py',
                    ''):
            print(msg)
        errors += 1
    return errors
|
QMix-1.0.5
|
QMix-1.0.5//qmix/exp/clean_data.pyfile:/qmix/exp/clean_data.py:function:sort_xy/sort_xy
|
def sort_xy(x, y):
    """Sort paired x/y arrays by ascending x.

    Args:
        x (ndarray): x array
        y (ndarray): y array
    Returns:
        tuple: (x, y) with both arrays reordered by the sort order of x
    """
    order = x.argsort()
    return x[order], y[order]
|
pya2l-0.0.1
|
pya2l-0.0.1//pya2l/parser/grammar/parser.pyclass:A2lParser/p_guard_rails
|
@staticmethod
def p_guard_rails(p):
    """guard_rails : GUARD_RAILS"""
    # NOTE: the docstring above is a PLY/yacc grammar production parsed at
    # runtime -- do not reword it. The rule simply propagates the
    # GUARD_RAILS token value (p[1]) into the production result (p[0]).
    p[0] = p[1]
|
xarray-0.15.1
|
xarray-0.15.1//xarray/coding/cftime_offsets.pyfile:/xarray/coding/cftime_offsets.py:function:_generate_range/_generate_range
|
def _generate_range(start, end, periods, offset):
    """Generate a regular range of cftime.datetime objects with a
    given time offset.
    Adapted from pandas.tseries.offsets.generate_range.
    Parameters
    ----------
    start : cftime.datetime, or None
        Start of range
    end : cftime.datetime, or None
        End of range
    periods : int, or None
        Number of elements in the sequence
    offset : BaseCFTimeOffset
        An offset class designed for working with cftime.datetime objects
    Returns
    -------
    A generator object
    """
    # Snap the endpoints onto the offset's grid: start rolls forward to
    # the next valid date, end rolls back to the previous one.
    if start:
        start = offset.rollforward(start)
    if end:
        end = offset.rollback(end)
    # Both endpoints given but in the wrong order: emit an empty range.
    # NOTE(review): when both end and periods are None this comparison
    # raises TypeError -- callers must supply at least two of
    # start/end/periods.
    if periods is None and end < start:
        end = None
        periods = 0
    # Derive whichever endpoint is missing from the period count.
    if end is None:
        end = start + (periods - 1) * offset
    if start is None:
        start = end - (periods - 1) * offset
    current = start
    if offset.n >= 0:
        # Non-negative offsets walk forward from start up to end; the
        # guard detects a degenerate offset that fails to advance the
        # date, which would otherwise loop forever.
        while current <= end:
            yield current
            next_date = current + offset
            if next_date <= current:
                raise ValueError(f'Offset {offset} did not increment date')
            current = next_date
    else:
        # Negative offsets walk backward from start down to end.
        while current >= end:
            yield current
            next_date = current + offset
            if next_date >= current:
                raise ValueError(f'Offset {offset} did not decrement date')
            current = next_date
|
sypi-0.1.4
|
sypi-0.1.4//sypi/info.pyfile:/sypi/info.py:function:_linux_get_system_memory/_linux_get_system_memory
|
def _linux_get_system_memory():
"""
Get node total memory and memory usage.
http://stackoverflow.com/a/17718729.
"""
try:
with open('/proc/meminfo', 'r') as mem:
ret = {}
tmp = 0
for i in mem:
sline = i.split()
if str(sline[0]) == 'MemTotal:':
ret['total'] = int(sline[1])
elif str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
tmp += int(sline[1])
ret['free'] = tmp
ret['used'] = int(ret['total']) - int(ret['free'])
except:
return {'error': "can't read meminfo"}
return ret
|
IPython
|
IPython//core/async_helpers.pyfile:/core/async_helpers.py:function:_curio_runner/_curio_runner
|
def _curio_runner(coroutine):
    """
    Handler for curio autoawait: run *coroutine* to completion on a curio
    kernel and return its result.
    """
    # Imported lazily so IPython does not require curio unless this
    # runner is actually selected.
    import curio
    result = curio.run(coroutine)
    return result
|
collective.cover-2.2.2
|
collective.cover-2.2.2//src/collective/cover/tiles/base.pyclass:IPersistentCoverTile/getAllowedGroupsForEdit
|
def getAllowedGroupsForEdit():
    """
    Return the list of groups that are allowed to edit the contents of
    this tile.

    Interface declaration (IPersistentCoverTile): implementations, not
    this declaration, provide the behaviour.
    """
|
xmitgcm
|
xmitgcm//mds_store.pyfile:/mds_store.py:function:_set_coords/_set_coords
|
def _set_coords(ds):
"""Turn all variables without `time` dimensions into coordinates."""
coords = set()
for vname in ds.variables:
if 'time' not in ds[vname].dims or ds[vname].dims == ('time',):
coords.add(vname)
return ds.set_coords(list(coords))
|
properly_util_python
|
properly_util_python//dynamo_streams.pyclass:DynamoStreamEventHandler/_get_table_name
|
@staticmethod
def _get_table_name(record):
"""
The source ARN is in the format of:
'arn:aws:dynamodb:us-east-1:529943178829:table/staging-listing-clean-001/stream/2018-10-18T19:46:17.718'
We are doing the following:
* looking for the first backslash '/' and second backslash '/'
* then parsing out the environment out of the string
:param record: The DynamoDB stream record
:return: The environment agnostic table name
"""
source_arn = record.get('eventSourceARN')
first_index = source_arn.index('/', 0)
second_index = source_arn.index('/', first_index + 1)
full_table_name = source_arn[first_index + 1:second_index]
first_dash_index = full_table_name.index('-', 0)
simplified_table_name = full_table_name[first_dash_index + 1:]
return simplified_table_name
|
cvtron
|
cvtron//preprocessor/voc_preprocessing.pyfile:/preprocessor/voc_preprocessing.py:function:convert_to_string/convert_to_string
|
def convert_to_string(image_path, labels):
    """Serialise an image path and its label rows into one line.

    Returns:
        str: '<image_path> v1 v2 ...\\n' containing every value of every
        label row, space-separated, newline-terminated.
    """
    parts = [image_path]
    for label in labels:
        parts.extend(str(value) for value in label)
    return ' '.join(parts) + '\n'
|
Pytzer-0.4.3
|
Pytzer-0.4.3//pytzer/parameters.pyfile:/pytzer/parameters.py:function:theta_H_Mnjj_PK74/theta_H_Mnjj_PK74
|
def theta_H_Mnjj_PK74(T, P):
    """c-c': hydrogen manganese(II) [PK74]."""
    # Constant interaction parameter, only tabulated at 298.15 K; the
    # validity flag marks any other temperature as out of range.
    valid = T == 298.15
    return 0.0, valid
|
binary-refinery-0.2.0
|
binary-refinery-0.2.0//refinery/explore.pyfile:/refinery/explore.py:function:highlight/highlight
|
def highlight(text, expression, color):
    """
    Uses ANSI color codes to highlight matches of the given regular
    `expression` (a compiled pattern) in `text`.
    """
    def paint(match):
        return '\x1b[' + color + 'm' + match.group(0) + '\x1b[0m'
    return expression.sub(paint, text)
|
flywheel_cli
|
flywheel_cli//commands/providers.pyfile:/commands/providers.py:function:add_gc_storage_config/add_gc_storage_config
|
def add_gc_storage_config(parser):
    """Add Google Cloud (GC) storage provider arguments to *parser*.

    :param parser: argparse parser (or subparser) to extend with
        --region, --path and --bucket options, all optional.
    """
    # Fixed typos in the user-facing help text ('stroage' -> 'storage'
    # in the docstring, 'Storge' -> 'Storage' for --path).
    parser.add_argument('--region', required=False, help='GC Storage region')
    parser.add_argument('--path', required=False, help='GC Storage path')
    parser.add_argument('--bucket', required=False, help='GC bucket name')
|
mockaccino-0.2
|
mockaccino-0.2//mockaccino/mocks.pyfile:/mockaccino/mocks.py:function:replay/replay
|
def replay(*args):
    """
    Put every given mock into replay mode, so that all subsequent method
    calls are checked against what was previously recorded.
    """
    for mock_obj in args:
        mock_obj.enter_replay_mode()
|
smc
|
smc//core/route.pyfile:/core/route.py:function:_which_ip_protocol/_which_ip_protocol
|
def _which_ip_protocol(element):
"""
Validate the protocol addresses for the element. Most elements can
have an IPv4 or IPv6 address assigned on the same element. This
allows elements to be validated and placed on the right network.
:return: boolean tuple
:rtype: tuple(ipv4, ipv6)
"""
try:
if element.typeof in ('host', 'router'):
return getattr(element, 'address', False), getattr(element,
'ipv6_address', False)
elif element.typeof == 'netlink':
gateway = element.gateway
if gateway.typeof == 'router':
return getattr(gateway, 'address', False), getattr(gateway,
'ipv6_address', False)
elif element.typeof == 'network':
return getattr(element, 'ipv4_network', False), getattr(element,
'ipv6_network', False)
except AttributeError:
pass
return True, True
|
virtuinbridge
|
virtuinbridge//virtuinglobalstubs.pyfile:/virtuinglobalstubs.py:function:GetLatestMeasurementText/GetLatestMeasurementText
|
def GetLatestMeasurementText(*args, **kwargs):
    """Stub of the Anduin ``GetLatestMeasurementText`` API: logs the call and returns None."""
    print('GetLatestMeasurementText', args, kwargs)
|
fake-blender-api-2.79-0.3.1
|
fake-blender-api-2.79-0.3.1//bpy/ops/screen.pyfile:/bpy/ops/screen.py:function:area_swap/area_swap
|
def area_swap():
    """Swap selected areas screen positions.

    Stub of ``bpy.ops.screen.area_swap``: a no-op outside Blender.
    """
    pass
|
pydens
|
pydens//batchflow/batchflow/models/tf/base.pyclass:TFModel/fill_params
|
@classmethod
def fill_params(cls, _name, **kwargs):
    """Fill block params: common defaults, then the block's own defaults
    (under key *_name* in the default config), then explicit kwargs --
    later sources override earlier ones."""
    defaults = cls.default_config()
    block_defaults = defaults.get(_name)
    merged = dict(defaults['common'])
    merged.update(block_defaults)
    merged.update(kwargs)
    return merged
|
cinder-15.1.0
|
cinder-15.1.0//cinder/volume/drivers/netapp/utils.pyfile:/cinder/volume/drivers/netapp/utils.py:function:get_legacy_qos_policy/get_legacy_qos_policy
|
def get_legacy_qos_policy(extra_specs):
    """Return legacy qos policy information if present in extra specs.

    :returns: ``{'policy_name': <name>}`` when the
        'netapp:qos_policy_group' extra spec is set, else None.
    """
    policy_name = extra_specs.get('netapp:qos_policy_group')
    if policy_name is None:
        return None
    return {'policy_name': policy_name}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.