repo | path | code |
---|---|---|
tacker-2.0.0
|
tacker-2.0.0//tacker/wsgi.pyclass:Application/factory
|
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = tacker.api.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import tacker.api.fancy_api
fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but with the kwarg passing it shouldn't be necessary.
"""
return cls(**local_config)
|
ODtools-1.2.5
|
ODtools-1.2.5//ODtools/fdfs_client/utils.pyfile:/ODtools/fdfs_client/utils.py:function:split_remote_fileid/split_remote_fileid
|
def split_remote_fileid(remote_file_id):
"""
Split remote_file_id into (group_name, remote_file_name)
arguments:
@remote_file_id: string
@return tuple, (group_name, remote_file_name)
"""
index = remote_file_id.find('/')
if -1 == index:
return None
return remote_file_id[0:index], remote_file_id[index + 1:]
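# Usage sketch (illustrative FastDFS-style file id; the group name is the
# part before the first '/'):
assert split_remote_fileid('group1/M00/00/00/demo.txt') == ('group1',
    'M00/00/00/demo.txt')
assert split_remote_fileid('no-slash-here') is None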
|
pygame-1.9.6
|
pygame-1.9.6//src_py/sysfont.pyfile:/src_py/sysfont.py:function:_addfont/_addfont
|
def _addfont(name, bold, italic, font, fontdict):
"""insert a font and style into the font dictionary"""
if name not in fontdict:
fontdict[name] = {}
fontdict[name][bold, italic] = font
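# Usage sketch: fonts are keyed first by name, then by a (bold, italic)
# tuple (values here are illustrative):
fonts = {}
_addfont('arial', True, False, '/fonts/arialbd.ttf', fonts)
assert fonts == {'arial': {(True, False): '/fonts/arialbd.ttf'}}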
|
neuprint
|
neuprint//utils.pyfile:/utils.py:function:skeleton_df_to_swc/skeleton_df_to_swc
|
def skeleton_df_to_swc(df, export_path=None):
"""
Convert a skeleton DataFrame into the text of an SWC file.
Args:
df:
DataFrame, as returned by :py:meth:`.Client.fetch_skeleton()`
export_path:
Optional. Write the SWC file to disk at the given location.
Returns:
string
"""
df['node_type'] = 0
df = df[['rowId', 'node_type', 'x', 'y', 'z', 'radius', 'link']]
swc = '# '
swc += df.to_csv(sep=' ', header=True, index=False)
if export_path:
with open(export_path, 'w') as f:
f.write(swc)
return swc
|
dorthy-0.7.7
|
dorthy-0.7.7//dorthy/enum.pyclass:DeclarativeEnum/convert
|
@classmethod
def convert(cls, value):
"""
Converts the given value to the enum value
Returns:
the enum for the given value
Raises:
ValueError: the enum value does not exist in the enum
"""
value = str(value)
try:
return cls._registered[value]
except KeyError:
raise ValueError('Invalid value for {}: {}'.format(cls.__name__, value)
)
|
telegraphy-0.1.2.7
|
telegraphy-0.1.2.7//telegraphy/utils.pyfile:/telegraphy/utils.py:function:build_url_from_settings/build_url_from_settings
|
def build_url_from_settings(settings):
"""Constructs web socket urls from settings"""
is_secure = settings.TELEGRAPHY_IS_SECURE
proto = 'wss' if is_secure else 'ws'
url_parts = [proto, '://', settings.TELEGRAPHY_WS_HOST or 'localhost',
':', str(settings.TELEGRAPHY_WS_PORT), '/', settings.
TELEGRAPHY_WS_URI or '']
return ''.join(url_parts)
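# Usage sketch with a hypothetical settings object standing in for Django
# settings (attribute names taken from the function above):
from types import SimpleNamespace
_settings = SimpleNamespace(TELEGRAPHY_IS_SECURE=False,
    TELEGRAPHY_WS_HOST='example.com', TELEGRAPHY_WS_PORT=9000,
    TELEGRAPHY_WS_URI='events')
assert build_url_from_settings(_settings) == 'ws://example.com:9000/events'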
|
django-dcore-0.17
|
django-dcore-0.17//dcore/enums.pyclass:ChoiceEnum/has_key
|
@classmethod
def has_key(cls, key):
"""Check if the key is defined in the enum."""
return key in cls.__members__
|
srl
|
srl//parsers/parse.pyfile:/parsers/parse.py:function:p_quantifier_once_or_more/p_quantifier_once_or_more
|
def p_quantifier_once_or_more(p):
"""quantifier : K_ONCE K_OR K_MORE"""
p[0] = [('once_or_more', ())]
|
Fortpy-1.7.7
|
Fortpy-1.7.7//fortpy/interop/ftypes.pyfile:/fortpy/interop/ftypes.py:function:_py_pytype/_py_pytype
|
def _py_pytype(parameter):
"""Returns the name of the *python* type for the fortran parameter.
"""
pytype = parameter.pytype
if pytype is None:
raise ValueError("Can't pre-determine type for parameter: {}".
format(parameter.definition()))
return pytype
|
otree-core-1.4.49
|
otree-core-1.4.49//otree/views/abstract.pyclass:FormPageOrInGameWaitPage/get_url
|
@classmethod
def get_url(cls, participant_code, name_in_url, page_index):
"""need this because reverse() is too slow in create_session"""
return '/p/{pcode}/{name_in_url}/{ClassName}/{page_index}/'.format(pcode
=participant_code, name_in_url=name_in_url, ClassName=cls.__name__,
page_index=page_index)
|
mlflow-1.8.0
|
mlflow-1.8.0//mlflow/server/prometheus_exporter.pyfile:/mlflow/server/prometheus_exporter.py:function:change_path_for_metric/change_path_for_metric
|
def change_path_for_metric(path):
"""
Replace the '/' in the metric path by '_' so grafana can correctly use it.
:param path: path of the metric (example: runs/search)
:return: path with '_' instead of '/'
"""
if 'mlflow/' in path:
path = path.split('mlflow/')[-1]
return path.replace('/', '_')
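# Examples: the 'mlflow/' prefix is dropped before '/' becomes '_':
assert change_path_for_metric('mlflow/runs/search') == 'runs_search'
assert change_path_for_metric('ajax-api/2.0/preview') == 'ajax-api_2.0_preview'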
|
watson-developer-cloud-2.10.1
|
watson-developer-cloud-2.10.1//watson_developer_cloud/visual_recognition_v3.pyclass:ErrorInfo/_from_dict
|
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ErrorInfo object from a json dictionary."""
args = {}
if 'code' in _dict:
args['code'] = _dict.get('code')
else:
raise ValueError(
"Required property 'code' not present in ErrorInfo JSON")
if 'description' in _dict:
args['description'] = _dict.get('description')
else:
raise ValueError(
"Required property 'description' not present in ErrorInfo JSON")
if 'error_id' in _dict:
args['error_id'] = _dict.get('error_id')
else:
raise ValueError(
"Required property 'error_id' not present in ErrorInfo JSON")
return cls(**args)
|
openstack-heat-13.0.1
|
openstack-heat-13.0.1//heat/engine/resources/openstack/neutron/neutron.pyclass:NeutronResource/validate_properties
|
@staticmethod
def validate_properties(properties):
"""Validate properties for the resource.
Validates to ensure nothing in value_specs overwrites any key that
exists in the schema.
Also ensures that shared and tenant_id is not specified
in value_specs.
"""
if 'value_specs' in properties:
banned_keys = set(['shared', 'tenant_id']).union(set(properties))
found = banned_keys.intersection(set(properties['value_specs']))
if found:
return '%s not allowed in value_specs' % ', '.join(found)
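# Usage sketch: keys that collide with the schema (or with 'shared' /
# 'tenant_id') are rejected; a clean value_specs passes (returns None):
#   validate_properties({'value_specs': {'shared': True}})
#   -> 'shared not allowed in value_specs'
#   validate_properties({'value_specs': {}})  # -> None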
|
pyspiflash-0.6.2
|
pyspiflash-0.6.2//spiflash/serialflash.pyclass:SerialFlash/has_feature
|
@classmethod
def has_feature(cls, feature: int) ->bool:
"""Test whether the flash device supports a feature.
:param feature: the feature to test
:return: True if the feature is supported, False otherwise
"""
raise NotImplementedError()
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/elasticsearchservice.pyfile:/pyboto3/elasticsearchservice.py:function:generate_presigned_url/generate_presigned_url
|
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None,
HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
ClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
"""
pass
|
zxcvbn
|
zxcvbn//scoring.pyfile:/scoring.py:function:binom/binom
|
def binom(n, k):
"""
Returns binomial coefficient (n choose k).
"""
if k > n:
return 0
if k == 0:
return 1
result = 1
for denom in range(1, k + 1):
result *= n
result /= denom
n -= 1
return result
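# Worked examples (note the float division, so results are floats):
assert binom(5, 2) == 10  # (5 * 4) / (1 * 2)
assert binom(3, 5) == 0 and binom(7, 0) == 1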
|
nansat-1.2.5
|
nansat-1.2.5//nansat/nansat.pyclass:Nansat/_get_crop_offset_size
|
@staticmethod
def _get_crop_offset_size(axis, points, factor):
"""Get offset and size of cropped image"""
offset = round(points.min(axis=1)[axis] / factor)
size = round((points.max(axis=1)[axis] - offset) / factor)
return offset, size
|
pymc3
|
pymc3//variational/approximations.pyfile:/variational/approximations.py:function:sample_approx/sample_approx
|
def sample_approx(approx, draws=100, include_transformed=True):
"""Draw samples from variational posterior.
Parameters
----------
approx : :class:`Approximation`
Approximation to sample from
draws : `int`
Number of random samples.
include_transformed : `bool`
If True, transformed variables are also sampled. Default is True.
Returns
-------
trace : class:`pymc3.backends.base.MultiTrace`
Samples drawn from variational posterior.
"""
return approx.sample(draws=draws, include_transformed=include_transformed)
|
django-crucrudile-0.9.5
|
django-crucrudile-0.9.5//django_crucrudile/urlutils.pyclass:OptionalPartList/apply_required_default
|
@staticmethod
def apply_required_default(items, default):
"""Apply default value to first element of item if it's None.
:argument items: List of tuples
:type items: iterable
:argument default: Value to use if none provided
:type default: boolean
:returns: List of tuples, with required default value applied
:rtype: iterable of tuple
>>> list(
... OptionalPartList.apply_required_default(
... [
... ('<provided>', '<1>'),
... (None, '<2>')
... ],
... default='<default>'
... )
... )
[('<provided>', '<1>'), ('<default>', '<2>')]
"""
for required, args in items:
if required is None:
required = default
yield required, args
|
meshio
|
meshio//flac3d/_flac3d.pyfile:/flac3d/_flac3d.py:function:_write_points/_write_points
|
def _write_points(f, points, float_fmt):
"""
Write points coordinates.
"""
for i, point in enumerate(points):
fmt = 'G\t{:8}\t' + '\t'.join(3 * ['{:' + float_fmt + '}']) + '\n'
f.write(fmt.format(i + 1, *point))
|
tripkit
|
tripkit//process/complete_days/triplab/counter.pyfile:/process/complete_days/triplab/counter.py:function:find_participation_daterange/find_participation_daterange
|
def find_participation_daterange(trips, min_dt):
"""
Determine the time bounds for selecting user trips, so that trips with
improper timestamps (from a user's incorrect system clock) can be removed.
"""
first_date, last_date = None, None
for trip in trips:
if trip.start_local > min_dt:
first_date = trip.start_local.date()
break
last_date = trips[-1].end_local.date()
return first_date, last_date
|
watson-developer-cloud-2.10.1
|
watson-developer-cloud-2.10.1//watson_developer_cloud/assistant_v2.pyclass:DialogNodeAction/_from_dict
|
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DialogNodeAction object from a json dictionary."""
args = {}
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError(
"Required property 'name' not present in DialogNodeAction JSON")
if 'type' in _dict or 'action_type' in _dict:
args['action_type'] = _dict.get('type') or _dict.get('action_type')
if 'parameters' in _dict:
args['parameters'] = _dict.get('parameters')
if 'result_variable' in _dict:
args['result_variable'] = _dict.get('result_variable')
else:
raise ValueError(
"Required property 'result_variable' not present in DialogNodeAction JSON"
)
if 'credentials' in _dict:
args['credentials'] = _dict.get('credentials')
return cls(**args)
|
anymail
|
anymail//backends/mailgun.pyfile:/backends/mailgun.py:function:isascii/isascii
|
def isascii(s):
"""Returns True if str s is entirely ASCII characters.
(Compare to Python 3.7 `str.isascii()`.)
"""
try:
s.encode('ascii')
except UnicodeEncodeError:
return False
return True
|
co2mpas
|
co2mpas//model/physical/wheels.pyfile:/model/physical/wheels.py:function:calculate_wheel_power/calculate_wheel_power
|
def calculate_wheel_power(velocities, accelerations, road_loads, vehicle_mass):
"""
Calculates the wheel power [kW].
:param velocities:
Velocity [km/h].
:type velocities: numpy.array | float
:param accelerations:
Acceleration [m/s2].
:type accelerations: numpy.array | float
:param road_loads:
Cycle road loads [N, N/(km/h), N/(km/h)^2].
:type road_loads: list, tuple
:param vehicle_mass:
Vehicle mass [kg].
:type vehicle_mass: float
:return:
Power at wheels [kW].
:rtype: numpy.array | float
"""
f0, f1, f2 = road_loads
quadratic_term = f0 + (f1 + f2 * velocities) * velocities
vel = velocities / 3600
return (quadratic_term + 1.03 * vehicle_mass * accelerations) * vel
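# Worked example with illustrative numbers: at v = 36 km/h and a = 0,
# road loads (f0, f1, f2) = (100, 1, 0.03) give a road-load force of
# 100 + 1*36 + 0.03*36**2 = 174.88 N, so power = 174.88 * 36/3600 ~ 1.75 kW:
assert abs(calculate_wheel_power(36.0, 0.0, (100.0, 1.0, 0.03), 1000.0) -
    1.7488) < 1e-6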
|
owmeta
|
owmeta//data.pyclass:Data/load
|
@classmethod
def load(cls, file_name):
""" Load a file into a new Data instance storing configuration in a JSON format """
return cls.open(file_name)
|
werkzeug
|
werkzeug//contrib/securecookie.pyclass:SecureCookie/load_cookie
|
@classmethod
def load_cookie(cls, request, key='session', secret_key=None):
"""Loads a :class:`SecureCookie` from a cookie in request. If the
cookie is not set, a new :class:`SecureCookie` instance is
returned.
:param request: a request object that has a `cookies` attribute
which is a dict of all cookie values.
:param key: the name of the cookie.
:param secret_key: the secret key used to unquote the cookie.
Always provide the value even though it has
no default!
"""
data = request.cookies.get(key)
if not data:
return cls(secret_key=secret_key)
return cls.unserialize(data, secret_key)
|
GISAXS_XPCS-0.2.4
|
GISAXS_XPCS-0.2.4//gisaxs_xpcs/common_functions.pyfile:/gisaxs_xpcs/common_functions.py:function:get_nm/get_nm
|
def get_nm(delta):
"""
Returns size in [nm] corresponding to the provided delta in degrees
for 1.5307 [A] wavelength (common approximate formula is lambda [nm] / delta [rad] for small angles).
"""
return 8.77 / delta
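# Sanity check of the constant: lambda = 1.5307 A = 0.15307 nm and one
# degree is pi/180 rad, so lambda / (delta_deg * pi/180) ~= 8.77 / delta_deg,
# matching the hard-coded numerator:
#   0.15307 / (3.141592653589793 / 180)  # -> ~8.770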
|
kur
|
kur//supplier/numpy_dict.pyclass:NumpyDictSupplier/get_name
|
@classmethod
def get_name(cls):
""" Returns the name of the supplier.
"""
return 'numpy_dict'
|
mesma-1.0.7
|
mesma-1.0.7//mesma/external/qps/externals/pyqtgraph/exceptionHandling.pyfile:/mesma/external/qps/externals/pyqtgraph/exceptionHandling.py:function:setTracebackClearing/setTracebackClearing
|
def setTracebackClearing(clear=True):
"""
Enable or disable traceback clearing.
By default, clearing is disabled and Python will indefinitely store unhandled exception stack traces.
This function is provided since Python's default behavior can cause unexpected retention of
large memory-consuming objects.
"""
global clear_tracebacks
clear_tracebacks = clear
|
ged2doc-0.1.16
|
ged2doc-0.1.16//ged2doc/utils.pyfile:/ged2doc/utils.py:function:split_refs/split_refs
|
def split_refs(text):
"""Split text with embedded references into a sequence of text
and references.
Reference is returned as tuple (id, name).
:returns: iterator over pieces of text and references.
"""
while True:
pos = text.find('\x01')
if pos < 0:
if text:
yield text
break
else:
if pos > 0:
yield text[:pos]
text = text[pos + 1:]
pos = text.find('\x03')
ref_text = text[:pos]
text = text[pos + 1:]
ref, _, name = ref_text.partition('\x02')
yield ref, name
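# Usage sketch: references are embedded as \x01 id \x02 name \x03:
assert list(split_refs('see \x01I1\x02John Smith\x03 for details')) == [
    'see ', ('I1', 'John Smith'), ' for details']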
|
pandoc-1.0.2
|
pandoc-1.0.2//.lib/setuptools/msvc.pyfile:/.lib/setuptools/msvc.py:function:_augment_exception/_augment_exception
|
def _augment_exception(exc, version, arch=''):
"""
Add details to the exception message to help guide the user
as to what action will resolve it.
"""
message = exc.args[0]
if 'vcvarsall' in message.lower() or 'visual c' in message.lower():
tmpl = 'Microsoft Visual C++ {version:0.1f} is required.'
message = tmpl.format(**locals())
msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
if version == 9.0:
if arch.lower().find('ia64') > -1:
message += ' Get it with "Microsoft Windows SDK 7.0": '
message += msdownload % 3138
else:
message += ' Get it from http://aka.ms/vcpython27'
elif version == 10.0:
message += ' Get it with "Microsoft Windows SDK 7.1": '
message += msdownload % 8279
elif version >= 14.0:
message += (
' Get it with "Microsoft Visual C++ Build Tools": http://landinghub.visualstudio.com/visual-cpp-build-tools'
)
exc.args = message,
|
hyperspyUI-1.1.1
|
hyperspyUI-1.1.1//hyperspyui/overrides.pyfile:/hyperspyui/overrides.py:function:_on_figure_window_close/_on_figure_window_close
|
def _on_figure_window_close(figure, function):
"""Connects a close figure signal to a given function.
Parameters
----------
figure : mpl figure instance
function : function
"""
window = figure.canvas.manager.window
if not hasattr(figure, '_on_window_close'):
figure._on_window_close = list()
if function not in figure._on_window_close:
figure._on_window_close.append(function)
window.closing.connect(function)
|
rsonlite-0.1.0
|
rsonlite-0.1.0//rsonlite.pyfile:/rsonlite.py:function:multiline/multiline
|
def multiline(lineinfo, dedent):
""" Returns one string for each line,
properly dedented.
"""
linenum = lineinfo[0].line
for tok in lineinfo:
while linenum < tok.line:
yield ''
linenum += 1
yield (tok.col - dedent) * ' ' + tok.rstrip()
linenum += 1
|
summarycode
|
summarycode//copyright_summary.pyfile:/copyright_summary.py:function:strip_suffixes/strip_suffixes
|
def strip_suffixes(s, suffixes=suffixes):
"""
Return the `s` string with any of the strings in the `suffixes` set
stripped from the right. Normalize and strip spaces.
For example:
>>> s = 'RedHat Inc corp'
>>> strip_suffixes(s, set(['corp'])) == 'RedHat Inc'
True
"""
s = s.split()
while s and s[-1].lower().strip().strip('.,') in suffixes:
s = s[:-1]
return u' '.join(s)
|
edict-0.51
|
edict-0.51//edict/edict.pyfile:/edict/edict.py:function:_setitem_via_pathlist/_setitem_via_pathlist
|
def _setitem_via_pathlist(external_dict, path_list, value, **kwargs):
"""
y = {'c': {'b': {}}}
_setitem_via_pathlist(y,['c','b'],200)
"""
if 's2n' in kwargs:
s2n = kwargs['s2n']
else:
s2n = 0
if 'n2s' in kwargs:
n2s = kwargs['n2s']
else:
n2s = 0
this = external_dict
for i in range(0, path_list.__len__() - 1):
key = path_list[i]
if n2s == 1:
key = str(key)
if s2n == 1:
try:
int(key)
except:
pass
else:
key = int(key)
this = this.__getitem__(key)
this.__setitem__(path_list[-1], value)
return external_dict
|
good-library-0.3-beta.0
|
good-library-0.3-beta.0//good/annotation.pyclass:AnnotationType/get
|
@classmethod
def get(cls, obj):
"""
Returns the annotation in the given object (or False)
:param obj: the object to check
:return: the annotation in the given object (or False)
"""
return getattr(obj, cls._AT_get_full_name(), False)
|
actions
|
actions//procs/restaurant_form.pyclass:RestaurantForm/cuisine_db
|
@staticmethod
def cuisine_db():
"""Database of supported cuisines"""
return ['caribbean', 'chinese', 'french', 'greek', 'indian', 'italian',
'mexican']
|
tarwalker-1.1
|
tarwalker-1.1//tarwalker.pyclass:TarWalker/_file_type
|
@classmethod
def _file_type(cls, path):
"""Returns the FileType enum value based only on the file suffix."""
for ctype, ftype, suff in cls.SUFFIXES:
if path.lower().endswith(suff):
base = path[:-len(suff)]
return base, ctype, ftype
return path, cls.Types.NONE, cls.Types.NORMAL
|
ethereumd
|
ethereumd//utils.pyfile:/utils.py:function:hex_to_dec/hex_to_dec
|
def hex_to_dec(x: str) ->int:
"""
Convert hex to decimal
"""
return int(x, 16)
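# Examples: int(x, 16) accepts both prefixed and bare hex strings:
assert hex_to_dec('0x1a') == 26
assert hex_to_dec('ff') == 255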
|
models
|
models//slim/datasets/download_and_convert_mnist.pyfile:/slim/datasets/download_and_convert_mnist.py:function:_get_output_filename/_get_output_filename
|
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The directory where the temporary files are stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/mnist_%s.tfrecord' % (dataset_dir, split_name)
|
easytext-0.1
|
easytext-0.1//easytext/algorithms.pyfile:/easytext/algorithms.py:function:calc_cutoffind/calc_cutoffind
|
def calc_cutoffind(freqs, min_tf):
"""
Using an ordered frequency list (biggest->smallest), identifies index of
first item to be cut off.
freqs: ordered frequency list (biggest->smallest)
min_tf: smallest frequency to accept.
"""
if min_tf > freqs[0]:
raise Exception('Cutoff {} is larger than largest frequency {}.'.
format(min_tf, freqs[0]))
if min_tf <= freqs[-1]:
return len(freqs)
i = 0
while freqs[i] >= min_tf:
i += 1
return i
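# Usage sketch: index of the first frequency below the cutoff:
assert calc_cutoffind([10, 5, 5, 2, 1], 3) == 3
assert calc_cutoffind([10, 5, 5, 2, 1], 1) == 5  # nothing is cut off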
|
pylytics-1.2.2
|
pylytics-1.2.2//pylytics/conf/project_template/dimension/store/transform.pyfile:/pylytics/conf/project_template/dimension/store/transform.py:function:convert_str_to_int/convert_str_to_int
|
def convert_str_to_int(data):
""" An example expansion.
"""
data['employees'] = int(data['employees'])
return data
|
shutilwhich-cwdpatch-0.1.0
|
shutilwhich-cwdpatch-0.1.0//versioneer.pyfile:/versioneer.py:function:scan_setup_py/scan_setup_py
|
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open('setup.py', 'r') as f:
for line in f.readlines():
if 'import versioneer' in line:
found.add('import')
if 'versioneer.get_cmdclass()' in line:
found.add('cmdclass')
if 'versioneer.get_version()' in line:
found.add('get_version')
if 'versioneer.VCS' in line:
setters = True
if 'versioneer.versionfile_source' in line:
setters = True
if len(found) != 3:
print('')
print('Your setup.py appears to be missing some important items')
print('(but I might be wrong). Please make sure it has something')
print('roughly like the following:')
print('')
print(' import versioneer')
print(' setup( version=versioneer.get_version(),')
print(' cmdclass=versioneer.get_cmdclass(), ...)')
print('')
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print('now lives in setup.cfg, and should be removed from setup.py')
print('')
errors += 1
return errors
|
PyEIS-1.0.10
|
PyEIS-1.0.10//PyEIS/PyEIS.pyfile:/PyEIS/PyEIS.py:function:cir_RsC/cir_RsC
|
def cir_RsC(w, Rs, C):
"""
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
"""
return Rs + 1 / (C * (w * 1.0j))
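# Worked example: Z(w) = Rs + 1/(jwC); at w = 1e3 rad/s, Rs = 100 Ohm and
# C = 1e-6 F the reactive part is -j/(w*C) = -1000j Ohm:
assert abs(cir_RsC(1000.0, 100.0, 1e-6) - (100 - 1000j)) < 1e-6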
|
reahl-webdev-4.0.5
|
reahl-webdev-4.0.5//reahl/webdev/tools.pyclass:XPath/link_starting_with_text
|
@classmethod
def link_starting_with_text(cls, text):
"""Returns an XPath to find an HTML <a> containing text that starts with the contents of `text`."""
return cls('//a[starts-with(node(), "%s")]' % text)
|
handygeometry-0.12
|
handygeometry-0.12//handygeometry/handygeometry.pyfile:/handygeometry/handygeometry.py:function:perimeter_quadrilateral/perimeter_quadrilateral
|
def perimeter_quadrilateral(a, b, c, d):
"""
P = a + b + c + d
"""
return a + b + c + d
|
ask-sdk-model-1.23.0
|
ask-sdk-model-1.23.0//ask_sdk_model/interfaces/alexa/presentation/apl/listoperations/operation.pyclass:Operation/get_real_child_model
|
@classmethod
def get_real_child_model(cls, data):
"""Returns the real base class specified by the discriminator"""
discriminator_value = data[cls.json_discriminator_key]
return cls.discriminator_value_class_map.get(discriminator_value)
|
skelebot-1.18.4
|
skelebot-1.18.4//skelebot/objects/skeleYaml.pyclass:SkeleYaml/loadList
|
@classmethod
def loadList(cls, config):
"""Iterates over a list of Dicts that represent SkeleYamls and loads them into a list"""
objs = []
if isinstance(config, list):
for element in config:
objs.append(cls.load(element))
return objs
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/lexmodelbuildingservice.pyfile:/pyboto3/lexmodelbuildingservice.py:function:delete_intent/delete_intent
|
def delete_intent(name=None):
"""
Deletes all versions of the intent, including the $LATEST version. To delete a specific version of the intent, use the operation.
You can delete a version of an intent only if it is not referenced. To delete an intent that is referred to in one or more bots (see how-it-works ), you must remove those references first.
This operation requires permission for the lex:DeleteIntent action.
See also: AWS API Documentation
:example: response = client.delete_intent(
name='string'
)
:type name: string
:param name: [REQUIRED]
The name of the intent. The name is case sensitive.
"""
pass
|
dask
|
dask//utils.pyfile:/utils.py:function:ensure_unicode/ensure_unicode
|
def ensure_unicode(s):
""" Turn string or bytes to bytes
>>> ensure_unicode(u'123')
'123'
>>> ensure_unicode('123')
'123'
>>> ensure_unicode(b'123')
'123'
"""
if isinstance(s, str):
return s
if hasattr(s, 'decode'):
return s.decode()
msg = 'Object %s is neither a bytes object nor has an encode method'
raise TypeError(msg % s)
|
fastats
|
fastats//scaling/scaling.pyfile:/scaling/scaling.py:function:scale/scale
|
def scale(A):
"""
A no-op data scaling transformation
"""
return A
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/kinesisanalytics.pyfile:/pyboto3/kinesisanalytics.py:function:add_application_input/add_application_input
|
def add_application_input(ApplicationName=None, CurrentApplicationVersionId
=None, Input=None):
"""
Adds a streaming source to your Amazon Kinesis application. For conceptual information, see Configuring Application Input .
You can add a streaming source either when you create an application or you can use this operation to add a streaming source after you create an application. For more information, see CreateApplication .
Any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the DescribeApplication operation to find the current application version.
This operation requires permissions to perform the kinesisanalytics:AddApplicationInput action.
See also: AWS API Documentation
:example: response = client.add_application_input(
ApplicationName='string',
CurrentApplicationVersionId=123,
Input={
'NamePrefix': 'string',
'KinesisStreamsInput': {
'ResourceARN': 'string',
'RoleARN': 'string'
},
'KinesisFirehoseInput': {
'ResourceARN': 'string',
'RoleARN': 'string'
},
'InputParallelism': {
'Count': 123
},
'InputSchema': {
'RecordFormat': {
'RecordFormatType': 'JSON'|'CSV',
'MappingParameters': {
'JSONMappingParameters': {
'RecordRowPath': 'string'
},
'CSVMappingParameters': {
'RecordRowDelimiter': 'string',
'RecordColumnDelimiter': 'string'
}
}
},
'RecordEncoding': 'string',
'RecordColumns': [
{
'Name': 'string',
'Mapping': 'string',
'SqlType': 'string'
},
]
}
}
)
:type ApplicationName: string
:param ApplicationName: [REQUIRED]
Name of your existing Amazon Kinesis Analytics application to which you want to add the streaming source.
:type CurrentApplicationVersionId: integer
:param CurrentApplicationVersionId: [REQUIRED]
Current version of your Amazon Kinesis Analytics application. You can use the DescribeApplication operation to find the current application version.
:type Input: dict
:param Input: [REQUIRED]
When you configure the application input, you specify the streaming source, the in-application stream name that is created, and the mapping between the two. For more information, see Configuring Application Input .
NamePrefix (string) -- [REQUIRED]Name prefix to use when creating in-application stream. Suppose you specify a prefix 'MyInApplicationStream'. Kinesis Analytics will then create one or more (as per the InputParallelism count you specified) in-application streams with names 'MyInApplicationStream_001', 'MyInApplicationStream_002' and so on.
KinesisStreamsInput (dict) --If the streaming source is an Amazon Kinesis stream, identifies the stream's Amazon Resource Name (ARN) and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.
ResourceARN (string) -- [REQUIRED]ARN of the input Amazon Kinesis stream to read.
RoleARN (string) -- [REQUIRED]ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to grant the necessary permissions to this role.
KinesisFirehoseInput (dict) --If the streaming source is an Amazon Kinesis Firehose delivery stream, identifies the Firehose delivery stream's ARN and an IAM role that enables Amazon Kinesis Analytics to access the stream on your behalf.
ResourceARN (string) -- [REQUIRED]ARN of the input Firehose delivery stream.
RoleARN (string) -- [REQUIRED]ARN of the IAM role that Amazon Kinesis Analytics can assume to access the stream on your behalf. You need to make sure the role has necessary permissions to access the stream.
InputParallelism (dict) --Describes the number of in-application streams to create.
Data from your source will be routed to these in-application input streams.
(see Configuring Application Input .
Count (integer) --Number of in-application streams to create. For more information, see Limits .
InputSchema (dict) -- [REQUIRED]Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created.
Also used to describe the format of the reference data source.
RecordFormat (dict) -- [REQUIRED]Specifies the format of the records on the streaming source.
RecordFormatType (string) -- [REQUIRED]The type of record format.
MappingParameters (dict) --When configuring application input at the time of creating or updating an application, provides additional mapping information specific to the record format (such as JSON, CSV, or record fields delimited by some delimiter) on the streaming source.
JSONMappingParameters (dict) --Provides additional mapping information when JSON is the record format on the streaming source.
RecordRowPath (string) -- [REQUIRED]Path to the top-level parent that contains the records.
For example, consider the following JSON record:
In the RecordRowPath , '$' refers to the root and path '$.vehicle.Model' refers to the specific 'Model' key in the JSON.
CSVMappingParameters (dict) --Provides additional mapping information when the record format uses delimiters (for example, CSV).
RecordRowDelimiter (string) -- [REQUIRED]Row delimiter. For example, in a CSV format, '\n' is the typical row delimiter.
RecordColumnDelimiter (string) -- [REQUIRED]Column delimiter. For example, in a CSV format, a comma (',') is the typical column delimiter.
RecordEncoding (string) --Specifies the encoding of the records in the streaming source. For example, UTF-8.
RecordColumns (list) -- [REQUIRED]A list of RecordColumn objects.
(dict) --Describes the mapping of each data element in the streaming source to the corresponding column in the in-application stream.
Also used to describe the format of the reference data source.
Name (string) -- [REQUIRED]Name of the column created in the in-application input stream or reference table.
Mapping (string) --Reference to the data element in the streaming input of the reference data source.
SqlType (string) -- [REQUIRED]Type of column created in the in-application input stream or reference table.
:rtype: dict
:return: {}
:returns:
(dict) --
"""
pass
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/iam.pyfile:/pyboto3/iam.py:function:get_user/get_user
|
def get_user(UserName=None):
"""
Retrieves information about the specified IAM user, including the user's creation date, path, unique ID, and ARN.
If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID used to sign the request to this API.
See also: AWS API Documentation
Examples
The following command gets information about the IAM user named Bob.
Expected Output:
:example: response = client.get_user(
UserName='string'
)
:type UserName: string
:param UserName: The name of the user to get information about.
This parameter is optional. If it is not included, it defaults to the user making the request. This parameter allows (per its regex pattern ) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-
:rtype: dict
:return: {
'User': {
'Path': 'string',
'UserName': 'string',
'UserId': 'string',
'Arn': 'string',
'CreateDate': datetime(2015, 1, 1),
'PasswordLastUsed': datetime(2015, 1, 1)
}
}
"""
pass
|
eth
|
eth//consensus/clique/_utils.pyfile:/consensus/clique/_utils.py:function:is_checkpoint/is_checkpoint
|
def is_checkpoint(block_number: int, epoch_length: int) ->bool:
"""
Return ``True`` if the given ``block_number`` is a checkpoint, otherwise ``False``.
"""
return block_number % epoch_length == 0
|
pydbc
|
pydbc//parser/scanner.pyclass:DbcParser/p_nodes_list
|
@staticmethod
def p_nodes_list(p):
"""nodes_list : IDENT
| IDENT nodes_list"""
try:
p[0] = [p[1]] + p[2]
except IndexError:
p[0] = [p[1]]
|
pymove-1.1.4
|
pymove-1.1.4//pymove/utils/math.pyfile:/pymove/utils/math.py:function:array_sum/array_sum
|
def array_sum(values_array):
"""
Computes the sum of the elements of the array.
Parameters
----------
values_array : list.
The numbers to be added.
Returns
-------
sum_ : float.
The sum of the elements of the array.
"""
sum_ = 0
for item in values_array:
sum_ += item
return sum_
|
Reversi-1.2.1
|
Reversi-1.2.1//src/Reversi/ui_console.pyfile:/src/Reversi/ui_console.py:function:chooseToken/chooseToken
|
def chooseToken(prompt):
"""The player makes a selection based on the prompt.
Returns a string. The first letter identifies the choice."""
return input(prompt)
|
taskflow
|
taskflow//utils/misc.pyfile:/utils/misc.py:function:countdown_iter/countdown_iter
|
def countdown_iter(start_at, decr=1):
"""Generator that decrements after each generation until <= zero.
NOTE(harlowja): we can likely remove this when we can use an
``itertools.count`` that takes a step (on py2.6 which we still support
that step parameter does **not** exist and therefore can't be used).
"""
if decr <= 0:
raise ValueError(
'Decrement value must be greater than zero and not %s' % decr)
while start_at > 0:
yield start_at
start_at -= decr
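# Examples: decrements by the step until the value would reach zero:
assert list(countdown_iter(5, 2)) == [5, 3, 1]
assert list(countdown_iter(0)) == []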
|
datapipeml-0.8
|
datapipeml-0.8//datapipeml/type_check.pyfile:/datapipeml/type_check.py:function:check_float/check_float
|
def check_float(series, has_null, n):
"""
Infer correct type of series considered as float
"""
if series.nunique() == 2:
unique = series.unique()
if (unique[0] == 0.0 or unique[0] == 1.0) and (unique[1] == 0.0 or
unique[1] == 1.0):
return 'float'
all_integer = series.sample(n).apply(lambda x: x.is_integer()).all()
if all_integer:
if has_null:
return 'nullable int'
else:
return 'int'
else:
return 'float'
|
biobb_structure_manager-1.0.0
|
biobb_structure_manager-1.0.0//biobb_structure_manager/model_utils.pyfile:/biobb_structure_manager/model_utils.py:function:swap_atoms/swap_atoms
|
def swap_atoms(at1, at2):
"""
Swaps names for two given atoms. Useful to fix labelling issues
"""
at1_id = at1.id
at1_full_id = at1.full_id
at1_element = at1.element
at1_name = at1.name
at1_fullname = at1.fullname
at1.id = at2.id
at1.full_id = at2.full_id
at1.element = at2.element
at1.name = at2.name
at1.fullname = at2.fullname
at2.id = at1_id
at2.full_id = at1_full_id
at2.element = at1_element
at2.name = at1_name
at2.fullname = at1_fullname
|
kbodatatools
|
kbodatatools//basic_analysis.pyfile:/basic_analysis.py:function:get_P_IP/get_P_IP
|
def get_P_IP(data):
"""
A function that computes pitches per inning. Daily and cumulative batting averages already exist, but this function was written for monthly and yearly calculations.
Args:
data(pandas DF): pitching data for a specific pitcher
Returns:
output(numeric): the pitcher's pitches per inning
"""
temp_era = sum(data.inning) + sum(data.restinning) / 3
if temp_era != 0:
return round(sum(data['투구수']) / temp_era, 3)
else:
return 99.99
|
audiotools
|
audiotools//m4a_atoms.pyclass:M4A_Leaf_Atom/parse
|
@classmethod
def parse(cls, name, data_size, reader, parsers):
"""given a 4 byte name, data_size int, BitstreamReader
and dict of {"atom":handler} sub-parsers,
returns an atom of this class"""
return cls(name, reader.read_bytes(data_size))
|
pythingy
|
pythingy//static.pyfile:/static.py:function:write_uint16/write_uint16
|
def write_uint16(data, value, index):
""" Write 16bit value into data string at index and return new string """
data = data.decode('utf-8')
return '{}{:02x}{:02x}{}'.format(data[:index * 4], value & 255, value >>
8, data[index * 4 + 4:])
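# Usage sketch: the value is written little-endian as four hex digits at
# 4-character (16-bit) slot `index`:
assert write_uint16(b'0000000000000000', 0x1234, 1) == '0000341200000000'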
|
binstarsolver-0.1.3
|
binstarsolver-0.1.3//binstarsolver/utils.pyfile:/binstarsolver/utils.py:function:calc_flux_intg_ratio_from_mags/calc_flux_intg_ratio_from_mags
|
def calc_flux_intg_ratio_from_mags(mag_1, mag_2):
"""Calculate the ratio of integrated fluxes from two magnitudes.
Parameters
----------
mag_1 : float
Magnitude of source 1. Unit is magnitudes.
mag_2 : float
Magnitude of source 2. Unit is magnitudes.
Returns
-------
flux_intg_ratio : float
Ratio of fluxes for sources 1 and 2 as flux_1 / flux_2. Unitless.
Notes
-----
flux_1 / flux_2 = 100**((mag_2 - mag_1)/5)
From equation 3.3 in section 3.2 of [1]_.
References
----------
.. [1] Carroll and Ostlie, 2007, An Introduction to Modern Astrophysics
"""
flux_intg_ratio = 100.0 ** ((mag_2 - mag_1) / 5.0)
return flux_intg_ratio
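# Worked examples: a 5-magnitude difference is a flux factor of 100, and
# 1 magnitude is ~2.512:
assert calc_flux_intg_ratio_from_mags(0.0, 5.0) == 100.0
assert abs(calc_flux_intg_ratio_from_mags(0.0, 1.0) - 2.512) < 1e-3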
|
leap.keymanager-0.5.1
|
leap.keymanager-0.5.1//versioneer.pyfile:/versioneer.py:function:render_git_describe_long/render_git_describe_long
|
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces['closest-tag']:
rendered = pieces['closest-tag']
rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
else:
rendered = pieces['short']
if pieces['dirty']:
rendered += '-dirty'
return rendered
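# Usage sketch (pieces dict shape inferred from the keys read above):
assert render_git_describe_long({'closest-tag': 'v1.2', 'distance': 3,
    'short': 'abc1234', 'dirty': True}) == 'v1.2-3-gabc1234-dirty'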
|
fake-blender-api-2.79-0.3.1
|
fake-blender-api-2.79-0.3.1//bpy/ops/marker.pyfile:/bpy/ops/marker.py:function:delete/delete
|
def delete():
"""Delete selected time marker(s)
"""
pass
|
xicam.gui-2.1.10
|
xicam.gui-2.1.10//versioneer.pyfile:/versioneer.py:function:scan_setup_py/scan_setup_py
|
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open('setup.py', 'r') as f:
for line in f.readlines():
if 'import versioneer' in line:
found.add('import')
if 'versioneer.get_cmdclass()' in line:
found.add('cmdclass')
if 'versioneer.get_version()' in line:
found.add('get_version')
if 'versioneer.VCS' in line:
setters = True
if 'versioneer.versionfile_source' in line:
setters = True
if len(found) != 3:
print('')
print('Your setup.py appears to be missing some important items')
print('(but I might be wrong). Please make sure it has something')
print('roughly like the following:')
print('')
print(' import versioneer')
print(' setup( version=versioneer.get_version(),')
print(' cmdclass=versioneer.get_cmdclass(), ...)')
print('')
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print('now lives in setup.cfg, and should be removed from setup.py')
print('')
errors += 1
return errors
|
zag
|
zag//engines/worker_based/types.pyclass:WorkerFinder/start
|
@staticmethod
def start():
"""Starts the finding process (subclass and implement as needed)."""
|
dropbox-10.1.2
|
dropbox-10.1.2//dropbox/team_log.pyclass:EventDetails/member_send_invite_policy_changed_details
|
@classmethod
def member_send_invite_policy_changed_details(cls, val):
"""
Create an instance of this class set to the
``member_send_invite_policy_changed_details`` tag with value ``val``.
:param MemberSendInvitePolicyChangedDetails val:
:rtype: EventDetails
"""
return cls('member_send_invite_policy_changed_details', val)
|
anymail
|
anymail//backends/sendinblue.pyclass:SendinBluePayload/email_object
|
@staticmethod
def email_object(email):
"""Converts EmailAddress to SendinBlue API array"""
email_object = dict()
email_object['email'] = email.addr_spec
if email.display_name:
email_object['name'] = email.display_name
return email_object
|
consensys_utils
|
consensys_utils//flask/wsgi.pyfile:/flask/wsgi.py:function:apply_middlewares/apply_middlewares
|
def apply_middlewares(app, middlewares=None):
"""Apply WSGI middlewares to a Flasks application
Example:
.. doctest::
>>> from flask import Flask
>>> import base64
>>> app = Flask(__name__)
>>> class AuthMiddleware:
... def __init__(self, wsgi):
... self.wsgi = wsgi
...
... @staticmethod
... def is_authenticated(header):
... if not header:
... return False
... _, encoded = header.split(None, 1)
... decoded = base64.b64decode(encoded).decode('UTF-8')
... username, password = decoded.split(':', 1)
... return username == password
...
... def __call__(self, environ, start_response):
... if self.is_authenticated(environ.get('HTTP_AUTHORIZATION')):
... return self.wsgi(environ, start_response)
... start_response('401 Authentication Required',
... [('Content-Type', 'text/html'),
... ('WWW-Authenticate', 'Basic realm="Login"')])
... return [b'Login']
>>> middlewares = [AuthMiddleware]
>>> apply_middlewares(app, middlewares)
:param app: Flask application
:type app: :class:`flask.Flask`
:param middlewares: WSGI middleware to apply on the application.
Expects a list of elements which are either
- A class taking a WSGI application as an argument
- A function that takes a :class:`flask.Flask` as argument and may itself apply a middleware to it
:type middlewares: list
"""
middlewares = middlewares or []
for middleware in middlewares:
if isinstance(middleware, type):
app.wsgi_app = middleware(app.wsgi_app)
else:
middleware(app)
|
odl-0.7.0
|
odl-0.7.0//odl/phantom/transmission.pyfile:/odl/phantom/transmission.py:function:_modified_shepp_logan_ellipsoids/_modified_shepp_logan_ellipsoids
|
def _modified_shepp_logan_ellipsoids(ellipsoids):
"""Modify ellipsoids to give the modified Shepp-Logan phantom.
Works for both 2d and 3d.
"""
intensities = [1.0, -0.8, -0.2, -0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
intensities[2] += 5e-17
intensities[3] += 5e-17
assert len(ellipsoids) == len(intensities)
for ellipsoid, intensity in zip(ellipsoids, intensities):
ellipsoid[0] = intensity
|
sqreen-1.19.0
|
sqreen-1.19.0//sqreen/sanitizer.pyfile:/sqreen/sanitizer.py:function:sanitize_exceptions/sanitize_exceptions
|
def sanitize_exceptions(exceptions, sensitive_values):
"""
Sanitize sensitive data from a list of exceptions. Return the sanitized
exceptions.
"""
for exc in exceptions:
infos = exc.get('infos')
if infos is not None:
args = infos.get('args')
if args is not None and sensitive_values:
infos.pop('args', None)
waf_infos = infos.get('waf')
if waf_infos is not None and sensitive_values:
waf_infos.pop('args', None)
yield exc
|
phply
|
phply//phpparse.pyfile:/phpparse.py:function:p_top_statement/p_top_statement
|
def p_top_statement(p):
"""top_statement : statement
| function_declaration_statement
| class_declaration_statement
| HALT_COMPILER LPAREN RPAREN SEMI"""
if len(p) == 2:
p[0] = p[1]
else:
pass
|
klass_registry
|
klass_registry//registry.pyclass:BaseRegistry/gen_lookup_key
|
@staticmethod
def gen_lookup_key(key):
"""
Used by :py:meth:`get` to generate a lookup key.
You may override this method in a subclass, for example if you
need to support legacy aliases, etc.
"""
return key
|
tmep-2.0.1
|
tmep-2.0.1//tmep/functions.pyclass:Functions/tmpl_upper
|
@staticmethod
def tmpl_upper(text):
"""Covert a string to upper case
* synopsis: ``%upper{text}``
* description: Convert “text” to UPPERCASE.
"""
return text.upper()
|
xrayutilities
|
xrayutilities//materials/database.pyfile:/materials/database.py:function:add_radius_from_WIKI/add_radius_from_WIKI
|
def add_radius_from_WIKI(db, dfile, verbose=False):
"""
Read radius from Wikipedia radius table and save it to the database.
"""
with open(dfile, 'r') as f:
for line in f.readlines():
s = line.split(',')
ename = s[1]
radius = float(s[3]) / 100.0
if verbose:
print('set element %s' % ename)
db.SetMaterial(ename)
db.SetRadius(radius)
|
indra-1.16.0
|
indra-1.16.0//indra/benchmarks/assembly_eval/batch4/assembly_eval.pyfile:/indra/benchmarks/assembly_eval/batch4/assembly_eval.py:function:is_protein_or_chemical/is_protein_or_chemical
|
def is_protein_or_chemical(agent):
"""Return True if the agent is a protein/protein family or chemical."""
if agent is None:
return True
dbs = set(['UP', 'HGNC', 'CHEBI', 'PFAM-DEF', 'IP', 'INDRA', 'PUBCHEM',
'CHEMBL'])
agent_refs = set(agent.db_refs.keys())
if agent_refs.intersection(dbs):
return True
return False
|
messidge-1.3.1
|
messidge-1.3.1//messidge/client/connection.pyfile:/messidge/client/connection.py:function:cmd/cmd
|
def cmd(required_params, *, needs_reply=False):
"""Create the internal structure describing a command
:param required_params: A list of parameters that must be included with the command.
:param needs_reply: The message needs to be replied to (and must have a uuid)."""
return required_params, needs_reply
|
python-breadcrumbs-0.1.2
|
python-breadcrumbs-0.1.2//breadcrumbs.pyfile:/breadcrumbs.py:function:call/call
|
def call(obj, *args, **kwargs):
""" Implement the call behavior.
"""
return obj(*args, **kwargs)
|
ppln-1.0.3
|
ppln-1.0.3//ppln/fileio/parse.pyfile:/ppln/fileio/parse.py:function:dict_from_file/dict_from_file
|
def dict_from_file(filename, key_type=str):
"""Load a text file and parse the content as a dict.
Each line of the text file will be two or more columns splited by
whitespaces or tabs. The first column will be parsed as dict keys, and
the following columns will be parsed as dict values.
Args:
filename(str): Filename.
key_type(type): Type of the dict's keys. str is used by default and
type conversion will be performed if specified.
Returns:
dict: The parsed contents.
"""
mapping = {}
with open(filename, 'r') as f:
for line in f:
items = line.rstrip('\n').split()
assert len(items) >= 2
key = key_type(items[0])
val = items[1:] if len(items) > 2 else items[1]
mapping[key] = val
return mapping
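# Usage sketch: a file containing "1 cat\n2 dog bird\n" parsed with
# key_type=int yields {1: 'cat', 2: ['dog', 'bird']}; single values stay
# scalars, multiple values become lists.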
|
bitcoinutils
|
bitcoinutils//transactions.pyclass:TxInput/copy
|
@classmethod
def copy(cls, txin):
"""Deep copy of TxInput"""
return cls(txin.txid, txin.txout_index, txin.script_sig, txin.sequence)
|
bigfloat-0.4.0
|
bigfloat-0.4.0//bigfloat/formatting.pyfile:/bigfloat/formatting.py:function:format_align/format_align
|
def format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
padding = spec['fill'] * (spec['minimumwidth'] - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding) // 2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field: {!r}'.format(align))
return result
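# Usage sketch (spec dict shape taken from the keys read above):
assert format_align('-', '1.23', {'fill': '*', 'minimumwidth': 8,
    'align': '>'}) == '***-1.23'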
|
htmldiffer
|
htmldiffer//utils.pyfile:/utils.py:function:verified_blacklisted_tag/verified_blacklisted_tag
|
def verified_blacklisted_tag(x, tag):
"""
check for '<' + blacklisted_tag + ' ' or '>'
as in: <head> or <head ...> (should not match <header if checking for <head)
"""
initial = x[0:len(tag) + 1 + 1]
blacklisted_head = '<{0}'.format(tag)
return (initial == blacklisted_head + ' ' or initial ==
blacklisted_head + '>')
|
aima3-1.0.11
|
aima3-1.0.11//aima3/planning.pyclass:Problem/result
|
def result(problem, action):
"""The outcome of applying an action to the current problem"""
if action is not None:
problem.act(action)
return problem
else:
return problem
|
selinon
|
selinon//selective_run_function.pyclass:SelectiveRunFunction/construct_import_name
|
@staticmethod
def construct_import_name(name, import_path):
"""Construct import name that will be used in generated config.
:param name: name of the function that will be imported
:param import_path: import that should be used to import function
:return: string representation of function that will be used in generated config
"""
return '_{import_path}_{name}'.format(import_path=import_path.replace(
'.', '_'), name=name)
|
tbee-0.1
|
tbee-0.1//tbee/error_handling.pyfile:/tbee/error_handling.py:function:empty_onsite/empty_onsite
|
def empty_onsite(onsite):
"""
Check if *onsite* not empty.
:raises RuntimeError: Run method set_onsite first.
"""
if onsite.size == 0:
raise RuntimeError('\n\nRun method set_onsite first\n')
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/snowball.pyfile:/pyboto3/snowball.py:function:create_job/create_job
|
def create_job(JobType=None, Resources=None, Description=None, AddressId=
None, KmsKeyARN=None, RoleARN=None, SnowballCapacityPreference=None,
ShippingOption=None, Notification=None, ClusterId=None, SnowballType=
None, ForwardingAddressId=None):
"""
Creates a job to import or export data between Amazon S3 and your on-premises data center. Your AWS account must have the right trust policies and permissions in place to create a job for Snowball. If you're creating a job for a node in a cluster, you only need to provide the clusterId value; the other job attributes are inherited from the cluster.
See also: AWS API Documentation
Examples
Creates a job to import or export data between Amazon S3 and your on-premises data center. Your AWS account must have the right trust policies and permissions in place to create a job for Snowball. If you're creating a job for a node in a cluster, you only need to provide the clusterId value; the other job attributes are inherited from the cluster.
Expected Output:
:example: response = client.create_job(
JobType='IMPORT'|'EXPORT'|'LOCAL_USE',
Resources={
'S3Resources': [
{
'BucketArn': 'string',
'KeyRange': {
'BeginMarker': 'string',
'EndMarker': 'string'
}
},
],
'LambdaResources': [
{
'LambdaArn': 'string',
'EventTriggers': [
{
'EventResourceARN': 'string'
},
]
},
]
},
Description='string',
AddressId='string',
KmsKeyARN='string',
RoleARN='string',
SnowballCapacityPreference='T50'|'T80'|'T100'|'NoPreference',
ShippingOption='SECOND_DAY'|'NEXT_DAY'|'EXPRESS'|'STANDARD',
Notification={
'SnsTopicARN': 'string',
'JobStatesToNotify': [
'New'|'PreparingAppliance'|'PreparingShipment'|'InTransitToCustomer'|'WithCustomer'|'InTransitToAWS'|'WithAWS'|'InProgress'|'Complete'|'Cancelled'|'Listing'|'Pending',
],
'NotifyAll': True|False
},
ClusterId='string',
SnowballType='STANDARD'|'EDGE',
ForwardingAddressId='string'
)
:type JobType: string
:param JobType: Defines the type of job that you're creating.
:type Resources: dict
:param Resources: Defines the Amazon S3 buckets associated with this job.
With IMPORT jobs, you specify the bucket or buckets that your transferred data will be imported into.
With EXPORT jobs, you specify the bucket or buckets that your transferred data will be exported from. Optionally, you can also specify a KeyRange value. If you choose to export a range, you define the length of the range by providing either an inclusive BeginMarker value, an inclusive EndMarker value, or both. Ranges are UTF-8 binary sorted.
S3Resources (list) --An array of S3Resource objects.
(dict) --Each S3Resource object represents an Amazon S3 bucket that your transferred data will be exported from or imported into. For export jobs, this object can have an optional KeyRange value. The length of the range is defined at job creation, and has either an inclusive BeginMarker , an inclusive EndMarker , or both. Ranges are UTF-8 binary sorted.
BucketArn (string) --The Amazon Resource Name (ARN) of an Amazon S3 bucket.
KeyRange (dict) --For export jobs, you can provide an optional KeyRange within a specific Amazon S3 bucket. The length of the range is defined at job creation, and has either an inclusive BeginMarker , an inclusive EndMarker , or both. Ranges are UTF-8 binary sorted.
BeginMarker (string) --The key that starts an optional key range for an export job. Ranges are inclusive and UTF-8 binary sorted.
EndMarker (string) --The key that ends an optional key range for an export job. Ranges are inclusive and UTF-8 binary sorted.
LambdaResources (list) --The Python-language Lambda functions for this job.
(dict) --Identifies
LambdaArn (string) --An Amazon Resource Name (ARN) that represents an AWS Lambda function to be triggered by PUT object actions on the associated local Amazon S3 resource.
EventTriggers (list) --The array of ARNs for S3Resource objects to trigger the LambdaResource objects associated with this job.
(dict) --The container for the EventTriggerDefinition$EventResourceARN .
EventResourceARN (string) --The Amazon Resource Name (ARN) for any local Amazon S3 resource that is an AWS Lambda function's event trigger associated with this job.
:type Description: string
:param Description: Defines an optional description of this specific job, for example Important Photos 2016-08-11 .
:type AddressId: string
:param AddressId: The ID for the address that you want the Snowball shipped to.
:type KmsKeyARN: string
:param KmsKeyARN: The KmsKeyARN that you want to associate with this job. KmsKeyARN s are created using the CreateKey AWS Key Management Service (KMS) API action.
:type RoleARN: string
:param RoleARN: The RoleARN that you want to associate with this job. RoleArn s are created using the CreateRole AWS Identity and Access Management (IAM) API action.
:type SnowballCapacityPreference: string
:param SnowballCapacityPreference: If your job is being created in one of the US regions, you have the option of specifying what size Snowball you'd like for this job. In all other regions, Snowballs come with 80 TB in storage capacity.
:type ShippingOption: string
:param ShippingOption: The shipping speed for this job. This speed doesn't dictate how soon you'll get the Snowball, rather it represents how quickly the Snowball moves to its destination while in transit. Regional shipping speeds are as follows:
In Australia, you have access to express shipping. Typically, Snowballs shipped express are delivered in about a day.
In the European Union (EU), you have access to express shipping. Typically, Snowballs shipped express are delivered in about a day. In addition, most countries in the EU have access to standard shipping, which typically takes less than a week, one way.
In India, Snowballs are delivered in one to seven days.
In the US, you have access to one-day shipping and two-day shipping.
:type Notification: dict
:param Notification: Defines the Amazon Simple Notification Service (Amazon SNS) notification settings for this job.
SnsTopicARN (string) --The new SNS TopicArn that you want to associate with this job. You can create Amazon Resource Names (ARNs) for topics by using the CreateTopic Amazon SNS API action.
You can subscribe email addresses to an Amazon SNS topic through the AWS Management Console, or by using the Subscribe AWS Simple Notification Service (SNS) API action.
JobStatesToNotify (list) --The list of job states that will trigger a notification for this job.
(string) --
NotifyAll (boolean) --Any change in job state will trigger a notification for this job.
:type ClusterId: string
:param ClusterId: The ID of a cluster. If you're creating a job for a node in a cluster, you need to provide only this clusterId value. The other job attributes are inherited from the cluster.
:type SnowballType: string
:param SnowballType: The type of AWS Snowball appliance to use for this job. Currently, the only supported appliance type for cluster jobs is EDGE .
:type ForwardingAddressId: string
:param ForwardingAddressId: The forwarding address ID for a job. This field is not supported in most regions.
:rtype: dict
:return: {
'JobId': 'string'
}
"""
pass
|
QuantStats-0.0.25
|
QuantStats-0.0.25//quantstats/utils.pyfile:/quantstats/utils.py:function:_score_str/_score_str
|
def _score_str(val):
""" Returns + sign for positive values (used in plots) """
return ('' if '-' in val else '+') + str(val)
|
nova_powervm-9.0.0
|
nova_powervm-9.0.0//nova_powervm/virt/powervm/vm.pyfile:/nova_powervm/virt/powervm/vm.py:function:_uuid_set_high_bit/_uuid_set_high_bit
|
def _uuid_set_high_bit(pvm_uuid):
"""Turns on the high bit of a uuid
PowerVM uuids always set the byte 0, bit 0 to 0.
So to convert it to an OpenStack uuid we may have to set the high bit.
:param uuid: A PowerVM compliant uuid
:returns: A standard format uuid string
"""
return '%x%s' % (int(pvm_uuid[0], 16) | 8, pvm_uuid[1:])
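# Usage sketch: OR-ing 8 into the first hex digit sets the high bit of
# byte 0 (here '3' -> 'b'):
assert _uuid_set_high_bit('32dd2ca6-51dd-4f11-8cb7-cd08962b8d65'
    ) == 'b2dd2ca6-51dd-4f11-8cb7-cd08962b8d65'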
|
pypck-0.6.4
|
pypck-0.6.4//pypck/pck_commands.pyclass:PckGenerator/request_bin_sensors_status
|
@staticmethod
def request_bin_sensors_status():
"""Generate a binary-sensors status request.
:return: The PCK command (without address header) as text
:rtype: str
"""
return 'SMB'
|
streetscape-0.2.3
|
streetscape-0.2.3//streetscape/core.pyfile:/streetscape/core.py:function:_image_url/_image_url
|
def _image_url(gsv_point, heading, **kwargs):
""" construct a url for retrieving gsv metadata
at obs point location"""
npics = kwargs.get('npics', 6)
size = kwargs.get('size', 400)
pad = kwargs.get('pad', 0)
lat = gsv_point.geometry.y
lng = gsv_point.geometry.x
base = 'https://maps.googleapis.com/maps/api/streetview?pitch=-5'
size = 'size={}x{}'.format(size, size)
heading = 'heading={}'.format(heading)
fov = 'fov={}'.format(360.0 / npics + 2 * pad)
latlng = 'location={},{}'.format(lat, lng)
source = 'source=outdoor'
url = '{}&{}&{}&{}&{}&{}'.format(base, size, heading, fov, latlng, source)
return url
|
bs_ds
|
bs_ds//capstone.pyfile:/capstone.py:function:display_random_tweets/display_random_tweets
|
def display_random_tweets(df_tokenize, n=5, display_cols=['content',
'text_for_vectors', 'tokens'], group_labels=[], verbose=True):
"""Takes df_tokenize['text_for_vectors']"""
import numpy as np
import pandas as pd
from IPython.display import display
if len(group_labels) == 0:
group_labels = display_cols
random_tweets = {}
idx = np.random.choice(range(len(df_tokenize)), n)
for i in range(len(display_cols)):
group_name = str(group_labels[i])
random_tweets[group_name] = {}
df_col = df_tokenize[display_cols[i]]
tweet_group = {}
tweet_group['index'] = idx
chosen_tweets = df_col[idx]
tweet_group['text'] = chosen_tweets
if verbose > 0:
with pd.option_context('max_colwidth', 300):
df_display = pd.DataFrame.from_dict(tweet_group)
display(df_display.style.set_caption(f'Group: {group_name}'))
random_tweets[group_name] = tweet_group
if verbose == 0:
return random_tweets
else:
return
|
sphinx_cjkspace
|
sphinx_cjkspace//utils.pyfile:/utils.py:function:is_ascii/is_ascii
|
def is_ascii(c):
"""Check if an character is in ASCII.
>>> is_ascii('E')
True
>>> is_ascii('?')
True
>>> is_ascii('中')
False
>>> is_ascii('？')
False
"""
return ord(c) < 128
|
ironic
|
ironic//drivers/modules/redfish/management.pyclass:RedfishManagement/_get_sensors_drive
|
@classmethod
def _get_sensors_drive(cls, system):
"""Get storage drive sensors reading.
:param system: Redfish `system` object
:returns: returns a dict of sensor data.
"""
sensors = {}
for storage in system.simple_storage.get_members():
for drive in storage.devices:
sensor = cls._sensor2dict(drive, 'name', 'model', 'capacity_bytes')
sensor.update(cls._sensor2dict(drive.status, 'state', 'health'))
unique_name = '%s:%s@%s' % (drive.name, storage.identity,
system.identity)
sensors[unique_name] = sensor
return sensors
|
cassandra-driver-3.23.0
|
cassandra-driver-3.23.0//cassandra/cqlengine/connection.pyfile:/cassandra/cqlengine/connection.py:function:format_log_context/format_log_context
|
def format_log_context(msg, connection=None, keyspace=None):
"""Format log message to add keyspace and connection context"""
connection_info = connection or 'DEFAULT_CONNECTION'
if keyspace:
msg = '[Connection: {0}, Keyspace: {1}] {2}'.format(connection_info,
keyspace, msg)
else:
msg = '[Connection: {0}] {1}'.format(connection_info, msg)
return msg
|
dbschema-1.4.3
|
dbschema-1.4.3//src/schema_change.pyfile:/src/schema_change.py:function:delete_migration/delete_migration
|
def delete_migration(connection, basename):
""" Delete a migration in `migrations_applied` table """
sql = 'DELETE FROM migrations_applied WHERE name = %s'
with connection.cursor() as cursor:
cursor.execute(sql, (basename,))
connection.commit()
return True
|
zc.sourcefactory-1.1
|
zc.sourcefactory-1.1//src/zc/sourcefactory/interfaces.pyclass:IContextualTokenPolicy/getToken
|
def getToken(context, value):
"""Return a token for the value."""
|
merlinwf-1.4.1
|
merlinwf-1.4.1//merlin/study/celeryadapter.pyfile:/merlin/study/celeryadapter.py:function:launch_celery_workers/launch_celery_workers
|
def launch_celery_workers(spec, steps=None, worker_args='',
just_return_command=False):
"""
Launch celery workers for the specified MerlinStudy.
spec MerlinSpec object
steps The steps in the spec to tie the workers to
worker_args Optional celery arguments for the workers
just_return_command Don't execute, just return the command
"""
queues = spec.make_queue_string(steps)
worker_command = ' '.join(['celery worker -A merlin', worker_args, '-Q',
queues])
if just_return_command:
return worker_command
else:
pass
|
mucor-1.51
|
mucor-1.51//mucor.pyfile:/mucor.py:function:filterRow/filterRow
|
def filterRow(row, fieldId, filters, kind):
"""
returning True means this row will be filtered out [masked]
returning False means the row will not be filtered out [pass filter]
"""
if kind in ['vcf', 'vcf.gz']:
for rowFilter in str(row[fieldId['FILTER']]).split(';'):
if rowFilter not in filters:
return True
break
if str(kind) == 'out':
for rowFilter in str(row[fieldId['judgement']]).split(';'):
if rowFilter not in filters:
return True
break
return False
|