repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
red_connector_ssh
|
red_connector_ssh//mount_dir.pyfile:/mount_dir.py:function:split_to_length/split_to_length
|
def split_to_length(s, n):
    """Chunk the string ``s`` into consecutive pieces of at most ``n`` chars.

    :param s: The string to split
    :param n: The maximum length of each piece
    :return: A list of substrings covering ``s`` in order, each of length <= n
    """
    return [s[start:start + n] for start in range(0, len(s), n)]
|
pyQRZ-0.1.0
|
pyQRZ-0.1.0//qrz_example.pyfile:/qrz_example.py:function:print_keys/print_keys
|
def print_keys(key_names, query_result):
    """Print the values of ``key_names`` found in ``query_result``.

    Keys absent from the mapping (e.g. ``fname`` for W1AW queries) are
    skipped silently instead of raising ``KeyError``.
    """
    present = [query_result[name] for name in key_names if name in query_result]
    print(''.join(value + ' ' for value in present))
|
ttach
|
ttach//functional.pyfile:/functional.py:function:crop_lt/crop_lt
|
def crop_lt(x, crop_h, crop_w):
    """Crop the left-top corner of a 4-D (batch, channel, H, W) array:
    keep the first ``crop_h`` rows and ``crop_w`` columns of the two
    trailing spatial axes.
    """
    return x[:, :, :crop_h, :crop_w]
|
_pytest
|
_pytest//hookspec.pyfile:/hookspec.py:function:pytest_assertion_pass/pytest_assertion_pass
|
def pytest_assertion_pass(item, lineno, orig, expl):
    """
    **(Experimental)** Hook called whenever an assertion *passes*.
    .. versionadded:: 5.0
    Use this hook to do some processing after a passing assertion.
    The original assertion information is available in the `orig` string
    and the pytest introspected assertion information is available in the
    `expl` string.
    This hook must be explicitly enabled by the ``enable_assertion_pass_hook``
    ini-file option:
    .. code-block:: ini
        [pytest]
        enable_assertion_pass_hook=true
    You need to **clean the .pyc** files in your project directory and
    interpreter libraries when enabling this option, as assertions will
    require to be re-written.
    :param _pytest.nodes.Item item: pytest item object of current test
    :param int lineno: line number of the assert statement
    :param string orig: string with original assertion
    :param string expl: string with assert explanation
    .. note::
        This hook is **experimental**, so its parameters or even the hook
        itself might be changed/removed without warning in any future pytest
        release. If you find this hook useful, please share your feedback
        opening an issue.
    """
|
smalisca-0.2
|
smalisca-0.2//smalisca/analysis/analysis_shell.pyfile:/smalisca/analysis/analysis_shell.py:function:extract_range/extract_range
|
def extract_range(s):
    """Parse a comma-separated range string.

    Returns ``(start, end)`` as ints for input like ``"3,7"`` and
    ``(start, None)`` when no second value is present.
    """
    parts = s.split(',')
    start = int(parts[0])
    end = int(parts[1]) if len(parts) > 1 else None
    return start, end
|
symspellpy
|
symspellpy//helpers.pyfile:/helpers.py:function:prefix_suffix_prep/prefix_suffix_prep
|
def prefix_suffix_prep(string1, string2):
    """Exclude the common prefix and suffix of two strings.

    Expects ``len(string1) <= len(string2)``.

    Returns
    -------
    (len1, len2, start) : tuple of int
        Lengths of the middle (non-shared) parts of each string, and the
        index where those middle parts begin.
    """
    len1, len2 = len(string1), len(string2)
    # Trim matching characters from the right end first.
    while len1 > 0 and string1[len1 - 1] == string2[len2 - 1]:
        len1 -= 1
        len2 -= 1
    # Count matching characters from the left, bounded by what remains.
    start = 0
    while start < len1 and string1[start] == string2[start]:
        start += 1
    if start:
        len1 -= start
        len2 -= start
    return len1, len2, start
|
dicom2nifti-2.2.8
|
dicom2nifti-2.2.8//dicom2nifti/common.pyfile:/dicom2nifti/common.py:function:set_tr_te/set_tr_te
|
def set_tr_te(nifti_image, repetition_time, echo_time):
    """Store the TR and TE in the nifti header.

    :param nifti_image: nifti image whose header is updated in place
    :param repetition_time: repetition time; written to pixdim[4] divided
        by 1000 (ms -> s presumably -- confirm against callers)
    :param echo_time: echo time, embedded in the db_name field
    :return: the same nifti image, for chaining
    """
    header = nifti_image.header.structarr
    header['pixdim'][4] = repetition_time / 1000.0
    header['db_name'] = '?TR:%.3f TE:%d' % (repetition_time, echo_time)
    return nifti_image
|
isatools
|
isatools//utils.pyfile:/utils.py:function:format_report_csv/format_report_csv
|
def format_report_csv(report):
    """Format a JSON validation report as a CSV string.

    :param report: JSON report output from validator (keys:
        'validation_finished', 'warnings', 'errors')
    :return: string representing the csv formatted report
    """
    lines = []
    if report['validation_finished']:
        lines.append('Validation=success')
    for warning in report['warnings']:
        lines.append('{},{},{}'.format(
            warning['code'], warning['message'], warning['supplemental']))
    for error in report['errors']:
        lines.append('{},{},{}'.format(
            error['code'], error['message'], error['supplemental']))
    return ''.join(line + '\n' for line in lines)
|
xoutil-2.1.8
|
xoutil-2.1.8//xotl/tools/versions.pyfile:/xotl/tools/versions.py:function:_check/_check
|
def _check(info):
"""Validate a version info.
:param info: could be a string, an integer, float, or any integer
collection (only first three valid integers are used).
:returns: a valid tuple or an error if invalid.
"""
from collections import Iterable
from distutils.version import LooseVersion, StrictVersion
MAX_COUNT = 3
if isinstance(info, (int, float)):
aux = str(info)
elif isinstance(info, Iterable) and not isinstance(info, str):
aux = '.'.join(map(str, info))
else:
aux = info
if isinstance(aux, str):
try:
essay = StrictVersion(aux)
except (TypeError, ValueError):
essay = LooseVersion(aux)
res = essay.version[:MAX_COUNT]
if any(res):
return tuple(res)
else:
raise ValueError("invalid version value '{}'".format(info))
else:
msg = "Invalid type '{}' for version '{}'"
raise TypeError(msg.format(type(info).__name__, info))
|
autoarray-0.10.6
|
autoarray-0.10.6//autoarray/util/regularization_util.pyfile:/autoarray/util/regularization_util.py:function:adaptive_regularization_weights_from_pixel_signals/adaptive_regularization_weights_from_pixel_signals
|
def adaptive_regularization_weights_from_pixel_signals(inner_coefficient,
    outer_coefficient, pixel_signals):
    """Compute the per-pixel effective regularization weights.

    Each pixel's weight blends two coefficients by its (hyper) pixel-signal:
    1) ``inner_coefficient`` scales ``pixel_signals`` (high-signal pixels);
    2) ``outer_coefficient`` scales ``1.0 - pixel_signals`` (low-signal
       pixels). The blended value is then squared.

    Parameters
    ----------
    inner_coefficient, outer_coefficient : float
        Regularization coefficients controlling the degree of smoothing.
    pixel_signals : ndarray
        Estimated signal in every pixelization pixel.
    """
    blended = (inner_coefficient * pixel_signals
               + outer_coefficient * (1.0 - pixel_signals))
    return blended ** 2.0
|
pyvo-1.0
|
pyvo-1.0//pyvo/dal/tap.pyfile:/pyvo/dal/tap.py:function:escape/escape
|
def escape(term):
    """Escape ``term`` for embedding in an ADQL string literal by doubling
    every single quote.
    """
    text = str(term)
    return "''".join(text.split("'"))
|
findex_gui
|
findex_gui//bin/startup.pyfile:/bin/startup.py:function:check_version/check_version
|
def check_version():
    """Checks version of Findex.

    Currently a no-op placeholder that returns None.
    """
    return None
|
consul_kv-0.7.3
|
consul_kv-0.7.3//consul_kv/utils.pyfile:/consul_kv/utils.py:function:get_after_slash/get_after_slash
|
def get_after_slash(string):
    """
    Get the part of a string after the first slash.

    :param str string: String to split on its first slash
    :return str: Everything following the first slash ('' when there is
        no slash at all)
    """
    _, _, remainder = string.partition('/')
    return remainder
|
bpy
|
bpy//ops/armature.pyfile:/ops/armature.py:function:select_mirror/select_mirror
|
def select_mirror(only_active: bool=False, extend: bool=False):
    """Mirror the bone selection.

    Stub signature with no Python-side behavior (the real operator lives
    in the host application's runtime).
    :param only_active: Active Only, Only operate on the active bone
    :type only_active: bool
    :param extend: Extend, Extend the selection
    :type extend: bool
    """
    return None
|
cwmud-0.4.0
|
cwmud-0.4.0//cwmud/core/shells.pyclass:Shell/get_command
|
@classmethod
def get_command(cls, verb):
    """Look up a command registered on this shell by its verb.

    :param str verb: The verb of the command to get
    :returns Command|None: The matching command, or None if unregistered
    """
    registry = cls._verbs
    return registry.get(verb)
|
wetb-0.0.21
|
wetb-0.0.21//wetb/prepost/dlcdefs.pyfile:/wetb/prepost/dlcdefs.py:function:casedict2xlsx/casedict2xlsx
|
def casedict2xlsx():
    """
    Convert a full Cases.cases dict to Excel spreadsheets

    NOTE(review): not implemented -- the body is empty, so calling this
    returns None and has no effect.
    """
|
stig
|
stig//utils/expandtabs.pyfile:/utils/expandtabs.py:function:_explode/_explode
|
def _explode(lines, indent):
"""Split each line in `lines` at tabstops
Tabstops at the beginning of each line are expanded to 2 spaces.
Return list of rows (a row being a list of strings, i.e. cells)
"""
exploded = []
for line in lines:
indentions = 0
while line.startswith('\t'):
indentions += 1
line = line[1:]
splitline = line.split('\t')
for i in range(indentions):
splitline.insert(0, ' ' * indent)
exploded.append(splitline)
return exploded
|
mathematics_dataset
|
mathematics_dataset//modules/calculus.pyfile:/modules/calculus.py:function:test_extra/test_extra
|
def test_extra():
    """Returns dict of extrapolation testing modules (empty for calculus)."""
    return dict()
|
astor
|
astor//source_repr.pyfile:/source_repr.py:function:split_group/split_group
|
def split_group(source, pos, maxline):
    """ Split a group into two subgroups. The
        first will be appended to the current
        line, the second will start the new line.
        Note that the first group must always
        contain at least one item.
        The original group may be destroyed.

        :param source: list of token strings; mutated in place and returned
            as the second subgroup
        :param pos: current column position on the line
        :param maxline: target maximum line length
        :return: (first, source) -- tokens for the current line and the
            remaining tokens for the next line
    """
    first = []
    # Reverse so tokens can be consumed from the front via O(1) pops
    # off the end.
    source.reverse()
    while source:
        tok = source.pop()
        first.append(tok)
        pos += len(tok)
        if source:
            tok = source[-1]
            # A token ending in a space may run right up to the limit;
            # otherwise keep a 4-column margin before breaking.
            allowed = maxline + 1 if tok.endswith(' ') else maxline - 4
            if pos + len(tok) > allowed:
                break
    # Restore the original order of the remaining tokens.
    source.reverse()
    return first, source
|
guillotina
|
guillotina//interfaces/common.pyclass:IMapping/setdefault
|
def setdefault(key, default=None):
    """D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D

    Interface declaration only (zope-style, no ``self``); implementations
    provide the actual body.
    """
|
binstarsolver-0.1.3
|
binstarsolver-0.1.3//binstarsolver/utils.pyfile:/binstarsolver/utils.py:function:calc_mass_ratio_from_velrs/calc_mass_ratio_from_velrs
|
def calc_mass_ratio_from_velrs(velr_1, velr_2):
    """Calculate the ratio of stellar masses from observed radial velocities.

    Assumes a circular orbit.

    Parameters
    ----------
    velr_1 : float
        Observed radial velocity of star 1 (m/s).
    velr_2 : float
        Observed radial velocity of star 2 (m/s).

    Returns
    -------
    mass_ratio : float
        mass1 / mass2 (unitless).

    Notes
    -----
    m1 / m2 = a2 / a1 for a low-eccentricity orbit, and v = 2*pi*a / P
    with vr = v*sin(i), hence m1 / m2 = v2r / v1r.
    From equation 7.5 in section 7.3 of Carroll & Ostlie (2007),
    An Introduction to Modern Astrophysics.
    """
    return velr_2 / velr_1
|
elifetools
|
elifetools//xmlio.pyfile:/xmlio.py:function:get_first_element_index/get_first_element_index
|
def get_first_element_index(root, tag_name):
    """
    Return the index of the first child of ``root`` whose tag equals
    ``tag_name``, for convenient use with Element.insert(); the count
    starts at 1 for the first child. Returns None when no child matches.
    """
    for position, child in enumerate(root, start=1):
        if child.tag == tag_name:
            return position
    return None
|
matematik
|
matematik//formula/arithmetic.pyfile:/formula/arithmetic.py:function:tup_divide/tup_divide
|
def tup_divide(tuple):
    """Divide the elements of ``tuple`` from left to right and return the
    running quotient, e.g. (8, 2, 2) -> 8 / 2 / 2 -> 2.0.

    NOTE(review): the parameter name shadows the builtin ``tuple``; kept
    as-is for interface compatibility with keyword callers.
    """
    quotient = tuple[0]
    for divisor in tuple[1:]:
        quotient = quotient / divisor
    return quotient
|
cis_interface
|
cis_interface//communication/FileComm.pyclass:FileComm/is_installed
|
@classmethod
def is_installed(cls, language=None):
    """Determine if the necessary libraries are installed for this
    communication class.

    Args:
        language (str, optional): Specific language that should be checked
            for compatibility. Defaults to None, meaning all languages
            supported on the current platform.

    Returns:
        bool: Always True for this class -- no check is performed.
    """
    return True
|
imgroi-0.0.3
|
imgroi-0.0.3//versioneer.pyfile:/versioneer.py:function:plus_or_dot/plus_or_dot
|
def plus_or_dot(pieces):
    """Return '.' when the closest tag already contains a '+', else '+'."""
    tag = pieces.get('closest-tag', '')
    return '.' if '+' in tag else '+'
|
lollylib
|
lollylib//lolly_helpers.pyfile:/lolly_helpers.py:function:extract_first_value_in_quotes/extract_first_value_in_quotes
|
def extract_first_value_in_quotes(line, quote_mark):
    """
    Extracts the first value in quotes (single or double) from a string.
    The line is left-stripped of whitespace before extraction.
    :param line: string
    :param quote_mark: type of quotation mark: ' or "
    :return: Dict with keys 'value' (extracted value), 'remainder'
        (text after the closing quote) and 'error' ('' on success,
        'syntax' otherwise).
    """
    stripped = line.lstrip()
    failure = {'value': '', 'remainder': stripped, 'error': 'syntax'}
    if len(stripped) < 2 or stripped[0] != quote_mark:
        return failure
    closing = stripped.find(quote_mark, 1)
    if closing < 0:
        return failure
    return {'value': stripped[1:closing],
            'remainder': stripped[closing + 1:],
            'error': ''}
|
dankbot
|
dankbot//memes.pyclass:ImgurMeme/set_credentials
|
@classmethod
def set_credentials(cls, client_id, client_secret):
    """Store the Imgur API client ID and client secret on the class."""
    cls.client_id, cls.client_secret = client_id, client_secret
|
tensorlayer-2.2.2
|
tensorlayer-2.2.2//tensorlayer/prepro.pyfile:/tensorlayer/prepro.py:function:obj_box_coords_rescale/obj_box_coords_rescale
|
def obj_box_coords_rescale(coords=None, shape=None):
    """Scale a list of pixel-unit coordinates to image-size ratios in [0, 1].

    Parameters
    ------------
    coords : list of list of 4 ints or None
        Coordinates of one or more boxes, e.g. [[x, y, w, h], [x, y, w, h], ...].
    shape : list of 2 int or None
        [height, width]; defaults to [100, 200].

    Returns
    -------
    list of list of 4 numbers
        A list of new bounding boxes.

    Examples
    ---------
    >>> obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
    [[0.15, 0.4, 0.25, 0.5]]
    """
    coords = [] if coords is None else coords
    shape = [100, 200] if shape is None else shape
    height = float(shape[0])
    width = float(shape[1])
    rescaled = []
    for coord in coords:
        if len(coord) != 4:
            raise AssertionError('coordinate should be 4 values : [x, y, w, h]'
                )
        x, y, w, h = coord
        rescaled.append([x / width, y / height, w / width, h / height])
    return rescaled
|
kolibri-0.13.2
|
kolibri-0.13.2//kolibri/dist/more_itertools/more.pyfile:/kolibri/dist/more_itertools/more.py:function:one/one
|
def one(iterable, too_short=None, too_long=None):
    """Return the single item that *iterable* is expected to contain.

    Raises ``ValueError`` (or *too_short*, when given) if *iterable* is
    empty, and ``ValueError`` (or *too_long*) if it holds more than one
    item. Useful e.g. for database queries expected to return one row.

    Note that the iterable is advanced up to twice, so a second item --
    if present -- is consumed along with the first. See :func:`spy` or
    :func:`peekable` for less destructive alternatives.
    """
    iterator = iter(iterable)
    try:
        first = next(iterator)
    except StopIteration:
        raise (too_short or ValueError(
            'too few items in iterable (expected 1)'))
    try:
        next(iterator)
    except StopIteration:
        return first
    raise (too_long or ValueError(
        'too many items in iterable (expected 1)'))
|
monasca_transform
|
monasca_transform//processor/pre_hourly_processor.pyclass:PreHourlyProcessor/_parse_saved_offsets
|
@staticmethod
def _parse_saved_offsets(app_name, topic, saved_offset_spec):
"""get dict representing saved offsets."""
offset_dict = {}
for key, value in saved_offset_spec.items():
if key.startswith('%s_%s' % (app_name, topic)):
spec_app_name = value.get_app_name()
spec_topic = value.get_topic()
spec_partition = int(value.get_partition())
spec_from_offset = value.get_from_offset()
spec_until_offset = value.get_until_offset()
key = '_'.join((spec_topic, str(spec_partition)))
offset_dict[key] = (spec_app_name, spec_topic, spec_partition,
spec_from_offset, spec_until_offset)
return offset_dict
|
nits
|
nits//file.pyclass:File/read
|
@classmethod
def read(cls, filename):
    """
    return file elements in a generator

    NOTE(review): abstract placeholder -- always fails via ``assert
    False``; subclasses are expected to override. (Under ``python -O``
    the assert is stripped and this silently returns None.)
    """
    assert False
|
aistac-foundation-2.6.49
|
aistac-foundation-2.6.49//aistac/components/abstract_ledger_component.pyclass:AbstractLedger/from_env
|
@classmethod
def from_env(cls, task_name: str=None, default_save=None, reset_templates:
    bool=None, align_connectors: bool=None, default_save_intent: bool=None,
    default_intent_level: bool=None, order_next_available: bool=None,
    default_replace_intent: bool=None, **kwargs):
    """ Class Factory Method that builds the connector handlers taking the property contract path from
    the os.environ['AISTAC_PM_PATH'] or, if not found, uses the system default,
        for Linux and IOS '/tmp/components/contracts'
        for Windows 'os.environ['AppData']\\components\\contracts'
    The following environment variables can be set:
        'AISTAC_PM_PATH': the property contract path, if not found, uses the system default
        'AISTAC_PM_TYPE': a file type for the property manager. If not found sets as 'json'
        'AISTAC_PM_MODULE': a default module package, if not set uses component default
        'AISTAC_PM_HANDLER': a default handler. if not set uses component default
    This method calls to the Factory Method 'from_uri(...)' returning the initialised class instance

    :param task_name: The reference name that uniquely identifies a task or subset of the property manager
    :param default_save: (optional) if the configuration should be persisted
    :param reset_templates: (optional) reset connector templates from environ variables. Default True
           (see `report_environ()`)
    :param align_connectors: (optional) resets aligned connectors to the template. Default True
    :param default_save_intent: (optional) The default action for saving intent in the property manager
    :param default_intent_level: (optional) the default level intent should be saved at
    :param order_next_available: (optional) if the default behaviour for the order should be next available order
    :param default_replace_intent: (optional) the default replace existing intent behaviour
    :param kwargs: to pass to the property ConnectorContract as its kwargs
    :return: the initialised class instance
    """
    task_name = task_name if isinstance(task_name, str) else 'base'
    # Bug fix: forward **kwargs to the parent factory -- the docstring
    # promises they reach the ConnectorContract, but they were dropped.
    return super().from_env(task_name=task_name, default_save=default_save,
        reset_templates=reset_templates, align_connectors=align_connectors,
        default_save_intent=default_save_intent,
        default_intent_level=default_intent_level,
        order_next_available=order_next_available,
        default_replace_intent=default_replace_intent, **kwargs)
|
fraciso-0.0.7
|
fraciso-0.0.7//fraciso/matrices.pyfile:/fraciso/matrices.py:function:_swap/_swap
|
def _swap(obj, i, j):
"""Swaps the elements at index `i` and index `j` in `obj`.
If `obj` has a ``copy()`` method, ``obj[j]`` gets a copy of ``obj[i]``.
"""
temp = obj[i]
if hasattr(temp, 'copy'):
temp = temp.copy()
obj[i] = obj[j]
obj[j] = temp
return obj
|
nngt-1.3.2
|
nngt-1.3.2//nngt/lib/io_tools.pyfile:/nngt/lib/io_tools.py:function:_get_edges_elist/_get_edges_elist
|
def _get_edges_elist(line, attributes, separator, secondary, edges,
di_attributes, di_convert):
"""
Add edges and attributes to `edges` and `di_attributes` for the "neighbour"
format.
"""
data = line.split(separator)
source, target = int(data[0]), int(data[1])
edges.append((source, target))
if len(data) == 3:
attr_data = data[2].split(secondary)
for name, val in zip(attributes, attr_data):
di_attributes[name].append(di_convert[name](val))
|
libtools-0.3.3
|
libtools-0.3.3//libtools/userinput.pyfile:/libtools/userinput.py:function:range_bind/range_bind
|
def range_bind(min_value, max_value, value):
    """Clamp ``value`` into [min_value, max_value] and return it as an int."""
    clamped = max(min_value, min(value, max_value))
    return int(clamped)
|
django-anymail-7.1.0
|
django-anymail-7.1.0//anymail/backends/mailgun.pyfile:/anymail/backends/mailgun.py:function:isascii/isascii
|
def isascii(s):
    """Return True when every character of str `s` is ASCII.

    (Equivalent to Python 3.7's `str.isascii()`.)
    """
    try:
        s.encode('ascii')
    except UnicodeEncodeError:
        return False
    else:
        return True
|
scipion-em-1.0.1
|
scipion-em-1.0.1//pwem/viewers/viewer_chimera.pyclass:Chimera/getSymmetry
|
@classmethod
def getSymmetry(cls, scipionSym):
    """Map a Scipion symmetry identifier to its Chimera equivalent via
    the class-level ``_symmetryMap`` table."""
    mapping = cls._symmetryMap
    return mapping[scipionSym]
|
shot_detector
|
shot_detector//selectors/point/base_point_selector.pyclass:BasePointSelector/select_point
|
@staticmethod
def select_point(event, video_state=None, **_kwargs):
    """Pass-through default: return the event and video state unchanged.

    Intended to be overridden with real selection logic.
    :param event:
    :param video_state:
    """
    return event, video_state
|
certbot_dns_inwx
|
certbot_dns_inwx//inwx.pyclass:prettyprint/domain_check
|
@staticmethod
def domain_check(checks):
    """
    Pretty print domain checks.

    list checks: The list of domain checks to be pretty printed.
    """
    # Unwrap an API envelope when present.
    if 'resData' in checks:
        checks = checks['resData']
    # NOTE(review): len(checks) counts mapping keys, not domains, when
    # ``checks`` is a dict -- preserved as-is.
    output = '\n%i domain check(s):\n' % len(checks)
    for check in checks['domain']:
        output += '%s = %s' % (check['domain'], check['status'])
    return output
|
realfast
|
realfast//controllers.pyfile:/controllers.py:function:runingest/runingest
|
def runingest(sdms):
    """ Call archive tool or move data to trigger archiving of sdms.
    This function will ultimately be triggered by candidate portal.

    :raises NotImplementedError: always -- archiving is not implemented yet.
    """
    # Bug fix: the original evaluated ``NotImplementedError`` as a bare
    # expression, silently doing nothing; the exception must be raised.
    raise NotImplementedError('archiving of sdms is not implemented yet')
|
mpltools-0.2.0
|
mpltools-0.2.0//mpltools/style/core.pyfile:/mpltools/style/core.py:function:update_nested_dict/update_nested_dict
|
def update_nested_dict(main_dict, new_dict):
    """Merge `new_dict` into `main_dict`, one level of nesting deep.

    Unlike dict.update, the values of the parent dict are assumed to be
    dicts: an existing sub-dict is updated in place rather than replaced.
    Returns the (mutated) `main_dict`.
    """
    for key, sub_dict in new_dict.items():
        if key not in main_dict:
            main_dict[key] = sub_dict
        else:
            main_dict[key].update(sub_dict)
    return main_dict
|
dropbox
|
dropbox//team_log.pyclass:EventType/no_password_link_view_create_report
|
@classmethod
def no_password_link_view_create_report(cls, val):
    """
    Build an instance of this class set to the
    ``no_password_link_view_create_report`` tag carrying ``val``.

    :param NoPasswordLinkViewCreateReportType val:
    :rtype: EventType
    """
    tag = 'no_password_link_view_create_report'
    return cls(tag, val)
|
click_demultiplex
|
click_demultiplex//commands.pyfile:/commands.py:function:close_file_handles/close_file_handles
|
def close_file_handles(file_handles):
    """Close the paired 'r1'/'r2' handles of every entry in `file_handles`."""
    for handles in file_handles.values():
        for read_end in ('r1', 'r2'):
            handles[read_end].close()
|
pygfunction-1.1.0
|
pygfunction-1.1.0//pygfunction/boreholes.pyfile:/pygfunction/boreholes.py:function:_path_to_inlet/_path_to_inlet
|
def _path_to_inlet(bore_connectivity, bore_index):
"""
Returns the path from a borehole to the bore field inlet.
This function raises an error if the supplied borehole connectivity is
invalid.
Parameters
----------
bore_connectivity : list
Index of fluid inlet into each borehole. -1 corresponds to a borehole
connected to the bore field inlet.
bore_index : int
Index of borehole to evaluate path.
Returns
-------
path : list
List of boreholes leading to the bore field inlet, starting from
borehole bore_index
"""
path = [bore_index]
index_in = bore_connectivity[bore_index]
while not index_in == -1:
path.append(index_in)
index_in = bore_connectivity[index_in]
return path
|
storyscript-0.26.3
|
storyscript-0.26.3//storyscript/compiler/semantics/types/Types.pyfile:/storyscript/compiler/semantics/types/Types.py:function:implicit_cast/implicit_cast
|
def implicit_cast(t1, t2):
    """
    Check whether one of two types can be implicitly casted to the other,
    trying t1 -> t2 first and then t2 -> t1.
    Returns the casted type, or `None` if no implicit cast is possible.
    """
    forward = t1.implicit_to(t2)
    if forward is not None:
        return forward
    return t2.implicit_to(t1)
|
sympathy
|
sympathy//app/version.pyfile:/app/version.py:function:application_url/application_url
|
def application_url():
    """Return the URL of the Sympathy for Data developer website."""
    url = 'https://www.sympathyfordata.com/'
    return url
|
fake-bpy-module-2.80-20200428
|
fake-bpy-module-2.80-20200428//bpy/ops/file.pyfile:/bpy/ops/file.py:function:filenum/filenum
|
def filenum(increment: int=1):
    """Increment number in filename.

    Stub signature with no Python-side behavior.
    :param increment: Increment
    :type increment: int
    """
    return None
|
python-django-social-0.8.3
|
python-django-social-0.8.3//social/get.pyfile:/social/get.py:function:tweets/tweets
|
def tweets(url):
    """
    Get the number of tweets containing the provided URL.

    Twitter retired the public count endpoint on November 20, 2015
    (https://blog.twitter.com/2015/hard-decisions-for-a-sustainable-platform),
    so this function always returns ``(0,)`` until implemented otherwise.

    :param str url: The url to query Twitter for.
    :return: the number of tweets
    :rtype: Tuple

    .. todo: Implement an own counter for URLs through the recommended
        "filter" Streaming-API.
        Old query:
        query = "http://urls.api.twitter.com/1/urls/count.json?url=%s" % (url)
    """
    return (0,)
|
discord
|
discord//colour.pyclass:Colour/blue
|
@classmethod
def blue(cls):
    """A factory method that returns a :class:`Colour` with a value of
    ``0x3498db``."""
    return cls(0x3498DB)
|
dynamorm
|
dynamorm//table.pyclass:DynamoTable3/get_table
|
@classmethod
def get_table(cls, name):
    """Return the boto3 Table object for this model, creating and caching
    it on first use.

    The Table is stored on the class, so it is shared between all
    instances of a given model.
    """
    if not hasattr(cls, '_table'):
        cls._table = cls.get_resource().Table(name)
    return cls._table
|
gocept.month-2.1
|
gocept.month-2.1//src/gocept/month/interfaces.pyclass:IMonth/__add__
|
def __add__(months):
    """Add a given number of months.

    Interface declaration only (zope-style, no ``self``); implementations
    provide the body.
    """
|
epyqlib
|
epyqlib//utils/canlog.pyfile:/utils/canlog.py:function:to_trc_v1_3/to_trc_v1_3
|
def to_trc_v1_3(messages, bus):
    """Convert CAN log messages to TRC v1.3 -- not implemented.

    `messages` should be a dict. Keys are bus numbers and values are
    iterables of messages.

    :raises NotImplementedError: always; this writer is unimplemented.
    """
    # NotImplementedError (a RuntimeError/Exception subclass) is the
    # idiomatic signal for an unimplemented API and remains catchable by
    # existing ``except Exception`` handlers.
    raise NotImplementedError('Not implemented')
|
gameanalysis-8.0.3
|
gameanalysis-8.0.3//gameanalysis/paygame.pyfile:/gameanalysis/paygame.py:function:_unpack_obs_player/_unpack_obs_player
|
def _unpack_obs_player(role, strategy, payoff, **_):
"""Unpack an observation player"""
return role, strategy, payoff
|
skymapping_tools-0.13.0
|
skymapping_tools-0.13.0//skymapper/core.pyfile:/skymapper/core.py:function:lr_schedule/lr_schedule
|
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 20, 40, 55, 60 epochs
    (the original comment said 10/20/30/50, which did not match the code).
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 0.001
    if epoch > 60:
        lr *= 0.0001
    elif epoch > 55:
        lr *= 0.001
    elif epoch > 40:
        lr *= 0.01
    elif epoch > 20:
        lr *= 0.1
    print('Learning rate: ', lr)
    return lr
|
dmba
|
dmba//featureSelection.pyfile:/featureSelection.py:function:stepwise_selection/stepwise_selection
|
def stepwise_selection(variables, train_model, score_model, direction=
    'both', verbose=True):
    """ Variable selection using forward and/or backward selection
    Input:
        variables: complete list of variables to consider in model building
        train_model: function that returns a fitted model for a given set of variables
        score_model: function that returns the score of a model; better models have lower scores
        direction: 'forward', 'backward', or 'both' (the default)
        verbose: print progress information when True
    Returns:
        (best_model, best_variables)
    """
    FORWARD = 'forward'
    BACKWARD = 'backward'
    directions = [FORWARD, BACKWARD]
    if direction.lower() == FORWARD:
        directions = [FORWARD]
    if direction.lower() == BACKWARD:
        directions = [BACKWARD]
    # Consistency fix: use the FORWARD/BACKWARD constants everywhere
    # instead of mixing them with bare string literals.
    # Forward selection starts from the empty model; backward starts full.
    best_variables = [] if FORWARD in directions else list(variables)
    best_model = train_model(best_variables)
    best_score = score_model(best_model, best_variables)
    if verbose:
        print('Variables: ' + ', '.join(variables))
        print('Start: score={:.2f}, constant'.format(best_score))
    while True:
        # Candidate steps; keeping the current model lets the loop stop
        # when no move improves the score.
        step = [(best_score, None, best_model, 'unchanged')]
        if FORWARD in directions:
            for variable in variables:
                if variable in best_variables:
                    continue
                step_var = list(best_variables)
                step_var.append(variable)
                step_model = train_model(step_var)
                step_score = score_model(step_model, step_var)
                step.append((step_score, variable, step_model, 'add'))
        if BACKWARD in directions:
            for variable in best_variables:
                step_var = list(best_variables)
                step_var.remove(variable)
                step_model = train_model(step_var)
                step_score = score_model(step_model, step_var)
                step.append((step_score, variable, step_model, 'remove'))
        # Lower score is better; pick the best candidate.
        step.sort(key=lambda x: x[0])
        best_score, chosen_variable, best_model, direction = step[0]
        if verbose:
            print('Step: score={:.2f}, {} {}'.format(best_score, direction,
                chosen_variable))
        if chosen_variable is None:
            break
        if direction == 'add':
            best_variables.append(chosen_variable)
        else:
            best_variables.remove(chosen_variable)
    return best_model, best_variables
|
CleanAdminDjango-1.5.3.1
|
CleanAdminDjango-1.5.3.1//django/core/files/images.pyfile:/django/core/files/images.py:function:get_image_dimensions/get_image_dimensions
|
def get_image_dimensions(file_or_path, close=False):
    """
    Returns the (width, height) of an image, given an open file or a path. Set
    'close' to True to close the file at the end if it is initially in an open
    state.

    Returns None when no image size could be parsed from the data.
    """
    # Prefer the packaged Pillow/PIL namespace; fall back to the legacy
    # top-level module layout of very old PIL releases.
    try:
        from PIL import ImageFile as PILImageFile
    except ImportError:
        import ImageFile as PILImageFile
    p = PILImageFile.Parser()
    if hasattr(file_or_path, 'read'):
        # Open file object: remember the caller's position so it can be
        # restored afterwards, then parse from the beginning.
        file = file_or_path
        file_pos = file.tell()
        file.seek(0)
    else:
        # Path given: we opened the file ourselves, so we must close it.
        file = open(file_or_path, 'rb')
        close = True
    try:
        chunk_size = 1024
        while 1:
            data = file.read(chunk_size)
            if not data:
                break
            p.feed(data)
            if p.image:
                return p.image.size
            # Double the read size so large headers need few passes.
            chunk_size = chunk_size * 2
        return None
    finally:
        if close:
            file.close()
        else:
            # Restore the caller's original file position.
            file.seek(file_pos)
|
qcodes-0.13.0
|
qcodes-0.13.0//versioneer.pyfile:/versioneer.py:function:render_pep440_old/render_pep440_old
|
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .
    The ".dev0" means dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = tag
        needs_post = pieces['distance'] or pieces['dirty']
        if needs_post:
            rendered += '.post%d' % pieces['distance']
            if pieces['dirty']:
                rendered += '.dev0'
    else:
        rendered = '0.post%d' % pieces['distance']
        if pieces['dirty']:
            rendered += '.dev0'
    return rendered
|
nms
|
nms//felzenszwalb.pyfile:/felzenszwalb.py:function:poly_compare/poly_compare
|
def poly_compare(poly1, poly2, area):
    """Compute the ratio of the overlap of two polygons to a reference area.

    :param poly1: first polygon, given by its vertices
    :type poly1: list
    :param poly2: second polygon, given by its vertices
    :type poly2: list
    :param area: reference area the intersection is compared against
    :type area: float
    :return: overlap(poly1, poly2) / area
    :rtype: float
    """
    assert area > 0
    overlap = help.polygon_intersection_area([poly1, poly2])
    return overlap / area
|
xskillscore-0.0.15
|
xskillscore-0.0.15//xskillscore/core/deterministic.pyfile:/xskillscore/core/deterministic.py:function:_determine_input_core_dims/_determine_input_core_dims
|
def _determine_input_core_dims(dim, weights):
    """
    Determine input_core_dims based on the type of dim and weights.

    Parameters
    ----------
    dim : str, list
        The dimension(s) to apply the correlation along.
    weights : xarray.Dataset or xarray.DataArray or None
        Weights matching dimensions of ``dim`` to apply during the function.

    Returns
    -------
    list of lists
        input_core_dims used for xr.apply_ufunc.
    """
    dims = dim if isinstance(dim, list) else [dim]
    # Third entry covers the weights argument: [None] stands in when
    # no weights are supplied.
    if weights is None:
        return [dims, dims, [None]]
    return [dims, dims, dims]
|
pyknotid-0.5.3
|
pyknotid-0.5.3//pyknotid/io.pyfile:/pyknotid/io.py:function:from_csv/from_csv
|
def from_csv(filen, index_col=None, header_row=None, **kwargs):
    """
    Loads an array of points from the given filename parsed
    as a csv.

    .. note:: This function requires pandas to be installed.

    .. note:: For data in a space-separated column format,
       pass the argument `sep=' '` to read in as a csv.

    Parameters
    ----------
    filen : str
        The (relative) filename to load from.
    index_col: int or None
        The column that indexes the rows, defaults to None
        meaning no such column is present.
    header_row: int or None
        The row with header information (i.e. column names), with
        all previous rows ignored. Defaults to None, which means
        no such header exists.
    **kwargs :
        Passed directly to pandas.read_csv.
    """
    import pandas as pn
    # pandas removed DataFrame.from_csv and DataFrame.as_matrix in 1.0
    # (deprecated since 0.21/0.23); read_csv + .values are the supported
    # equivalents.  read_csv's parameter for the header row is `header`.
    df = pn.read_csv(filen, index_col=index_col, header=header_row, **kwargs)
    return df.values
|
mpmath-1.1.0
|
mpmath-1.1.0//mpmath/libmp/libmpf.pyfile:/mpmath/libmp/libmpf.py:function:mpf_shift/mpf_shift
|
def mpf_shift(s, n):
    """Multiply the raw mpf value ``s`` by 2**n exactly (no rounding)."""
    sign, man, exp, bc = s
    # Zero (and special values, which carry man == 0) are shift-invariant.
    if not man:
        return s
    # Shifting by 2**n only moves the exponent; mantissa and bitcount stay.
    return sign, man, exp + n, bc
|
pyphinb-2.9.4
|
pyphinb-2.9.4//pyphinb/validate.pyfile:/pyphinb/validate.py:function:time_scale/time_scale
|
def time_scale(time_scale):
    """Validate a macro temporal time scale.

    Raises ``ValueError`` unless the value is a positive integer.
    """
    invalid = isinstance(time_scale, float) or time_scale <= 0
    if invalid:
        raise ValueError('time scale must be a positive integer')
|
gj
|
gj//follow.pyfile:/follow.py:function:chunker/chunker
|
def chunker(reader, width):
    """Yield fixed-width chunks from each line produced by ``reader``.

    Blank lines are passed through as a single empty chunk.
    """
    for raw in reader:
        text = raw[:-1]  # strip the trailing newline character
        if not text:
            yield ''
            continue
        for start in range(0, len(text), width):
            yield text[start:start + width]
|
dropbox
|
dropbox//team.pyclass:UserSelectorArg/email
|
@classmethod
def email(cls, val):
    """
    Build a ``UserSelectorArg`` carrying the ``email`` tag.

    :param str val: the email address used as the tag's value
    :rtype: UserSelectorArg
    """
    tag = 'email'
    return cls(tag, val)
|
pyNastran
|
pyNastran//bdf/cards/dmig.pyfile:/bdf/cards/dmig.py:function:_get_row_col_map_2d/_get_row_col_map_2d
|
def _get_row_col_map_2d(matrix, GCi, GCj, ifo):
    """helper for ``get_row_col_map``

    Assigns dense 0-based indices to the unique (nid, component) keys of
    GCi (rows) and GCj (columns), preserving first-seen order.  When
    ``ifo == 6`` the matrix is treated as symmetric and the column maps
    alias the row maps.
    """
    rows = {}
    rows_reversed = {}
    irow = 0
    for nid, comp in GCi:
        key = (nid, comp)
        if key not in rows:
            rows[key] = irow
            rows_reversed[irow] = key
            irow += 1
    if ifo == 6:
        # Symmetric: keep extending the row numbering with any keys that
        # appear only in GCj, then share the maps for columns.
        for nid, comp in GCj:
            key = (nid, comp)
            if key not in rows:
                rows[key] = irow
                rows_reversed[irow] = key
                irow += 1
        cols = rows
        cols_reversed = rows_reversed
    else:
        cols = {}
        cols_reversed = {}
        jcol = 0
        for nid, comp in GCj:
            key = (nid, comp)
            if key not in cols:
                cols[key] = jcol
                cols_reversed[jcol] = key
                jcol += 1
    return rows, cols, rows_reversed, cols_reversed
|
transformations
|
transformations//transformations.pyfile:/transformations.py:function:quaternion_real/quaternion_real
|
def quaternion_real(quaternion):
    """Return the scalar (real) component of a quaternion as a float.

    >>> quaternion_real([3, 0, 1, 2])
    3.0
    """
    w = quaternion[0]
    return float(w)
|
horae.workflow-1.0a1
|
horae.workflow-1.0a1//horae/workflow/interfaces.pyclass:IWorkflow/inherited_states
|
def inherited_states(deactivated=False):
    """ Returns a list of inherited :py:class:`IState` s

    :param deactivated: presumably whether deactivated states are included
        in the result -- TODO confirm against implementations
    """
|
Products.CMFPlone-5.2.1
|
Products.CMFPlone-5.2.1//Products/CMFPlone/interfaces/controlpanel.pyclass:IControlPanel/enumConfiglets
|
def enumConfiglets(group=None):
    """ lists the Configlets of a group, returns them as dicts by
    calling .getAction() on each of them

    :param group: name of the configlet group to list; semantics of the
        ``None`` default are implementation-defined -- TODO confirm
    """
|
bpy
|
bpy//ops/ui.pyfile:/ops/ui.py:function:override_remove_button/override_remove_button
|
def override_remove_button(all: bool=True):
    """Remove an override operation.

    :param all: All, Reset to default values all elements of the array
    :type all: bool
    """
    # Stub generated for Blender's operator API; the real implementation
    # lives inside the Blender binary.
    pass
|
avocado
|
avocado//utils/process.pyfile:/utils/process.py:function:get_parent_pid/get_parent_pid
|
def get_parent_pid(pid):
    """
    Returns the parent PID for the given process

    :note: This is currently Linux specific.

    :param pid: The PID of child process
    :returns: The parent PID
    :rtype: int
    """
    with open('/proc/%d/stat' % pid, 'rb') as proc_stat:
        stat = proc_stat.read()
    # The comm field (2nd) may itself contain spaces, and the total field
    # count varies with kernel version, so indexing a plain split() from
    # either end (the old [-49]) is unreliable.  Everything after the
    # *last* ')' is well-formed: "<state> <ppid> ...", so the parent PID
    # is the second token there (see proc(5)).
    fields_after_comm = stat.rsplit(b')', 1)[1].split()
    return int(fields_after_comm[1])
|
wbdatapy-0.1
|
wbdatapy-0.1//wbdatapy/api.pyfile:/wbdatapy/api.py:function:parse_value_or_iterable/parse_value_or_iterable
|
def parse_value_or_iterable(arg):
    """
    If arg is a single value, return it as a string; if an iterable, return
    a ;-joined string of all values

    :param arg: a string, an int, or an iterable of strings
    :returns: string form of ``arg``
    """
    # isinstance checks replace the old `str(arg) == arg` / `type(arg) == int`
    # tests: same outcome without building a throwaway str() of every
    # argument, and int subclasses are handled too.
    if isinstance(arg, str):
        return arg
    if isinstance(arg, int):
        return str(arg)
    return ';'.join(arg)
|
pyaudiogame
|
pyaudiogame//inputs.pyclass:AppKitKeyboardListener/_get_flag_value
|
@staticmethod
def _get_flag_value(event):
"""Note, this may be able to be made more accurate,
i.e. handle two modifier keys at once."""
flags = event.modifierFlags()
if flags == 256:
value = 0
else:
value = 1
return value
|
arvados-python-client-2.0.2
|
arvados-python-client-2.0.2//arvados/arvfile.pyfile:/arvados/arvfile.py:function:split/split
|
def split(path):
    """split(path) -> streamname, filename

    Separate the stream name and file name in a /-separated stream path and
    return a tuple (stream_name, file_name). If no stream name is available,
    assume '.'.
    """
    parts = path.rsplit('/', 1)
    if len(parts) == 1:
        # No separator: the whole path is the file name.
        return '.', parts[0]
    return parts[0], parts[1]
|
thoraxe
|
thoraxe//subexons/rescue.pyfile:/subexons/rescue.py:function:_get_subexons_to_rescue/_get_subexons_to_rescue
|
def _get_subexons_to_rescue(subexon_table):
    """Return the subexons to rescue as a list of record dicts.

    Selects rows whose 'Cluster' is negative (unclustered subexons), keeps
    the 'SubexonIDCluster', 'Cluster' and 'SubexonProteinSequence' columns,
    drops duplicated 'SubexonIDCluster' values, and returns one dict per
    remaining row (pandas ``to_dict('records')`` -- the old docstring
    incorrectly said a DataFrame was returned).
    """
    return subexon_table.loc[subexon_table['Cluster'] < 0, [
        'SubexonIDCluster', 'Cluster', 'SubexonProteinSequence']
        ].drop_duplicates(subset='SubexonIDCluster').to_dict('records')
|
algorithms
|
algorithms//utils/tools.pyfile:/utils/tools.py:function:get_byte_array/get_byte_array
|
def get_byte_array(padded_encoded_string):
    """Convert a padded bit string (length a multiple of 8) into a bytearray.

    Each consecutive group of 8 characters is parsed as one binary byte.
    """
    return bytearray(
        int(padded_encoded_string[pos:pos + 8], 2)
        for pos in range(0, len(padded_encoded_string), 8)
    )
|
reclaimer-2.9.0
|
reclaimer-2.9.0//reclaimer/hek/defs/objs/mod2.pyfile:/reclaimer/hek/defs/objs/mod2.py:function:delocalize_compressed_verts/delocalize_compressed_verts
|
def delocalize_compressed_verts(comp_verts, local_nodes):
    """TODO: Update this function to also work on parsed vert data.

    Translates, in place, the local node indices stored in each compressed
    vertex into absolute node indices via ``local_nodes``.
    """
    # Node indices are stored pre-multiplied by 3; values below this bound
    # are local indices that still need translating.
    bound = len(local_nodes) * 3
    # Each 32-byte compressed vertex keeps its two node indices at byte
    # offsets 28 and 29.
    for base in range(28, len(comp_verts), 32):
        for offset in (base, base + 1):
            if comp_verts[offset] < bound:
                comp_verts[offset] = local_nodes[comp_verts[offset] // 3] * 3
|
cubicweb-seda-0.17.5
|
cubicweb-seda-0.17.5//cubicweb_seda/entities/itree.pyfile:/cubicweb_seda/entities/itree.py:function:next_child_ordering/next_child_ordering
|
def next_child_ordering(cnx, parent_eid, rtype):
    """Return value for the `ordering` attribute of a child freshly appended
    through `rtype` to parent entity with the given eid.
    """
    # Highest ordering among existing children related through `rtype`;
    # MAX(O) is None when the parent has no children yet.
    query = 'Any MAX(O) WHERE X {rtype} P, P eid %(p)s, X ordering O'.format(
        rtype=rtype)
    highest = cnx.execute(query, {'p': parent_eid})[0][0]
    return 1 if highest is None else highest + 1
|
linebot
|
linebot//models/base.pyclass:Base/get_or_new_from_json_dict_with_types
|
@staticmethod
def get_or_new_from_json_dict_with_types(data, cls_map, type_key='type'):
"""Get `cls` object w/ deserialization from json by using type key hint if needed.
If data is instance of one of cls, return data.
Else if data is instance of dict, create instance from dict.
Else, return None.
:param data:
:param cls_map:
:param type_key:
:rtype: object
"""
if isinstance(data, tuple(cls_map.values())):
return data
elif isinstance(data, dict):
type_val = data[type_key]
if type_val in cls_map:
return cls_map[type_val].new_from_json_dict(data)
return None
|
elfhex-0.0.8
|
elfhex-0.0.8//elfhex/program.pyclass:Byte/get_size
|
@staticmethod
def get_size():
"""Returns the size of a byte (one byte)."""
return 1
|
biocircuits
|
biocircuits//reg.pyfile:/reg.py:function:rr_or/rr_or
|
def rr_or(x, y, nx, ny):
    """Dimensionless production rate for a gene regulated by two
    repressors with OR logic in the absence of leakage.

    Parameters
    ----------
    x : float or NumPy array
        Concentration of first repressor.
    y : float or NumPy array
        Concentration of second repressor.
    nx : float
        Hill coefficient for first repressor.
    ny : float
        Hill coefficient for second repressor.

    Returns
    -------
    output : NumPy array or float
        (1 + x**nx + y**ny) / (1 + x**nx) / (1 + y**ny)
    """
    hill_x = x ** nx
    hill_y = y ** ny
    # Sequential division kept (not a single product denominator) so the
    # floating-point result matches the documented formula exactly.
    return (1 + hill_x + hill_y) / (1 + hill_x) / (1 + hill_y)
|
predf-0.1
|
predf-0.1//pre/util.pyfile:/pre/util.py:function:remove_suffix/remove_suffix
|
def remove_suffix(string):
    """Removes suffixes

    Strips all spaces, dots and commas from ``string``, then removes one
    generational/name suffix (Esq, Jr, Sr, Ii, ...) if present.

    Args:
        string:str
    Returns:
        returns str
    """
    suffixes = ['Esq', 'Ii', 'Iii', 'Iiii', 'Iv', 'Jnr', 'Jr', 'Sr']
    for ch in (' ', '.', ','):
        string = string.replace(ch, '')
    # Test longer suffixes first: previously 'Ii' shadowed 'Iii'/'Iiii'
    # (endswith('Ii') is True for names ending in 'Iii'), leaving stray
    # trailing characters behind.
    for suffix in sorted(suffixes, key=len, reverse=True):
        if string.endswith(suffix):
            return string[:-len(suffix)]
    return string
|
smappPy-0.1.32
|
smappPy-0.1.32//smappPy/entities.pyfile:/smappPy/entities.py:function:contains_url/contains_url
|
def contains_url(tweet):
    """Returns True if the tweet carries at least one 'urls' entity."""
    if 'entities' not in tweet:
        return False
    entities = tweet['entities']
    return 'urls' in entities and len(entities['urls']) > 0
|
pynufft
|
pynufft//src/_re_subroutine/re_subroutine.pyfile:/src/_re_subroutine/re_subroutine.py:function:cHypot/cHypot
|
def cHypot():
    """
    Return the kernel code for hypot, which computes the sqrt(x*x + y*y) without intermediate overflow.

    The kernel collapses each complex element of ``x`` and ``y`` to its
    magnitude, then stores hypot of the two magnitudes in ``x``'s real
    part and zeroes the imaginary part.
    """
    # Raw kernel source; KERNEL/GLOBAL_MEM macros are presumably expanded
    # by the OpenCL/CUDA runtime backend -- confirm against callers.
    R = """
    KERNEL void cHypot(GLOBAL_MEM float2 *x,
                      GLOBAL_MEM const float2 *y)
    {
    const unsigned int gid = get_global_id(0);
    float2 tmp_x;
    float2 tmp_y;
    tmp_x = x[gid];
    tmp_y = y[gid];
    tmp_x.x = hypot( tmp_x.x, tmp_x.y); // sqrt( tmp_x.x*tmp_x.x + tmp_x.y*tmp_x.y);
    tmp_y.x = hypot( tmp_y.x, tmp_y.y); // sqrt( tmp_y.x*tmp_y.x + tmp_y.y*tmp_y.y);
    x[gid].x = hypot(tmp_x.x, tmp_y.x);
    x[gid].y = 0.0;
    };
    """
    return R
|
gerrychain-0.2.12
|
gerrychain-0.2.12//versioneer.pyfile:/versioneer.py:function:scan_setup_py/scan_setup_py
|
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Returns the number of problems found (0 means setup.py looks fine).
    """
    found = set()
    setters = False
    errors = 0
    # Markers whose presence is required in a versioneer-enabled setup.py.
    required_markers = {
        'import versioneer': 'import',
        'versioneer.get_cmdclass()': 'cmdclass',
        'versioneer.get_version()': 'get_version',
    }
    with open('setup.py', 'r') as setup_file:
        for line in setup_file.readlines():
            for needle, tag in required_markers.items():
                if needle in line:
                    found.add(tag)
            # Legacy in-setup.py configuration that now lives in setup.cfg.
            if 'versioneer.VCS' in line or 'versioneer.versionfile_source' in line:
                setters = True
    if len(found) != 3:
        print('')
        print('Your setup.py appears to be missing some important items')
        print('(but I might be wrong). Please make sure it has something')
        print('roughly like the following:')
        print('')
        print(' import versioneer')
        print(' setup( version=versioneer.get_version(),')
        print(' cmdclass=versioneer.get_cmdclass(), ...)')
        print('')
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print('now lives in setup.cfg, and should be removed from setup.py')
        print('')
        errors += 1
    return errors
|
geepal
|
geepal//get_events.pyfile:/get_events.py:function:midnight_datetime/midnight_datetime
|
def midnight_datetime(dtime):
    """Return a copy of ``dtime`` with the time reset to midnight
    of the same day."""
    return dtime.replace(hour=0, minute=0, second=0, microsecond=0)
|
mailman-3.3.1
|
mailman-3.3.1//src/mailman/interfaces/mailinglist.pyclass:IAcceptableAliasSet/remove
|
def remove(alias):
    """Remove the given address as an acceptable alias for posting.

    :param alias: The email address to no longer accept as a recipient for
        implicit destination posting purposes.
    :type alias: string
    """
|
dkUtil-0.1.11
|
dkUtil-0.1.11//src/functionapi.pyclass:functionapi/getConfig
|
@staticmethod
def getConfig(conf, section, key):
"""
获取指定section下面的key
:param conf:
:param section:
:param key:
:return:
"""
return conf.get(section, key)
|
cogent3
|
cogent3//align/align.pyfile:/align/align.py:function:make_generic_scoring_dict/make_generic_scoring_dict
|
def make_generic_scoring_dict(match, mtype):
    """returns scoring dict for alignment

    Parameters
    ----------
    match : int
        value for a match, mismatches default to -1
    mtype
        MolType instance or string that can be used to get_moltype
    """
    from cogent3 import get_moltype
    mtype = get_moltype(mtype)
    # Score `match` on the diagonal (a == b), -1 everywhere else.
    return {(a, b): (match if a == b else -1) for a in mtype for b in mtype}
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/lambda_.pyfile:/pyboto3/lambda_.py:function:get_event_source_mapping/get_event_source_mapping
|
def get_event_source_mapping(UUID=None):
    """
    Returns configuration information for the specified event source mapping (see CreateEventSourceMapping ).
    This operation requires permission for the lambda:GetEventSourceMapping action.
    See also: AWS API Documentation

    Examples
    This operation retrieves a Lambda function's event source mapping
    Expected Output:
    :example: response = client.get_event_source_mapping(
        UUID='string'
    )

    :type UUID: string
    :param UUID: [REQUIRED]
    The AWS Lambda assigned ID of the event source mapping.

    :rtype: dict
    :return: {
        'UUID': 'string',
        'BatchSize': 123,
        'EventSourceArn': 'string',
        'FunctionArn': 'string',
        'LastModified': datetime(2015, 1, 1),
        'LastProcessingResult': 'string',
        'State': 'string',
        'StateTransitionReason': 'string'
    }

    """
    # Documentation-only stub generated from the AWS API model; the actual
    # request is dispatched dynamically by the boto3 client at runtime.
    pass
|
simplequi
|
simplequi//_canvas.pyclass:Canvas/__ensure_int_values
|
@staticmethod
def __ensure_int_values(*values):
"""Casts values to int.
This is to ensure Py2 compatibility with scripts that have problems with floor division -> true division on Py3.
If a single value is passed, returns a single value. If more than one is passed, returns a list of values.
:param values: arbitrary number of values that can be cast to int
:return: single value or list of values, depending on number of ``values`` passed
"""
res = []
for val in values:
res.append(int(val))
return res[0] if len(res) == 1 else res
|
edk2toolext
|
edk2toolext//capsule/capsule_helper.pyfile:/capsule/capsule_helper.py:function:get_normalized_version_string/get_normalized_version_string
|
def get_normalized_version_string(version_string):
    """takes in a version string and returns a normalized version that is
    compatible with inf and cat files

    Pads the dotted version string with '.0' components until it has four
    parts (three dots), e.g. '1.2' -> '1.2.0.0'.
    """
    missing = 3 - version_string.count('.')
    return version_string + '.0' * max(missing, 0)
|
graphql-example-0.4.4
|
graphql-example-0.4.4//vendor/pip/operations/check.pyfile:/vendor/pip/operations/check.py:function:get_incompatible_reqs/get_incompatible_reqs
|
def get_incompatible_reqs(dist, installed_dists):
    """Yield (requirement, installed_dist) pairs for each requirement of
    `dist` that is present in `installed_dists` at an incompatible version.
    """
    by_name = {d.project_name: d for d in installed_dists}
    for requirement in dist.requires():
        candidate = by_name.get(requirement.project_name)
        # `dist in requirement` tests version compatibility here (pkg_resources
        # style -- presumably; confirm against callers), so `not in` flags a
        # version conflict.
        if candidate and candidate not in requirement:
            yield requirement, candidate
|
pypi-publisher-0.0.4
|
pypi-publisher-0.0.4//ppp/ppp.pyfile:/ppp/ppp.py:function:verify/verify
|
def verify(server_name):
    """
    Verify the release on the given server (try to install it and run tests
    if relevant).

    :param server_name: name of the target server
    :return: nothing -- not implemented yet
    """
    raise NotImplementedError
|
databroker-1.0.2
|
databroker-1.0.2//databroker/utils.pyfile:/databroker/utils.py:function:get_fields/get_fields
|
def get_fields(header, name=None):
    """
    Return the set of all field names (a.k.a "data keys") in a header.

    Parameters
    ----------
    header : Header
    name : string, optional
        Get fields from only the one "event stream" with this name. If None
        (default) get fields from all event streams.

    Returns
    -------
    fields : set
    """
    fields = set()
    for descriptor in header['descriptors']:
        # Streams without an explicit name are treated as 'primary'.
        stream = descriptor.get('name', 'primary')
        if name is not None and name != stream:
            continue
        fields.update(descriptor['data_keys'].keys())
    return fields
|
obs
|
obs//libs/credential.pyfile:/libs/credential.py:function:status/status
|
def status(client, access_key, status=True):
    """Set security credentials status.

    POSTs the desired active/inactive state for the given access key and
    returns the service response.
    """
    return client.user.credentials.status(method='POST',
                                          accessKey=access_key,
                                          isActive=status)
|
openstack-congress-10.0.0
|
openstack-congress-10.0.0//antlr3runtime/Python/antlr3/dfa.pyclass:DFA/unpack
|
def unpack(cls, string):
    """@brief Unpack the runlength encoded table data.

    Terence implemented packed table initializers, because Java has a
    size restriction on .class files and the lookup tables can grow
    pretty large. The generated JavaLexer.java of the Java.g example
    would be about 15MB with uncompressed array initializers.

    Python does not have any size restrictions, but the compilation of
    such large source files seems to be pretty memory hungry. The memory
    consumption of the python process grew to >1.5GB when importing a
    15MB lexer, eating all my swap space and I was to impacient to see,
    if it could finish at all. With packed initializers that are unpacked
    at import time of the lexer module, everything works like a charm.
    """
    ret = []
    # Data is a sequence of (count, value) character pairs; 65535 is the
    # sentinel encoding for -1.  Floor division is required here: on
    # Python 3, `len(string) / 2` is a float and range() would raise.
    for i in range(len(string) // 2):
        n, v = ord(string[i * 2]), ord(string[i * 2 + 1])
        if v == 65535:
            v = -1
        ret += [v] * n
    return ret
|
pytzer
|
pytzer//parameters.pyfile:/parameters.py:function:psi_K_Na_OH_HMW84/psi_K_Na_OH_HMW84
|
def psi_K_Na_OH_HMW84(T, P):
    """c-c'-a: potassium sodium hydroxide [HMW84]."""
    # This ternary interaction parameter is zero in HMW84 and is only
    # tabulated at 25 degC, hence the validity flag.
    return 0, T == 298.15
|
hgext
|
hgext//rebase.pyfile:/rebase.py:function:clearcollapsemsg/clearcollapsemsg
|
def clearcollapsemsg(repo):
    """Delete the stored collapse commit message file, if present."""
    # ignoremissing: it's fine if no collapse message was ever written.
    repo.vfs.unlinkpath(b'last-message.txt', ignoremissing=True)
|
mercurial
|
mercurial//interfaces/repository.pyclass:imanifestdict/hasdir
|
def hasdir(dir):
    """Returns a bool indicating if a directory is in this manifest.

    :param dir: directory path to test for membership
    """
|
FluorSeg-0.0.26.dev0
|
FluorSeg-0.0.26.dev0//fluorseg/liffile.pyfile:/fluorseg/liffile.py:function:get_image_xml_meta/get_image_xml_meta
|
def get_image_xml_meta(root):
    """Return the list of OME ``Image`` elements under the ElementTree root."""
    # Tags are namespace-qualified with the OME 2016-06 schema URI.
    ome_image_tag = '{http://www.openmicroscopy.org/Schemas/OME/2016-06}Image'
    return root.findall(ome_image_tag)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.