repo
string, lengths 1-29
|
path
string, lengths 24-332
|
code
string, lengths 39-579k
|
---|---|---|
dropbox
|
dropbox//team_log.pyclass:EventDetails/secondary_mails_policy_changed_details
|
@classmethod
def secondary_mails_policy_changed_details(cls, val):
"""
Create an instance of this class set to the
``secondary_mails_policy_changed_details`` tag with value ``val``.
:param SecondaryMailsPolicyChangedDetails val:
:rtype: EventDetails
"""
return cls('secondary_mails_policy_changed_details', val)
|
contra-0.1.0
|
contra-0.1.0//versioneer.pyfile:/versioneer.py:function:plus_or_dot/plus_or_dot
|
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if '+' in pieces.get('closest-tag', ''):
return '.'
return '+'
|
sas-dlpy-1.2.0
|
sas-dlpy-1.2.0//dlpy/network.pyfile:/dlpy/network.py:function:get_num_configs/get_num_configs
|
def get_num_configs(keys, layer_type_prefix, layer_table):
"""
Extract the numerical options from the model table
Parameters
----------
keys : list-of-strings
Specifies the list of numerical variables
layer_type_prefix : string
Specifies the prefix of the options in the model table
layer_table : table
Specifies the selection of table containing the information
for the layer.
Returns
-------
:class:`dict`
Options that can be passed to layer definition
"""
layer_config = dict()
for key in keys:
try:
layer_config[key] = layer_table['_DLNumVal_'][layer_table[
'_DLKey1_'] == layer_type_prefix + '.' + key.lower().
replace('_', '')].tolist()[0]
except IndexError:
pass
return layer_config
|
pya2l-0.0.1
|
pya2l-0.0.1//pya2l/parser/grammar/parser.pyclass:A2lParser/p_compu_vtab_optional_list
|
@staticmethod
def p_compu_vtab_optional_list(p):
"""compu_vtab_optional_list : compu_vtab_optional
| compu_vtab_optional compu_vtab_optional_list"""
try:
p[0] = [p[1]] + p[2]
except IndexError:
p[0] = [p[1]]
|
explorator
|
explorator//functions.pyfile:/functions.py:function:filtered_string_DEPRECATED/filtered_string_DEPRECATED
|
def filtered_string_DEPRECATED(content, delete_words=None):
"""
    This function applies the following transformations to a string:
    - strips whitespace from both ends of the string
    - removes the substrings listed in delete_words
    - collapses consecutive spaces into a single space
    Parameters
    ----------
    content: (str) - the target string
    delete_words: (list of str) - substrings to remove from the target string
    Returns
    -------
    (str) - the processed string
"""
if delete_words is None:
delete_words = []
    if content == content:  # NaN is never equal to itself, so NaN inputs are returned unchanged
content = str(content)
if delete_words:
for word in delete_words:
content = content.replace(word, '')
content = content.strip()
tmp = ''
i = 0
while i < len(content):
            # drop a space when the next character is also a space
            if i < len(content) - 1 and content[i] + content[i + 1] != '  ':
tmp += content[i]
elif i == len(content) - 1:
tmp += content[i]
i += 1
content = tmp
return content
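# Usage sketch (hypothetical inputs, assuming the double-space check in the
# loop above): trims the ends, drops the listed substrings, collapses spaces.
assert filtered_string_DEPRECATED('  hello   world  ') == 'hello world'
assert filtered_string_DEPRECATED('foo bar', delete_words=['bar']) == 'foo'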
|
PyCO2SYS
|
PyCO2SYS//equilibria/p1atm.pyfile:/equilibria/p1atm.py:function:kSi_NBS_SMB64/kSi_NBS_SMB64
|
def kSi_NBS_SMB64(TempK, Sal):
"""Silicate dissociation constant following SMB64."""
return 4e-10
|
skilletlib
|
skilletlib//skillet/base.pyclass:Skillet/__initialize_variables
|
@staticmethod
def __initialize_variables(vars_dict: dict) ->dict:
"""
Ensure the proper default values are configured for each type of variable that may be present in the skillet
:param vars_dict: Skillet 'variables' stanza
:return: variables stanza with default values correctly parsed
"""
for variable in vars_dict:
default = variable.get('default', '')
type_hint = variable.get('type_hint', 'text')
if type_hint == 'dropdown' and 'dd_list' in variable:
for item in variable.get('dd_list', []):
if 'key' in item and 'value' in item:
if default == item['key'] and default != item['value']:
variable['default'] = item['value']
elif type_hint == 'radio' and 'rad_list' in variable:
rad_list = variable['rad_list']
for item in rad_list:
if 'key' in item and 'value' in item:
if default == item['key'] and default != item['value']:
variable['default'] = item['value']
elif type_hint == 'checkbox' and 'cbx_list' in variable:
cbx_list = variable['cbx_list']
for item in cbx_list:
if 'key' in item and 'value' in item:
if default == item['key'] and default != item['value']:
variable['default'] = item['value']
return vars_dict
|
fmn-2.1.1
|
fmn-2.1.1//fmn/util.pyfile:/fmn/util.py:function:new_packager/new_packager
|
def new_packager(topic, msg):
""" Returns a username if the message is about a new packager in FAS. """
if '.fas.group.member.sponsor' in topic:
group = msg['msg']['group']
if group == 'packager':
return msg['msg']['user']
return None
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/storagegateway.pyfile:/pyboto3/storagegateway.py:function:delete_tape/delete_tape
|
def delete_tape(GatewayARN=None, TapeARN=None):
"""
Deletes the specified virtual tape. This operation is only supported in the tape gateway architecture.
See also: AWS API Documentation
Examples
This example deletes the specified virtual tape.
Expected Output:
:example: response = client.delete_tape(
GatewayARN='string',
TapeARN='string'
)
:type GatewayARN: string
:param GatewayARN: [REQUIRED]
The unique Amazon Resource Name (ARN) of the gateway that the virtual tape to delete is associated with. Use the ListGateways operation to return a list of gateways for your account and region.
:type TapeARN: string
:param TapeARN: [REQUIRED]
The Amazon Resource Name (ARN) of the virtual tape to delete.
:rtype: dict
:return: {
'TapeARN': 'string'
}
"""
pass
|
fake-bpy-module-2.80-20200428
|
fake-bpy-module-2.80-20200428//mathutils/geometry.pyfile:/mathutils/geometry.py:function:normal/normal
|
def normal(vectors: list) ->'mathutils.Vector':
"""Returns the normal of a 3D polygon.
:param vectors: Vectors to calculate normals with
:type vectors: list
"""
pass
|
pykwalify-1.7.0
|
pykwalify-1.7.0//pykwalify/cli.pyfile:/pykwalify/cli.py:function:run/run
|
def run(cli_args):
"""
Split the functionality into 2 methods.
One for parsing the cli and one that runs the application.
"""
from .core import Core
c = Core(source_file=cli_args['--data-file'], schema_files=cli_args[
'--schema-file'], extensions=cli_args['--extension'],
strict_rule_validation=cli_args['--strict-rule-validation'],
fix_ruby_style_regex=cli_args['--fix-ruby-style-regex'],
allow_assertions=cli_args['--allow-assertions'])
c.validate()
return c
|
fake-bpy-module-2.79-20200428
|
fake-bpy-module-2.79-20200428//bpy/ops/file.pyfile:/bpy/ops/file.py:function:bookmark_add/bookmark_add
|
def bookmark_add():
"""Add a bookmark for the selected/active directory
"""
pass
|
backwork-backup-mongo-0.3.0
|
backwork-backup-mongo-0.3.0//mongo/mongo.pyclass:MongoRestore/parse_args
|
@classmethod
def parse_args(cls, subparsers):
"""Create the `mongo` subparser for the `backup` command."""
subparsers.add_parser(cls.command, description=cls.__doc__)
|
dirty_cat
|
dirty_cat//count_3_grams.pyfile:/count_3_grams.py:function:number_of_common_3grams/number_of_common_3grams
|
def number_of_common_3grams(string1, string2):
""" Return the number of common tri-grams in two strings
"""
tri_grams = set(zip(string1, string1[1:], string1[2:]))
tri_grams = tri_grams.intersection(zip(string2, string2[1:], string2[2:]))
return len(tri_grams)
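# Usage sketch (hypothetical strings): 'abcd' and 'zabcd' share the
# tri-grams 'abc' and 'bcd'.
assert number_of_common_3grams('abcd', 'zabcd') == 2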
|
bpy
|
bpy//ops/object.pyfile:/ops/object.py:function:skin_root_mark/skin_root_mark
|
def skin_root_mark():
"""Mark selected vertices as roots
"""
pass
|
reportlab-3.5.42
|
reportlab-3.5.42//src/reportlab/platypus/para.pyfile:/src/reportlab/platypus/para.py:function:simpleJustifyAlign/simpleJustifyAlign
|
def simpleJustifyAlign(line, currentLength, maxLength):
"""simple justification with only strings"""
strings = []
for x in line[:-1]:
if isinstance(x, str):
strings.append(x)
nspaces = len(strings) - 1
slack = maxLength - currentLength
text = ' '.join(strings)
if nspaces > 0 and slack > 0:
wordspacing = slack / float(nspaces)
result = [('wordSpacing', wordspacing), text, maxLength, (
'wordSpacing', 0)]
else:
result = [text, currentLength, ('nextLine', 0)]
nextlinemark = 'nextLine', 0
if line and line[-1] == nextlinemark:
result.append(nextlinemark)
return result
|
pynidm-3.5.5
|
pynidm-3.5.5//nidm/experiment/Utils.pyfile:/nidm/experiment/Utils.py:function:find_in_namespaces/find_in_namespaces
|
def find_in_namespaces(search_uri, namespaces):
"""
Looks through namespaces for search_uri
:return: URI if found else False
"""
for uris in namespaces:
if uris.uri == search_uri:
return uris
return False
|
black-19.10b0
|
black-19.10b0//black.pyfile:/black.py:function:make_comment/make_comment
|
def make_comment(content: str) ->str:
"""Return a consistently formatted comment from the given `content` string.
    All comments (except for "##", "#!", "#:", "#'", "#%%") should have a single
space between the hash sign and the content.
If `content` didn't start with a hash sign, one is provided.
"""
content = content.rstrip()
if not content:
return '#'
if content[0] == '#':
content = content[1:]
if content and content[0] not in " !:#'%":
content = ' ' + content
return '#' + content
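# Usage sketch (hypothetical inputs): a space is added after the hash sign
# unless the content starts with one of the special markers.
assert make_comment('comment') == '# comment'
assert make_comment('#comment') == '# comment'
assert make_comment('#!shebang') == '#!shebang'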
|
motmetrics
|
motmetrics//metrics.pyfile:/metrics.py:function:num_migrate/num_migrate
|
def num_migrate(df):
"""Total number of track migrate."""
return df.extra.Type.isin(['MIGRATE']).sum()
|
python-bitcoinlib-0.11.0
|
python-bitcoinlib-0.11.0//bitcoin/segwit_addr.pyfile:/bitcoin/segwit_addr.py:function:bech32_polymod/bech32_polymod
|
def bech32_polymod(values):
"""Internal function that computes the Bech32 checksum."""
generator = [996825010, 642813549, 513874426, 1027748829, 705979059]
chk = 1
for value in values:
top = chk >> 25
chk = (chk & 33554431) << 5 ^ value
for i in range(5):
chk ^= generator[i] if top >> i & 1 else 0
return chk
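# Minimal numeric sketch (not real address data): the checksum state starts
# at 1 and each value is folded in through a 5-bit shift and the generator taps.
assert bech32_polymod([]) == 1
assert bech32_polymod([0]) == 32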
|
homeassistant-0.109.6
|
homeassistant-0.109.6//homeassistant/util/color.pyfile:/homeassistant/util/color.py:function:_bound/_bound
|
def _bound(color_component: float, minimum: float=0, maximum: float=255
) ->float:
"""
Bound the given color component value between the given min and max values.
The minimum and maximum values will be included in the valid output.
i.e. Given a color_component of 0 and a minimum of 10, the returned value
will be 10.
"""
color_component_out = max(color_component, minimum)
return min(color_component_out, maximum)
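# Usage sketch (hypothetical values): the component is clamped to [minimum, maximum].
assert _bound(300) == 255
assert _bound(-5) == 0
assert _bound(0, minimum=10) == 10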
|
fake-blender-api-2.79-0.3.1
|
fake-blender-api-2.79-0.3.1//bpy/ops/particle.pyfile:/bpy/ops/particle.py:function:shape_cut/shape_cut
|
def shape_cut():
"""Cut hair to conform to the set shape object
"""
pass
|
kukibanshee-0.3.6
|
kukibanshee-0.3.6//kukibanshee/drone.pyfile:/kukibanshee/drone.py:function:dict2cknv/dict2cknv
|
def dict2cknv(ckpd):
"""
ckpd = {"TS": "0105b666"}
ckname,ckvalue = dict2cknv(ckpd)
ckname
ckvalue
"""
ckname = list(ckpd.keys())[0]
ckvalue = list(ckpd.values())[0]
return ckname, ckvalue
|
Swoop-0.6.5
|
Swoop-0.6.5//Swoop/Swoop.pyclass:Variant/_from_et
|
@classmethod
def _from_et(cls, root, parent):
"""
Create a :class:`Variant` from a :code:`variant` element.
:param root: The element tree tree to parse.
:param parent: :class:`EagleFilePart` that should hold the resulting :class:`EagleFilePart`
:rtype: :class:`Variant`
"""
n = cls()
n._init_from_et(root, parent)
return n
|
graphene-django-plus-2.0
|
graphene-django-plus-2.0//graphene_django_plus/mutations.pyclass:BaseModelMutation/before_delete
|
@classmethod
def before_delete(cls, info, instance):
"""Perform "before delete" operations.
Override this to perform any operation on the instance
before its `.delete()` method is called.
"""
pass
|
brainload-0.3.5
|
brainload-0.3.5//src/brainload/nitools.pyfile:/src/brainload/nitools.py:function:write_lines_to_text_file/write_lines_to_text_file
|
def write_lines_to_text_file(lines, file_name, line_sep='\n'):
"""
Write the lines to a text file.
Write the lines to a text file, overwriting it in case it exists.
Parameters
----------
lines: list of str
        The lines to write; they must not contain line endings.
file_name: str
Path to new text file to create (or overwrite if it exists).
    line_sep: str, optional
        Line separator. Defaults to "\n".
"""
with open(file_name, 'w') as f:
for l in lines:
f.write('%s%s' % (l, line_sep))
|
g2p-0.5.20200421
|
g2p-0.5.20200421//g2p/mappings/utils.pyfile:/g2p/mappings/utils.py:function:expand_abbreviations/expand_abbreviations
|
def expand_abbreviations(data):
""" Exapand a flattened DefaultDict into a CSV-formatted list of lists
"""
lines = []
if data:
for key in data.keys():
line = [key]
for col in data[key]:
line.append(col)
lines.append(line)
if not lines:
while len(lines) < 10:
lines.append(['', '', '', '', '', ''])
return lines
|
anvio-6.2
|
anvio-6.2//anvio/utils.pyfile:/anvio/utils.py:function:convert_SSM_to_single_accession/convert_SSM_to_single_accession
|
def convert_SSM_to_single_accession(matrix_data):
"""
The substitution scores from the SSM dictionaries created in anvio.data.SSMs are accessed via a dictionary of
dictionaries, e.g. data["Ala"]["Trp"]. This returns a new dictionary accessed via the concatenated sequence element
pair, e.g. data["AlaTrp"], data["AT"], etc. where they are ordered alphabetically.
"""
items = matrix_data.keys()
new_data = {}
for row in items:
for column in items:
if row > column:
continue
new_data[''.join([row, column])] = matrix_data[row][column]
return new_data
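# Usage sketch with a hypothetical 2x2 substitution matrix: keys are joined in
# alphabetical order, so only the upper triangle is kept.
ssm = {'A': {'A': 1, 'T': -1}, 'T': {'A': -1, 'T': 1}}
assert convert_SSM_to_single_accession(ssm) == {'AA': 1, 'AT': -1, 'TT': 1}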
|
odoo
|
odoo//api.pyfile:/api.py:function:cr_context/cr_context
|
def cr_context(method):
""" Decorate a traditional-style method that takes ``cr``, ``context`` as parameters. """
method._api = 'cr_context'
return method
|
giganticode-dataprep-1.0.0a12
|
giganticode-dataprep-1.0.0a12//dataprep/bpepkg/wild_bpe.pyfile:/dataprep/bpepkg/wild_bpe.py:function:are_symmetric/are_symmetric
|
def are_symmetric(pair1: str, pair2: str):
"""
>>> are_symmetric("abc dcba", "dcba abc")
True
>>> are_symmetric("abc dfe", "efd cba")
False
>>> are_symmetric("a c", "ac")
False
"""
if len(pair1) != len(pair2):
return False
split1 = pair1.split(' ')
split2 = pair2.split(' ')
return split1[0] == split2[1] and split1[1] == split2[0]
|
coremltools
|
coremltools//converters/keras/_layers2.pyfile:/converters/keras/_layers2.py:function:convert_dense/convert_dense
|
def convert_dense(builder, layer, input_names, output_names, keras_layer):
"""
Convert a dense layer from keras to coreml.
    Parameters
    ----------
    keras_layer: layer
        A keras layer object.
    builder: NeuralNetworkBuilder
        A neural network builder object.
"""
input_name, output_name = input_names[0], output_names[0]
has_bias = keras_layer.use_bias
W = keras_layer.get_weights()[0].T
Wb = keras_layer.get_weights()[1].T if has_bias else None
output_channels, input_channels = W.shape
builder.add_inner_product(name=layer, W=W, b=Wb, input_channels=
input_channels, output_channels=output_channels, has_bias=has_bias,
input_name=input_name, output_name=output_name)
|
pytzer
|
pytzer//parameters.pyfile:/parameters.py:function:psi_Cs_H_Cl_PK74/psi_Cs_H_Cl_PK74
|
def psi_Cs_H_Cl_PK74(T, P):
"""c-c'-a: caesium hydrogen chloride [PK74]."""
psi = -0.019
valid = T == 298.15
return psi, valid
|
inary-1.0.1
|
inary-1.0.1//inary/util.pyfile:/inary/util.py:function:join_path/join_path
|
def join_path(a, *p):
"""Join two or more pathname components.
Python os.path.join cannot handle '/' at the start of latter components.
"""
path = a
for b in p:
b = b.lstrip('/')
if path == '' or path.endswith('/'):
path += b
else:
path += '/' + b
return path
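# Usage sketch (hypothetical paths): a leading '/' on later components is
# stripped, unlike os.path.join, which would discard the earlier parts.
assert join_path('/usr', '/local', 'bin') == '/usr/local/bin'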
|
AccessControl-4.2
|
AccessControl-4.2//src/AccessControl/interfaces.pyclass:ISecurityPolicy/checkPermission
|
def checkPermission(permission, object, context):
"""Check whether the current user has a permission w.r.t. an object.
"""
|
Products.listen-0.7.1
|
Products.listen-0.7.1//Products/listen/interfaces/mail_message.pyclass:IMailFromString/createMailFromMessage
|
def createMailFromMessage(message_string, attachments=False):
"""
Automatically sets the properties of the message object
based on an input mail message. May optionally include
attachments.
"""
|
bpy
|
bpy//ops/clip.pyfile:/ops/clip.py:function:track_settings_as_default/track_settings_as_default
|
def track_settings_as_default():
"""Copy tracking settings from active track to default settings
"""
pass
|
recourse
|
recourse//cplex_helper.pyfile:/cplex_helper.py:function:toggle_cpx_preprocessing/toggle_cpx_preprocessing
|
def toggle_cpx_preprocessing(cpx, toggle=True):
"""toggles pre-processing on/off for debugging / computational experiments"""
if toggle:
cpx.parameters.preprocessing.aggregator.reset()
cpx.parameters.preprocessing.reduce.reset()
cpx.parameters.preprocessing.presolve.reset()
cpx.parameters.preprocessing.coeffreduce.reset()
cpx.parameters.preprocessing.boundstrength.reset()
else:
cpx.parameters.preprocessing.aggregator.set(0)
cpx.parameters.preprocessing.reduce.set(0)
cpx.parameters.preprocessing.presolve.set(0)
cpx.parameters.preprocessing.coeffreduce.set(0)
cpx.parameters.preprocessing.boundstrength.set(0)
return cpx
|
pyqode
|
pyqode//core/dialogs/encodings.pyclass:DlgEncodingsChoice/choose_encoding
|
@classmethod
def choose_encoding(cls, parent, path, encoding):
"""
Show the encodings dialog and returns the user choice.
:param parent: parent widget.
:param path: file path
:param encoding: current file encoding
:return: selected encoding
"""
dlg = cls(parent, path, encoding)
dlg.exec_()
return dlg.ui.comboBoxEncodings.current_encoding
|
pattern
|
pattern//helpers.pyfile:/helpers.py:function:encode_string/encode_string
|
def encode_string(v, encoding='utf-8'):
""" Returns the given value as a Python byte string (if possible).
"""
if isinstance(encoding, str):
encoding = ((encoding,),) + (('windows-1252',), ('utf-8', 'ignore'))
if isinstance(v, str):
for e in encoding:
try:
return v.encode(*e)
except:
pass
return v
return bytes(v)
|
bdsf
|
bdsf//output.pyfile:/output.py:function:ra2hhmmss/ra2hhmmss
|
def ra2hhmmss(deg):
"""Convert RA coordinate (in degrees) to HH MM SS"""
from math import modf
if deg < 0:
deg += 360.0
x, hh = modf(deg / 15.0)
x, mm = modf(x * 60)
ss = x * 60
return int(hh), int(mm), ss
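# Usage sketch (hypothetical coordinate): 202.5 degrees is 13h 30m 00s.
assert ra2hhmmss(202.5) == (13, 30, 0.0)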
|
python-icat-0.17.0
|
python-icat-0.17.0//icat/chunkedhttp.pyfile:/icat/chunkedhttp.py:function:stringiterator/stringiterator
|
def stringiterator(buffer):
"""Wrap a string in an iterator that yields it in one single chunk."""
if len(buffer) > 0:
yield buffer
|
testinfra
|
testinfra//modules/supervisor.pyclass:Supervisor/get_services
|
@classmethod
def get_services(cls):
"""Get a list of services running under supervisor
>>> host.supervisor.get_services()
[<Supervisor(name="gunicorn", status="RUNNING", pid=4232)>
<Supervisor(name="celery", status="FATAL", pid=None)>]
"""
services = []
for line in cls(None).check_output('supervisorctl status').splitlines():
attrs = cls._parse_status(line)
service = cls(attrs['name'], attrs)
services.append(service)
return services
|
tyssue-0.7.1
|
tyssue-0.7.1//tyssue/topology/sheet_topology.pyfile:/tyssue/topology/sheet_topology.py:function:face_division/face_division
|
def face_division(sheet, mother, vert_a, vert_b):
"""
Divides the face associated with edges
indexed by `edge_a` and `edge_b`, splitting it
    in the middle of those edges.
"""
face_cols = sheet.face_df.loc[mother:mother]
sheet.face_df = sheet.face_df.append(face_cols, ignore_index=True)
sheet.face_df.index.name = 'face'
daughter = int(sheet.face_df.index[-1])
edge_cols = sheet.edge_df[sheet.edge_df['face'] == mother].iloc[0:1]
sheet.edge_df = sheet.edge_df.append(edge_cols, ignore_index=True)
new_edge_m = sheet.edge_df.index[-1]
sheet.edge_df.loc[new_edge_m, 'srce'] = vert_b
sheet.edge_df.loc[new_edge_m, 'trgt'] = vert_a
sheet.edge_df = sheet.edge_df.append(edge_cols, ignore_index=True)
new_edge_d = sheet.edge_df.index[-1]
sheet.edge_df.loc[new_edge_d, 'srce'] = vert_a
sheet.edge_df.loc[new_edge_d, 'trgt'] = vert_b
m_data = sheet.edge_df[sheet.edge_df['face'] == mother]
daughter_edges = [new_edge_d]
srce, trgt = vert_a, vert_b
srces, trgts = m_data[['srce', 'trgt']].values.T
while trgt != vert_a:
srce, trgt = trgt, trgts[srces == trgt][0]
daughter_edges.append(m_data[(m_data['srce'] == srce) & (m_data[
'trgt'] == trgt)].index[0])
sheet.edge_df.loc[daughter_edges, 'face'] = daughter
sheet.edge_df.index.name = 'edge'
sheet.reset_topo()
return daughter
|
PyKernelLogit
|
PyKernelLogit//bootstrap.pyfile:/bootstrap.py:function:ensure_replicates_kwarg_validity/ensure_replicates_kwarg_validity
|
def ensure_replicates_kwarg_validity(replicate_kwarg):
"""
Ensures `replicate_kwarg` is either 'bootstrap' or 'jackknife'. Raises a
helpful ValueError otherwise.
"""
if replicate_kwarg not in ['bootstrap', 'jackknife']:
msg = "`replicates` MUST be either 'bootstrap' or 'jackknife'."
raise ValueError(msg)
return None
|
velenxc-0.1.20171107163220
|
velenxc-0.1.20171107163220//velenxc/funclibs/m.pyfile:/velenxc/funclibs/m.py:function:m_de_weight_of_list/m_de_weight_of_list
|
def m_de_weight_of_list(seq):
"""
    deduplicate a list, for example [1, 1, 2] returns [1, 2] (element order is not guaranteed)
:param seq: list
:return: list
"""
return list(set(seq))
|
mailman-3.3.1
|
mailman-3.3.1//src/mailman/interfaces/pipeline.pyclass:IPipeline/__iter__
|
def __iter__():
"""Iterate over all the handlers in this pipeline."""
|
psf_utils-0.6.0
|
psf_utils-0.6.0//psf_utils/parse.pyfile:/psf_utils/parse.py:function:p_type_section/p_type_section
|
def p_type_section(p):
"""type_section : TYPE types"""
p[0] = dict(p[2])
|
nbsite-0.6.7
|
nbsite-0.6.7//nbsite/examples/sites/holoviews/holoviews/core/data/multipath.pyclass:MultiInterface/shape
|
@classmethod
def shape(cls, dataset):
"""
Returns the shape of all subpaths, making it appear like a
single array of concatenated subpaths separated by NaN values.
"""
if not dataset.data:
return 0, len(dataset.dimensions())
rows, cols = 0, 0
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
r, cols = ds.interface.shape(ds)
rows += r
return rows + len(dataset.data) - 1, cols
|
neutron_tempest_plugin
|
neutron_tempest_plugin//api/base.pyclass:BaseAdminNetworkTest/create_log
|
@classmethod
def create_log(cls, name, description=None, resource_type='security_group',
resource_id=None, target_id=None, event='ALL', enabled=True):
"""Wrapper utility that returns a test log object."""
log_args = {'name': name, 'description': description, 'resource_type':
resource_type, 'resource_id': resource_id, 'target_id': target_id,
'event': event, 'enabled': enabled}
body = cls.admin_client.create_log(**log_args)
log_object = body['log']
cls.log_objects.append(log_object)
return log_object
|
pya2l-0.0.1
|
pya2l-0.0.1//pya2l/parser/grammar/parser.pyclass:A2lParser/p_if_data_xcp_optional_list
|
@staticmethod
def p_if_data_xcp_optional_list(p):
"""if_data_xcp_optional_list : if_data_xcp_optional
| if_data_xcp_optional if_data_xcp_optional_list"""
try:
p[0] = [p[1]] + p[2]
except IndexError:
p[0] = [p[1]]
|
pkgcore-0.10.12
|
pkgcore-0.10.12//src/pkgcore/_vendor/tabulate.pyfile:/src/pkgcore/_vendor/tabulate.py:function:_build_simple_row/_build_simple_row
|
def _build_simple_row(padded_cells, rowfmt):
"""Format row according to DataRow format without padding."""
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
|
restkiss
|
restkiss//pyr.pyclass:PyramidResource/build_routename
|
@classmethod
def build_routename(cls, name, routename_prefix=None):
"""
Given a ``name`` & an optional ``routename_prefix``, this generates a
name for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param routename_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blogpost_list``
:type routename_prefix: string
:returns: The final name
:rtype: string
"""
if routename_prefix is None:
routename_prefix = 'api_{0}'.format(cls.__name__.replace('Resource',
'').lower())
routename_prefix = routename_prefix.rstrip('_')
return '_'.join([routename_prefix, name])
|
python-pySAP-0.0.3
|
python-pySAP-0.0.3//pysap/extensions/formating.pyfile:/pysap/extensions/formating.py:function:get_hbr/get_hbr
|
def get_hbr(A):
""" Return the half-bottom-right of the given array.
"""
nx, _ = A.shape
li = int(nx / 2)
return A[li:, li:]
|
netpyne
|
netpyne//wrappers.pyfile:/wrappers.py:function:load/load
|
def load(filename, simConfig=None, output=False, instantiate=True,
createNEURONObj=True):
""" Sequence of commands load, simulate and analyse network """
from . import sim
sim.initialize()
sim.cfg.createNEURONObj = createNEURONObj
sim.loadAll(filename, instantiate=instantiate, createNEURONObj=
createNEURONObj)
if simConfig:
sim.setSimCfg(simConfig)
if len(sim.net.cells) == 0 and instantiate:
pops = sim.net.createPops()
cells = sim.net.createCells()
conns = sim.net.connectCells()
stims = sim.net.addStims()
simData = sim.setupRecording()
if output:
try:
return pops, cells, conns, stims, simData
except:
pass
|
stackifyapm
|
stackifyapm//utils/wrapt/wrappers.pyfile:/utils/wrapt/wrappers.py:function:with_metaclass/with_metaclass
|
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta('NewBase', bases, {})
|
pycouchdb
|
pycouchdb//utils.pyfile:/utils.py:function:_path_from_name/_path_from_name
|
def _path_from_name(name, type):
"""
Expand a 'design/foo' style name to its full path as a list of
segments.
>>> _path_from_name("_design/test", '_view')
['_design', 'test']
>>> _path_from_name("design/test", '_view')
['_design', 'design', '_view', 'test']
"""
if name.startswith('_'):
return name.split('/')
design, name = name.split('/', 1)
return ['_design', design, type, name]
|
thoraxe-0.5.1
|
thoraxe-0.5.1//thoraxe/subexons/subexons.pyfile:/thoraxe/subexons/subexons.py:function:_update_to_merge_list/_update_to_merge_list
|
def _update_to_merge_list(to_merge, subexon_1, subexon_2):
"""
Add subexon_1 and subexon_2 to the to_merge list.
>>> _update_to_merge_list([], 1, 2)
[{1, 2}]
>>> _update_to_merge_list([{1, 2}], 2, 3)
[{1, 2, 3}]
>>> _update_to_merge_list([{1, 2}], 8, 9)
[{1, 2}, {8, 9}]
"""
group = {subexon_1, subexon_2}
for existing_group in to_merge:
if existing_group.intersection(group):
existing_group.update(group)
return to_merge
to_merge.append(group)
return to_merge
|
weblate
|
weblate//formats/external.pyclass:XlsxFormat/mimetype
|
@staticmethod
def mimetype():
"""Return most common mime type for format."""
return 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
|
vertigo-0.1.3
|
vertigo-0.1.3//vertigo/zip_fns.pyfile:/vertigo/zip_fns.py:function:first/first
|
def first(graphs):
"""Iterate over the first graph's keys and no others."""
if graphs and graphs[0]:
for key in graphs[0].key_iter():
yield key
|
instapy
|
instapy//file_manager.pyfile:/file_manager.py:function:differ_paths/differ_paths
|
def differ_paths(old, new):
""" Compare old and new paths """
if old and old.endswith(('\\', '/')):
old = old[:-1]
old = old.replace('\\', '/')
if new and new.endswith(('\\', '/')):
new = new[:-1]
new = new.replace('\\', '/')
return new != old
|
borgmatic
|
borgmatic//commands/arguments.pyfile:/commands/arguments.py:function:parse_global_arguments/parse_global_arguments
|
def parse_global_arguments(unparsed_arguments, top_level_parser, subparsers):
"""
Given a sequence of arguments, a top-level parser (containing subparsers), and a subparsers
object as returned by argparse.ArgumentParser().add_subparsers(), parse and return any global
arguments as a parsed argparse.Namespace instance.
"""
remaining_arguments = list(unparsed_arguments)
present_subparser_names = set()
for subparser_name, subparser in subparsers.choices.items():
if subparser_name not in remaining_arguments:
continue
present_subparser_names.add(subparser_name)
unused_parsed, remaining_arguments = subparser.parse_known_args(
remaining_arguments)
if (not present_subparser_names and '--help' not in unparsed_arguments and
'-h' not in unparsed_arguments):
for subparser_name in ('prune', 'create', 'check'):
subparser = subparsers.choices[subparser_name]
unused_parsed, remaining_arguments = subparser.parse_known_args(
remaining_arguments)
for subparser_name in present_subparser_names:
if subparser_name in remaining_arguments:
remaining_arguments.remove(subparser_name)
return top_level_parser.parse_args(remaining_arguments)
|
yggdrasil
|
yggdrasil//tools.pyfile:/tools.py:function:eval_kwarg/eval_kwarg
|
def eval_kwarg(x):
"""If x is a string, eval it. Otherwise just return it.
Args:
x (str, obj): String to be evaluated as an object or an object.
Returns:
obj: Result of evaluated string or the input object.
"""
if isinstance(x, str):
try:
return eval(x)
except NameError:
return x
return x
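# Usage sketch (hypothetical inputs): literal-looking strings are evaluated,
# bare words fall back to the original string, non-strings pass through.
assert eval_kwarg('[1, 2]') == [1, 2]
assert eval_kwarg('foo') == 'foo'
assert eval_kwarg(3) == 3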
|
profile-viewer-0.1.5
|
profile-viewer-0.1.5//versioneer.pyfile:/versioneer.py:function:render_pep440_pre/render_pep440_pre
|
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance']:
rendered += '.post.dev%d' % pieces['distance']
else:
rendered = '0.post.dev%d' % pieces['distance']
return rendered
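# Usage sketch with hypothetical versioneer pieces: commits on top of a tag get
# a .post.dev suffix, and no tag at all falls back to 0.post.dev.
assert render_pep440_pre({'closest-tag': '1.2', 'distance': 3}) == '1.2.post.dev3'
assert render_pep440_pre({'closest-tag': '', 'distance': 5}) == '0.post.dev5'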
|
pykerberos-1.2.1
|
pykerberos-1.2.1//pysrc/kerberos.pyfile:/pysrc/kerberos.py:function:authGSSClientClean/authGSSClientClean
|
def authGSSClientClean(context):
"""
Destroys the context for GSSAPI client-side authentication. This function is provided for API
compatibility with original pykerberos but does nothing. The context object destroys itself
when it is reclaimed.
@param context: the context object returned from authGSSClientInit.
@return: a result code (see above).
"""
|
lndmanage-0.10.0
|
lndmanage-0.10.0//lndmanage/lib/rebalance.pyclass:Rebalancer/_get_source_and_target_channels
|
@staticmethod
def _get_source_and_target_channels(channel_one, channel_two,
rebalance_direction):
"""
Determines what the sending and receiving channel ids are.
:param channel_one: first channel
:type channel_one: int
:param channel_two: second channel
:type channel_two: int
:param rebalance_direction: positive, if receiving, negative if sending
:type rebalance_direction: float
:return: sending and receiving channel
:rtype: (int, int)
"""
if rebalance_direction < 0:
source = channel_one
target = channel_two
else:
source = channel_two
target = channel_one
return source, target
|
beatle-0.2.4
|
beatle-0.2.4//beatle/ctx/_context.pyclass:context/RenderUndoRedoRemoving
|
@classmethod
def RenderUndoRedoRemoving(cls, obj):
"""render undo/redo"""
    raise NotImplementedError('not implemented')
|
pypowervm-1.1.24
|
pypowervm-1.1.24//pypowervm/wrappers/entry_wrapper.pyclass:ElementWrapper/wrap
|
@classmethod
def wrap(cls, element, **kwargs):
"""Wrap an existing adapter.Element OR construct a fresh one.
This method should usually be invoked from an ElementWrapper subclass
decorated by Wrapper.pvm_type, and an instance of that subclass will be
returned.
If invoked directly from ElementWrapper, we attempt to detect whether
an appropriate subclass exists based on the Element's tag. If so, that
subclass is used; otherwise a generic ElementWrapper is used.
:param element: An existing adapter.Element to wrap.
:param **kwargs: Arbitrary attributes to set on the new ElementWrapper.
:returns: An ElementWrapper (subclass) instance containing the element.
"""
wcls = cls._class_for_element(element) if cls.schema_type is None else cls
wrap = wcls()
wrap.element = element
for key, val in kwargs.items():
setattr(wrap, key, val)
return wrap
|
fractals
|
fractals//main.pyclass:Figures/point_to_complex
|
@staticmethod
def point_to_complex(point):
"""Transform tuple to complex
:param point: Point to convert
:type point: tuple
:return: Complex representation of point
:rtype: complex"""
return complex(point[0], point[1])
|
nanomongo-0.4.1
|
nanomongo-0.4.1//nanomongo/field.pyclass:Field/check_kwargs
|
@classmethod
def check_kwargs(cls, kwargs, data_type):
"""Check keyword arguments & their values given to ``Field``
constructor such as ``default``, ``required`` ...
"""
err_str = (data_type.__name__ +
': %s keyword argument not allowed or "%s" value invalid')
for k, v in kwargs.items():
if k in cls.allowed_kwargs:
if not cls.allowed_kwargs[k](v):
raise TypeError(err_str % (k, v))
elif data_type in cls.extra_kwargs and k in cls.extra_kwargs[data_type
]:
if not cls.extra_kwargs[data_type][k](v):
raise TypeError(err_str % (k, v))
else:
raise TypeError(err_str % (k, v))
|
payflow
|
payflow//items.pyclass:BankItem/from_data
|
@classmethod
def from_data(cls, data):
"""
    Constructor that builds a BankItem instance from a data
    dictionary.
"""
return cls(data.get('bank_id'), data.get('name'), data.get('message'),
data.get('min_amount'), data.get('type'), data.get('parent'))
|
octomachinery-0.2.1
|
octomachinery-0.2.1//octomachinery/github/models/checks_api_requests.pyfile:/octomachinery/github/models/checks_api_requests.py:function:optional_converter/optional_converter
|
def optional_converter(kwargs_dict, convert_to_cls):
"""Instantiate a class instances from dict."""
if kwargs_dict is not None and not isinstance(kwargs_dict, convert_to_cls):
return convert_to_cls(**kwargs_dict)
return kwargs_dict
|
bpy
|
bpy//ops/wm.pyfile:/ops/wm.py:function:window_close/window_close
|
def window_close():
"""Close the current window
"""
pass
|
s4d-0.1.4.6
|
s4d-0.1.4.6//s4d/diar.pyclass:Diar/to_string_seg
|
@classmethod
def to_string_seg(cls, diar):
"""
    Transform a diarization into a list of segment strings.
    :param diar: a diarization
    :return: a list of strings, one per segment
"""
lst = []
for segment in diar:
gender = 'U'
if diar._attributes.exist('gender'):
gender = segment['gender']
env = 'U'
if diar._attributes.exist('env'):
env = segment['env']
channel = 'U'
if diar._attributes.exist('channel'):
channel = segment['channel']
lst.append('{:s} 1 {:d} {:d} {:s} {:s} {:s} {:s}\n'.format(segment[
'show'], segment['start'], segment['stop'] - segment['start'],
gender, channel, env, segment['cluster']))
return lst
|
agraph-python-101.0.3
|
agraph-python-101.0.3//src/franz/openrdf/sail/spec.pyfile:/src/franz/openrdf/sail/spec.py:function:reason/reason
|
def reason(store, reasoner='rdfs++'):
"""
Create a session spec that adds reasoning support to another session.
:param store: Base session spec.
:type store: string
    :param reasoner: Reasoning type (e.g. `"rdfs++"` or `"restriction"`).
:type reasoner: string
:return: A session spec string.
:rtype: string
"""
return '%s[%s]' % (store, reasoner)
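# Usage sketch (hypothetical store spec): the reasoner is appended in brackets.
assert reason('<memory>') == '<memory>[rdfs++]'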
|
txkube
|
txkube//_interface.pyclass:IKubernetesClient/delete
|
def delete(obj):
"""
Delete a single object.
:param IObject obj: A description of which object to delete. The *kind*,
*namespace*, and *name* address the specific object to delete.
:return Deferred(None): The Deferred fires when the object has been
deleted.
"""
|
CrossMap-0.4.2
|
CrossMap-0.4.2//lib/cmmodule/BED.pyfile:/lib/cmmodule/BED.py:function:tillingBed/tillingBed
|
def tillingBed(chrName, chrSize, stepSize=10000):
"""tilling whome genome into small sizes"""
for start in range(0, chrSize, stepSize):
end = start + stepSize
if end < chrSize:
yield chrName, start, end
else:
yield chrName, start, chrSize
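# Usage sketch (hypothetical chromosome): the last window is truncated at the
# chromosome end.
assert list(tillingBed('chr1', 25000, 10000)) == [
    ('chr1', 0, 10000), ('chr1', 10000, 20000), ('chr1', 20000, 25000)]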
|
redscale-0.0.1.dev4
|
redscale-0.0.1.dev4//redscale/app.pyclass:RedScaleApp/extend_parser
|
@staticmethod
def extend_parser(parser):
"""Extend specific app parser with these general argument(s)"""
parser.add_argument('--cloud', '-c', dest='cloud', required=True, help=
'the name of the cloud')
|
clefairy-0.105
|
clefairy-0.105//pokemon/convert.pyfile:/pokemon/convert.py:function:scale_image/scale_image
|
def scale_image(image, new_width):
"""Resizes an image preserving the aspect ratio.
"""
original_width, original_height = image.size
aspect_ratio = original_height / float(original_width)
new_height = int(aspect_ratio * new_width)
new_image = image.resize((new_width * 2, new_height))
return new_image
|
robottelo
|
robottelo//cli/scap_tailoring_files.pyclass:TailoringFiles/download_tailoring_file
|
@classmethod
def download_tailoring_file(cls, options):
"""Downloads the tailoring file from satellite"""
cls.command_sub = 'download'
return cls.execute(cls._construct_command(options), output_format='table')
|
sympy
|
sympy//core/logic.pyfile:/core/logic.py:function:_fuzzy_group/_fuzzy_group
|
def _fuzzy_group(args, quick_exit=False):
"""Return True if all args are True, None if there is any None else False
unless ``quick_exit`` is True (then return None as soon as a second False
    is seen).
``_fuzzy_group`` is like ``fuzzy_and`` except that it is more
conservative in returning a False, waiting to make sure that all
arguments are True or False and returning None if any arguments are
    None. It also has the capability of permitting only a single False and
returning None if more than one is seen. For example, the presence of a
single transcendental amongst rationals would indicate that the group is
no longer rational; but a second transcendental in the group would make the
determination impossible.
Examples
========
>>> from sympy.core.logic import _fuzzy_group
By default, multiple Falses mean the group is broken:
>>> _fuzzy_group([False, False, True])
False
If multiple Falses mean the group status is unknown then set
`quick_exit` to True so None can be returned when the 2nd False is seen:
>>> _fuzzy_group([False, False, True], quick_exit=True)
But if only a single False is seen then the group is known to
be broken:
>>> _fuzzy_group([False, True, True], quick_exit=True)
False
"""
saw_other = False
for a in args:
if a is True:
continue
if a is None:
return
if quick_exit and saw_other:
return
saw_other = True
return not saw_other
|
openplc_editor
|
openplc_editor//plcopen/structures.pyfile:/plcopen/structures.py:function:find_section/find_section
|
def find_section(section_name, table):
"""
    Seek into the CSV table to the section whose first field matches section_name;
    return the matching row without its first field.
"""
fields = [None]
while fields[0] != section_name:
fields = table.pop(0)
return fields[1:]
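# Usage sketch (hypothetical table): rows before the matching section are
# consumed, since table.pop(0) mutates the input list.
table = [['header', 'x'], ['SECTION_A', 'a', 'b']]
assert find_section('SECTION_A', table) == ['a', 'b']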
|
mftoolbox-4.1.8
|
mftoolbox-4.1.8//mftoolbox/funcs.pyfile:/mftoolbox/funcs.py:function:clean_text/clean_text
|
def clean_text(str_text):
"""
    Strips leading and trailing whitespace and '\n' characters from a string.
    :param str_text: the text to be formatted
    :return: the formatted text (if the input was a string), the value passed in unchanged (if it was not a string), or an empty string ('') if the input was None
"""
if str_text is None:
return ''
try:
text_to_return = str_text.replace('\n', '').strip()
except:
text_to_return = str_text
return text_to_return
|
sqla_inspect
|
sqla_inspect//.ropeproject/config.pyfile:/.ropeproject/config.py:function:project_opened/project_opened
|
def project_opened(project):
"""This function is called after opening the project"""
|
Dozer-0.7
|
Dozer-0.7//dozer/profile.pyfile:/dozer/profile.py:function:label/label
|
def label(code):
"""Generate a friendlier version of the code function called"""
if isinstance(code, str):
return code
else:
return '%s %s:%d' % (code.co_name, code.co_filename, code.
co_firstlineno)
|
widgetastic.core-0.51
|
widgetastic.core-0.51//src/widgetastic/utils.pyclass:Fillable/coerce
|
@classmethod
def coerce(cls, o):
"""This method serves as a processor for filling values.
When you are filling values inside widgets and views, I bet you will quickly realize that
filling basic values like strings or numbers is not enough. This method allows a potential
    fillable to implement :py:meth:`as_fill_value` and return a basic value that represents the
    object in the UI.
Args:
o: Object to be filled in the :py:class:`widgetastic.widget.View` or
:py:class:`widgetastic.widget.Widget`
Returns:
Whatever is supposed to be filled in the widget.
"""
if isinstance(o, cls):
return o.as_fill_value()
else:
return o
|
easybuild-framework-4.2.0
|
easybuild-framework-4.2.0//easybuild/base/optcomplete.pyfile:/easybuild/base/optcomplete.py:function:set_optionparser/set_optionparser
|
def set_optionparser(option_class, optionparser_class):
"""Set the default Option and OptionParser class"""
global OPTION_CLASS
global OPTIONPARSER_CLASS
OPTION_CLASS = option_class
OPTIONPARSER_CLASS = optionparser_class
|
clicolor
|
clicolor//cli.pyclass:CLI256/bg
|
@staticmethod
def bg(value: int) ->str:
"""
Returns the background escape code for a 256 color id.
See https://jonasjacek.github.io/colors/ for full list.
"""
return f'\x1b[48;5;{value}m'
|
PyChromecast-5.0.0
|
PyChromecast-5.0.0//pychromecast/discovery.pyfile:/pychromecast/discovery.py:function:stop_discovery/stop_discovery
|
def stop_discovery(browser):
"""Stop the chromecast discovery thread."""
browser.zc.close()
|
packetary
|
packetary//objects/package_version.pyclass:PackageVersion/_order
|
@classmethod
def _order(cls, x):
"""Return an integer value for character x"""
if x.isdigit():
return int(x) + 1
if x.isalpha():
return ord(x)
return ord(x) + 256
|
fake-bpy-module-2.80-20200428
|
fake-bpy-module-2.80-20200428//bpy/path.pyfile:/bpy/path.py:function:native_pathsep/native_pathsep
|
def native_pathsep(path):
"""Replace the path separator with the systems native os.sep.
"""
pass
|
Mathics-1.0
|
Mathics-1.0//mathics/builtin/pympler/asizeof.pyfile:/mathics/builtin/pympler/asizeof.py:function:_p100/_p100
|
def _p100(part, total, prec=1):
"""Return percentage as string.
"""
r = float(total)
if r:
r = part * 100.0 / r
return '%.*f%%' % (prec, r)
return 'n/a'
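# Usage sketch (hypothetical counts): formats part/total as a percentage and
# falls back to 'n/a' when the total is zero.
assert _p100(25, 200) == '12.5%'
assert _p100(1, 0) == 'n/a'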
|
olga-1.2.0
|
olga-1.2.0//olga/load_model.pyfile:/olga/load_model.py:function:read_igor_D_gene_parameters/read_igor_D_gene_parameters
|
def read_igor_D_gene_parameters(params_file_name):
"""Load genD from file.
genD is a list of genomic D information. Each element is a list of the name
of the D allele and the germline sequence.
Parameters
----------
params_file_name : str
File name for a IGOR parameter file.
Returns
-------
genD : list
List of genomic D information.
"""
params_file = open(params_file_name, 'r')
D_gene_info = {}
in_D_gene_sec = False
for line in params_file:
if line.startswith('#GeneChoice;D_gene;'):
in_D_gene_sec = True
elif in_D_gene_sec:
if line[0] == '%':
split_line = line[1:].split(';')
D_gene_info[split_line[0]] = [split_line[1], int(split_line[2])
]
else:
break
params_file.close()
genD = [[]] * len(D_gene_info.keys())
for D_gene in D_gene_info.keys():
genD[D_gene_info[D_gene][1]] = [D_gene, D_gene_info[D_gene][0]]
return genD
|
listools-2.3.3
|
listools-2.3.3//listools/llogic/intersection.pyfile:/listools/llogic/intersection.py:function:intersection/intersection
|
def intersection(list_1: list, list_2: list) ->list:
"""llogic.intersection(list_1, list_2)
Returns the intersection of two lists (omitting repetitions). The order
of the elements of the output depends on the order they are found in the
first list. Usage:
>>> alist = [1, 2, 3, 4, 5]
>>> blist = [7, 6, 5, 4, 3]
>>> llogic.intersection(alist, blist)
[3, 4, 5]
>>> alist = [1, 2, 3, 3, 4, 4, 5, 5, 5]
>>> blist = [3, 3, 4, 5, 5, 6]
>>> llogic.intersection(alist, blist)
[3, 4, 5]
Note that llogic.intersection does not flatten the lists so nested lists
are of type list:
>>> alist = [3, 4, [1, [5, 2]]]
>>> blist = [1, 2, 3, 4, 5]
>>> llogic.intersection(alist, blist)
[3, 4]
The lists can contain any datatype:
>>> alist = [1, 2.3, 'foo', (3, 7)]
>>> blist = ['foo', 7+3j, (3, 7)]
>>> llogic.intersection(alist, blist)
['foo', (3, 7)]
If either list is empty then the result is an empty list:
>>> alist = [1, 2, 3, 4, 5]
>>> blist = []
>>> llogic.intersection(alist, blist)
[]
"""
if not isinstance(list_1, list):
raise TypeError("'list_1' must be 'list'")
if not isinstance(list_2, list):
raise TypeError("'list_2' must be 'list'")
output_list = []
for item in list_1:
if item in list_2 and item not in output_list:
output_list.append(item)
return output_list
|
sweetpear
|
sweetpear//map.pyfile:/map.py:function:__access_member_by_string/__access_member_by_string
|
def __access_member_by_string(x, member):
"""
Return the result of x.#member or x.#member()
where member is the name of the function/variable.
:param x: Element to operate on
:param member: Function or value member
:return:
"""
action = getattr(x, member)
if callable(action):
return action()
else:
return action
|
twisted
|
twisted//internet/interfaces.pyclass:IFileDescriptorReceiver/fileDescriptorReceived
|
def fileDescriptorReceived(descriptor):
"""
Called when a file descriptor is received over the connection.
@param descriptor: The descriptor which was received.
@type descriptor: C{int}
@return: L{None}
"""
|
openstack-cyborg-3.0.0
|
openstack-cyborg-3.0.0//cyborg/objects/control_path.pyclass:ControlpathID/get
|
@classmethod
def get(cls, context, uuid):
"""Find a DB ControlpathID and return an Obj ControlpathID."""
db_cp = cls.dbapi.control_path_get_by_uuid(context, uuid)
obj_cp = cls._from_db_object(cls(context), db_cp)
return obj_cp
|
python-msp430-tools-0.9.2
|
python-msp430-tools-0.9.2//msp430/asm/cpp.pyfile:/msp430/asm/cpp.py:function:line_joiner/line_joiner
|
def line_joiner(line_iterator):
""" Given a readline function, return lines, but handle line continuations
    ('\\\n'). When lines are joined, the same number of blank lines is output
so that the line counter for the consumer stays correct.
"""
while True:
joined_line = '\\\n'
joined_lines = 0
while joined_line[-2:] == '\\\n':
joined_line = joined_line[:-2]
line = next(line_iterator)
if not line:
break
joined_line += line.rstrip() + '\n'
joined_lines += 1
while joined_lines > 1:
yield '\n'
joined_lines -= 1
if not joined_line:
break
yield joined_line
|
PIL
|
PIL//ImageChops.pyfile:/ImageChops.py:function:subtract/subtract
|
def subtract(image1, image2, scale=1.0, offset=0):
"""
Subtracts two images, dividing the result by scale and adding the offset.
If omitted, scale defaults to 1.0, and offset to 0.0. At least one of the
images must have mode "1".
.. code-block:: python
out = ((image1 - image2) / scale + offset)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract(image2.im, scale, offset))
|
entrance-1.1.12
|
entrance-1.1.12//entrance/feature/cfg_base.pyclass:ConfiguredFeature/all
|
@classmethod
def all(cls):
"""
List all subclasses
"""
return cls.__subclasses__()
|
pysorter
|
pysorter//filesystem.pyfile:/filesystem.py:function:is_file/is_file
|
def is_file(path):
"""
tests whether a canonical path refers to a file or a directory
"""
return not path.endswith('/')
|