repo
stringlengths
1
29
path
stringlengths
24
332
code
stringlengths
39
579k
oslo.middleware-4.0.2
oslo.middleware-4.0.2//oslo_middleware/base.pyclass:ConfigurableMiddleware/process_response
@staticmethod def process_response(response, request=None): """Do whatever you'd like to the response.""" return response
wx
wx//lib/ogl/oglmisc.pyfile:/lib/ogl/oglmisc.py:function:CheckLineIntersection/CheckLineIntersection
def CheckLineIntersection(x1, y1, x2, y2, x3, y3, x4, y4):
    """
    Check whether segment (x1,y1)-(x2,y2) intersects segment (x3,y3)-(x4,y4).

    :param `x1`: x1 position
    :param `y1`: y1 position
    :param `x2`: x2 position
    :param `y2`: y2 position
    :param `x3`: x3 position
    :param `y3`: y3 position
    :param `x4`: x4 position
    :param `y4`: y4 position
    :returns: a ``(length_ratio, k_line)`` tuple — *length_ratio* is the
        fractional position of the intersection along the first segment
        (1.0 when there is no intersection), and *k_line* the fractional
        position along the second segment (reset to 1 when out of range).
        NOTE(review): the original docstring said "a lenght ratio and a
        k line???" — the exact geometric meaning should be confirmed
        against callers in oglmisc.py.
    """
    # Denominator/numerator of the parametric line-intersection formula.
    denominator_term = (y4 - y3) * (x2 - x1) - (y2 - y1) * (x4 - x3)
    numerator_term = (x3 - x1) * (y4 - y3) + (x4 - x3) * (y1 - y3)
    length_ratio = 1.0
    k_line = 1.0
    # Near-zero denominator: segments are (almost) parallel; the sentinel
    # -1.0 falls outside (0, 1) so the intersection branch is skipped.
    if denominator_term < 0.005 and denominator_term > -0.005:
        line_constant = -1.0
    else:
        line_constant = float(numerator_term) / denominator_term
    # Intersection lies strictly inside the first segment.
    if line_constant < 1.0 and line_constant > 0.0:
        # Use the x-form when the second segment is (near-)horizontal to
        # avoid dividing by a tiny y4 - y3.
        if y4 - y3 < 0.005 and y4 - y3 > -0.005:
            k_line = (x1 - x3 + line_constant * (x2 - x1)) / (x4 - x3)
        else:
            k_line = (y1 - y3 + line_constant * (y2 - y1)) / (y4 - y3)
        if k_line >= 0 and k_line < 1:
            length_ratio = line_constant
        else:
            k_line = 1
    return length_ratio, k_line
octavvs
octavvs//mcr_als.pyclass:MyMainWindow/program_name
@classmethod
def program_name(cls):
    """Name of the program this main window represents."""
    return 'MCR-ALS'
altuscli-1.6.5
altuscli-1.6.5//altuscli/thirdparty/requests/hooks.pyfile:/altuscli/thirdparty/requests/hooks.py:function:dispatch_hook/dispatch_hook
def dispatch_hook(key, hooks, hook_data, **kwargs):
    """Dispatches a hook dictionary on a given piece of data.

    Every hook registered under *key* is called with the current data (plus
    any extra keyword arguments); a non-None return value becomes the data
    passed to the next hook.  The final data is returned.
    """
    hooks = hooks or {}
    if key not in hooks:
        return hook_data
    registered = hooks[key]
    if callable(registered):
        registered = [registered]
    for hook in registered:
        updated = hook(hook_data, **kwargs)
        if updated is not None:
            hook_data = updated
    return hook_data
pytzer
pytzer//parameters.pyfile:/parameters.py:function:psi_Na_Sr_Cl_MWRB78/psi_Na_Sr_Cl_MWRB78
def psi_Na_Sr_Cl_MWRB78(T, P):
    """c-c'-a: sodium strontium chloride [MWRB78]."""
    # Constant interaction parameter; the fit is only valid at 298.15 K.
    return -0.0052, T == 298.15
fcos-0.1.9
fcos-0.1.9//fcos_core/modeling/make_layers.pyfile:/fcos_core/modeling/make_layers.py:function:get_group_gn/get_group_gn
def get_group_gn(dim, dim_per_gp, num_groups):
    """get number of groups used by GroupNorm, based on number of channels."""
    # Exactly one of dim_per_gp / num_groups may be specified (other is -1).
    assert dim_per_gp == -1 or num_groups == -1, 'GroupNorm: can only specify G or C/G.'
    if dim_per_gp > 0:
        # Channels-per-group given: derive the group count.
        assert dim % dim_per_gp == 0, 'dim: {}, dim_per_gp: {}'.format(dim, dim_per_gp)
        return dim // dim_per_gp
    # Group count given directly.
    assert dim % num_groups == 0, 'dim: {}, num_groups: {}'.format(dim, num_groups)
    return num_groups
nbsite
nbsite//examples/sites/holoviews/holoviews/ipython/preprocessors.pyfile:/examples/sites/holoviews/holoviews/ipython/preprocessors.py:function:replace_line_magic/replace_line_magic
def replace_line_magic(source, magic, template='{line}'):
    """
    Given a cell's source, replace line magics using a formatting
    template, where {line} is the string that follows the magic.
    """
    def _substitute(raw):
        # Only lines that begin (after whitespace) with the magic change.
        if raw.strip().startswith(magic):
            return template.format(line=raw.replace(magic, ''))
        return raw
    return '\n'.join(_substitute(raw) for raw in source.splitlines())
autorank
autorank//autorank.pyfile:/autorank.py:function:latex_table/latex_table
def latex_table(result, *, decimal_places=3, label=None):
    """
    Creates a latex table from the results dataframe of the statistical
    analysis and prints it to stdout (wrapped in a table environment).

    # Parameters

    result (RankResult):
        Should be the return value the autorank function.

    decimal_places (int, default=3):
        Number of decimal places that are used for the report.

    label (str, default=None):
        Label of the table. Defaults to 'tbl:stat_results' if None.

    NOTE(review): this mutates ``result.rankdf`` in place by adding a 'CI'
    column — confirm callers do not rely on the dataframe staying pristine.
    """
    if label is None:
        label = 'tbl:stat_results'
    table_df = result.rankdf
    columns = table_df.columns.to_list()
    # Effect sizes are only meaningful when the null hypothesis was rejected.
    if result.pvalue >= result.alpha:
        columns.remove('effect_size')
        columns.remove('magnitude')
    if result.posthoc == 'tukeyhsd':
        columns.remove('meanrank')
    # Collapse the two CI bound columns into one formatted 'CI' column,
    # keeping it at the position of 'ci_lower'.
    columns.insert(columns.index('ci_lower'), 'CI')
    columns.remove('ci_lower')
    columns.remove('ci_upper')
    rename_map = {}
    if result.all_normal:
        rename_map['effect_size'] = '$d$'
    else:
        # Placeholder swapped for '$\delta$' after to_latex so the backslash
        # survives LaTeX escaping.
        rename_map['effect_size'] = 'D-E-L-T-A'
    rename_map['magnitude'] = 'Magnitude'
    rename_map['mad'] = 'MAD'
    rename_map['median'] = 'MED'
    rename_map['meanrank'] = 'MR'
    rename_map['mean'] = 'M'
    rename_map['std'] = 'SD'
    # e.g. '[{0[ci_lower]:.3f}, {0[ci_upper]:.3f}]'
    format_string = '[{0[ci_lower]:.' + str(decimal_places
        ) + 'f}, {0[ci_upper]:.' + str(decimal_places) + 'f}]'
    table_df['CI'] = table_df.agg(format_string.format, axis=1)
    table_df = table_df[columns]
    table_df = table_df.rename(rename_map, axis='columns')
    float_format = '{:0.' + str(decimal_places) + 'f}'
    table_string = table_df.to_latex(float_format=float_format.format).strip()
    table_string = table_string.replace('D-E-L-T-A', '$\\delta$')
    print('\\begin{table}[h]')
    print('\\centering')
    print(table_string)
    print('\\caption{Summary of populations}')
    print('\\label{%s}' % label)
    print('\\end{table}')
template_utils-0.4p2
template_utils-0.4p2//template_utils/markup.pyfile:/template_utils/markup.py:function:textile/textile
def textile(text, **kwargs):
    """
    Applies Textile conversion to a string, and returns the HTML.

    This is simply a pass-through to the ``textile`` template filter
    included in ``django.contrib.markup``, which works around issues
    PyTextile has with Unicode strings. If you're not using Django but
    want to use Textile with ``MarkupFormatter``, you'll need to supply
    your own Textile filter.
    """
    # Imported lazily so non-Django users only pay when actually calling.
    from django.contrib.markup.templatetags.markup import textile as _django_textile
    return _django_textile(text)
fake-blender-api-2.79-0.3.1
fake-blender-api-2.79-0.3.1//bpy/ops/node.pyfile:/bpy/ops/node.py:function:properties/properties
def properties():
    """Toggle the properties region visibility.

    Stub from the fake Blender API: the real operator only exists inside
    Blender, so this does nothing and returns None.
    """
    pass
easybuild-framework-4.2.0
easybuild-framework-4.2.0//easybuild/base/fancylogger.pyfile:/easybuild/base/fancylogger.py:function:setLogFormat/setLogFormat
def setLogFormat(f_format):
    """Set the log format. (Has to be set before logToSomething is called)."""
    # Rebind the module-level format string; presumably logToSomething reads
    # it when configuring handlers, so later calls see the new value — confirm
    # against fancylogger's logToSomething implementation.
    global FANCYLOG_LOGGING_FORMAT
    FANCYLOG_LOGGING_FORMAT = f_format
Products
Products//ZopeTree/IZopeTree.pyclass:INode/getId
def getId():
    """
    Return the object's id in the tree.

    Zope-style interface declaration: methods take no ``self`` and have no
    body; implementers of INode supply the behavior.
    """
skelevision-0.1.5
skelevision-0.1.5//skelevision/objects.pyclass:TraceLog/activity_2_freq
@staticmethod def activity_2_freq(trace): """For a given trace, return a mapping from activity to frequency in trace. Parameters ---------- trace: `tuple` of `str` a trace as a tuple of activities Returns ------- `dict` mapping from activity to frequency in trace """ d = {} for a in trace: if a not in d: d[a] = 0 d[a] += 1 return d
sanzang-utils-1.3.3
sanzang-utils-1.3.3//szu_ed.pyfile:/szu_ed.py:function:table_to_str/table_to_str
def table_to_str(tab):
    """
    Produce a formatted string for a translation table.

    Given a table stored as a dictionary, sort the contents (longest source
    term first, ties alphabetical) and return the table as text in the
    translation table format for storage.
    """
    entries = sorted(tab.items(), key=lambda kv: (-len(kv[0]), kv[0]))
    lines = [source + '|' + '|'.join(targets) + '\n' for source, targets in entries]
    return ''.join(lines)
go_api
go_api//collections/interfaces.pyclass:ICollection/all_keys
def all_keys():
    """
    Return an iterable over all keys in the collection.

    May return a deferred instead of the iterable.

    Interface declaration (no ``self``, no body): implementations of
    ICollection provide the behavior.
    """
PseudoNetCDF-3.1.0
PseudoNetCDF-3.1.0//src/PseudoNetCDF/core/_files.pyclass:PseudoNetCDFFile/isMine
@classmethod
def isMine(cls, *args, **kwds):
    """
    True if this file or object can be identified for use by this class.

    Base implementation never claims anything; subclasses that can be
    initialized from disk override this to sniff their own formats.
    """
    return False
pluggdapps-0.43dev
pluggdapps-0.43dev//pluggdapps/web/interfaces.pyclass:IHTTPCookie/decode_signed_value
def decode_signed_value(name, value):
    """Reverse of `create_signed_value`. Returns original value string.

    Interface declaration (no ``self``, no body): implementations verify the
    signature that ``create_signed_value`` embedded and recover the plain
    value.
    """
eric6
eric6//ThirdParty/Pygments/pygments/lexer.pyfile:/ThirdParty/Pygments/pygments/lexer.py:function:do_insertions/do_insertions
def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # No insertions at all: pass the token stream through unchanged.
        for item in tokens:
            yield item
        return
    realpos = None
    insleft = True
    for i, t, v in tokens:
        # First token: start the running output position from its position.
        if realpos is None:
            realpos = i
        oldi = 0
        # Emit as many insertions as fall inside the current token's span.
        while insleft and i + len(v) >= index:
            # Part of the token text before the insertion point.
            tmpval = v[oldi:index - i]
            yield realpos, t, tmpval
            realpos += len(tmpval)
            # The inserted sub-stream, re-positioned at the running offset.
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                insleft = False
                break
        # Remainder of the token after the last insertion inside it.
        yield realpos, t, v[oldi:]
        realpos += len(v) - oldi
    # Leftover insertions after the token stream ended.
    while insleft:
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break
crtomo_tools-0.2.1
crtomo_tools-0.2.1//lib/crtomo/cfg.pyclass:crtomo_config/help
def help(key):
    """Return the help text specific to a certain key """
    # No per-key help texts have been written yet, hence the empty table.
    help_dict = {}
    return help_dict.get(key, 'no help available')
dynetx
dynetx//classes/function.pyfile:/classes/function.py:function:create_empty_copy/create_empty_copy
def create_empty_copy(G, with_data=True):
    """Return a copy of the graph G with all of the edges removed.

    Parameters
    ----------
    G : graph
        A DyNetx graph

    with_data : bool (default=True)
        Include data.

    Notes
    -----
    Graph and edge data is not propagated to the new graph.
    """
    empty = G.__class__()
    empty.add_nodes_from(G.nodes(data=with_data))
    if with_data:
        empty.graph.update(G.graph)
    return empty
olcnastools-1.1.10
olcnastools-1.1.10//nastools/nastools.pyfile:/nastools/nastools.py:function:parse_seqid_file/parse_seqid_file
def parse_seqid_file(seqfile):
    """
    Read in a file of SEQ IDs, and return the list of IDs

    :param seqfile: File containing a column of SEQ IDs
    :return: list of SEQ IDs to process
    """
    with open(seqfile) as handle:
        # Blank lines are skipped; trailing whitespace/newlines are stripped.
        return [entry for entry in (line.rstrip() for line in handle) if entry]
fantastico-0.7.1
fantastico-0.7.1//virtual_env/libs/mysql-connector/python3/mysql/connector/constants.pyclass:CharacterSet/get_desc
@classmethod
def get_desc(cls, setid):
    """Retrieves character set information as string using an ID

    Retrieves character set and collation information based on the
    given MySQL ID.

    Returns a '<charset>/<collation>' string; errors from get_info()
    propagate to the caller.
    """
    # The previous `try: ... except: raise` was a no-op wrapper around the
    # call (a bare except that only re-raised); let exceptions propagate
    # directly instead.
    return '%s/%s' % cls.get_info(setid)
didyoumean
didyoumean//didyoumean_internal.pyfile:/didyoumean_internal.py:function:add_string_to_exception/add_string_to_exception
def add_string_to_exception(value, string):
    """Add string to the exception parameter.

    Appends *string* to the first str element of ``value.args`` (or adds it
    as a new arg if none is a str), and to any of the conventional message
    attributes (``msg``/``strerror``/``reason``) the exception carries.
    """
    assert type(value.args) == tuple
    if not string:
        return
    args = list(value.args)
    for pos, item in enumerate(args):
        if isinstance(item, str):
            args[pos] = item + string
            break
    else:
        # No string argument found: append the extra text as a new one.
        args.append(string)
    value.args = tuple(args)
    for name in ['msg', 'strerror', 'reason']:
        current = getattr(value, name, None)
        if current is not None:
            setattr(value, name, current + string)
fake-bpy-module-2.79-20200428
fake-bpy-module-2.79-20200428//bpy/ops/image.pyfile:/bpy/ops/image.py:function:sample/sample
def sample():
    """Use mouse to sample a color in current image.

    Stub from the fake Blender API: the real operator only exists inside
    Blender, so this does nothing and returns None.
    """
    pass
silva.core.services-3.0.1
silva.core.services-3.0.1//src/silva/core/services/interfaces.pyclass:IContainerPolicyService/get_policy
def get_policy(name):
    """Return the named policy.

    Interface declaration (no ``self``, no body): implementations of
    IContainerPolicyService provide the lookup.
    """
dropbox
dropbox//team_log.pyclass:EventType/sign_in_as_session_start
@classmethod
def sign_in_as_session_start(cls, val):
    """
    Create an instance of this class set to the
    ``sign_in_as_session_start`` tag with value ``val``.

    :param SignInAsSessionStartType val:
    :rtype: EventType
    """
    tag = 'sign_in_as_session_start'
    return cls(tag, val)
MiModD
MiModD//convert.pyfile:/convert.py:function:fastqReader/fastqReader
def fastqReader(inputFile):
    """A fast and robust fastq parser.

    Deals with multiline sequences and quality scores.
    Allows arbitrary numbers of empty lines anywhere in the file.
    Performs (only) the following file format checks while parsing:

    - each title line MUST start with @ symbol
    - each record MUST have a sequence
    - sequence and quality score of each record MUST be of equal length

    No alphabet checks are done on sequences or quality scores.

    Yields ``(title, seq, qual)`` tuples.  The ``ord()`` comparisons and
    ``b''.join`` imply *inputFile* must be an iterator over ``bytes``
    lines (a binary-mode file object).
    """
    # Byte values of the record markers: '@' opens a title, '+' separates
    # sequence from quality lines.
    title_token = ord('@')
    sep_token = ord('+')
    while True:
        try:
            title = next(inputFile)
        except StopIteration:
            # Clean end of file between records.
            return
        if not title[0] == title_token:
            # Skip blank lines between records; anything else is an error.
            if not title.rstrip():
                continue
            raise AssertionError(
                'Invalid file format: Title line not starting with @')
        title = title.rstrip()
        line_tmp = []
        try:
            # Collect sequence lines until the '+' separator line.
            while True:
                currentLine = next(inputFile)
                if currentLine[0] == sep_token:
                    break
                line_tmp.append(currentLine.rstrip())
            seq = b''.join(line_tmp)
            seqlen = len(seq)
            if seqlen == 0:
                raise AssertionError(
                    'Invalid file format: Record without sequence')
            # Collect quality lines until at least seqlen characters read.
            quallen = 0
            line_tmp = []
            while seqlen > quallen:
                currentLine = next(inputFile).rstrip()
                line_tmp.append(currentLine)
                quallen += len(currentLine)
        except StopIteration:
            # EOF in the middle of a record's sequence or quality block.
            raise AssertionError(
                'Invalid file format: Truncated record at end of file')
        if seqlen < quallen:
            raise AssertionError(
                'Invalid file format: Inconsistent lengths of sequence and quality score'
                )
        qual = b''.join(line_tmp)
        yield title, seq, qual
py2sfn_task_tools
py2sfn_task_tools//state_data_client.pyfile:/state_data_client.py:function:_giveup_client_error/_giveup_client_error
def _giveup_client_error(exc: Exception) ->bool:
    """Handler for the backoff decorator to stop retrying boto3 client errors.

    Args:
        exc: Exception raised by a function decorated with
            ``backoff.on_exception``

    Returns:
        True if the status code is a client error (HTTP 4xx), False otherwise
    """
    try:
        status = exc.response['ResponseMetadata']['HTTPStatusCode']
    except (AttributeError, KeyError):
        # No boto3-style response payload attached: not a client error.
        return False
    return 400 <= status < 500
vmn-scikit-image-0.17.dev0
vmn-scikit-image-0.17.dev0//skimage/util/apply_parallel.pyfile:/skimage/util/apply_parallel.py:function:_get_chunks/_get_chunks
def _get_chunks(shape, ncpu):
    """Split the array into equal sized chunks based on the number of
    available processors. The last chunk in each dimension absorbs the
    remainder array elements if the number of CPUs does not divide evenly into
    the number of array elements.

    Examples
    --------
    >>> _get_chunks((4, 4), 4)
    ((2, 2), (2, 2))
    >>> _get_chunks((4, 4), 2)
    ((2, 2), (4,))
    >>> _get_chunks((5, 5), 2)
    ((2, 3), (5,))
    >>> _get_chunks((2, 4), 2)
    ((1, 1), (4,))
    """
    from math import ceil
    # Split each dimension into roughly ncpu**(1/ndim) pieces.
    per_dim = int(ceil(ncpu ** (1.0 / len(shape))))
    chunks = []
    consumed = 1
    for extent in shape:
        if consumed < ncpu:
            base = extent // per_dim
            # Last chunk absorbs the remainder elements.
            tail = base + extent % per_dim
            if base == 0:
                lengths = (tail,)
            else:
                lengths = (base,) * (per_dim - 1) + (tail,)
        else:
            # Enough parallelism already: keep this dimension whole.
            lengths = (extent,)
        chunks.append(lengths)
        consumed *= per_dim
    return tuple(chunks)
PyPDT-0.7.5
PyPDT-0.7.5//pypdt/pid.pyfile:/pypdt/pid.py:function:isBSMBoson/isBSMBoson
def isBSMBoson(pid):
    """Is this a valid BSM boson (SUSY Higgs, W', Z')?"""
    # PDG IDs 32..37 (either sign) are the beyond-Standard-Model bosons.
    return 32 <= abs(pid) <= 37
monasca_transform
monasca_transform//component/insert/prepare_data.pyclass:PrepareData/insert
@staticmethod def insert(transform_context, instance_usage_df): """write instance usage data to kafka""" return instance_usage_df
pagebot
pagebot//toolbox/transformer.pyfile:/toolbox/transformer.py:function:asFloatOrNone/asFloatOrNone
def asFloatOrNone(value):
    """Answers a float if it can be converted. Answer None otherwise.

    >>> asFloatOrNone(123)
    123.0
    >>> asFloatOrNone('123')
    123.0
    >>> asFloatOrNone('123a') is None
    True
    """
    try:
        converted = float(value)
    except (TypeError, ValueError):
        return None
    return converted
mrjob
mrjob//cat.pyfile:/cat.py:function:to_chunks/to_chunks
def to_chunks(readable, bufsize=1024):
    """Convert *readable*, which is any object supporting ``read()``
    (e.g. fileobjs) to a stream of non-empty ``bytes``.

    If *readable* has an ``__iter__`` method but not a ``read`` method,
    pass through as-is.
    """
    if hasattr(readable, '__iter__') and not hasattr(readable, 'read'):
        yield from readable
        return
    while True:
        data = readable.read(bufsize)
        if not data:
            # EOF: read() returned an empty chunk.
            return
        yield data
red_star
red_star//rs_utils.pyfile:/rs_utils.py:function:ordinal/ordinal
def ordinal(n):
    """
    Turn a number into its ordinal representation (1 -> 1st).

    :param n: number to be converted
    :return: string with ordinal number
    """
    tens = n // 10 % 10
    units = n % 10
    # Teens (x1x) and digits 4-9/0 all take 'th'; only 1/2/3 outside the
    # teens get 'st'/'nd'/'rd'.
    if tens == 1 or units > 3:
        suffix = 'th'
    else:
        suffix = ('th', 'st', 'nd', 'rd')[units]
    return '%d%s' % (n, suffix)
melusine
melusine//prepare_email/metadata_engineering.pyclass:MetaDate/get_hour
@staticmethod def get_hour(row): """Get hour from date""" x = row['date'] try: return x.hour except Exception as e: return 0
CppHeaderParser
CppHeaderParser//doxygen.pyfile:/doxygen.py:function:extract_doxygen_method_params/extract_doxygen_method_params
def extract_doxygen_method_params(doxystr):
    """
    Given a doxygen string for a method, extract parameter descriptions

    Returns a dict mapping parameter name -> description.  Wrapped
    continuation lines are folded into the preceding ``@param`` entry;
    a ``@return``/``\\return`` tag ends the current parameter.
    """
    doxyVarDesc = {}
    doxyLines = doxystr.split('\n')
    lastParamDesc = ''
    for doxyLine in doxyLines:
        # Recognize both '@param name desc' and '\param name desc'.
        if ' @param ' in doxyLine or ' \\param ' in doxyLine:
            try:
                doxyLine = doxyLine[doxyLine.find('param ') + 6:]
                var, desc = doxyLine.split(' ', 1)
                doxyVarDesc[var] = desc.strip()
                lastParamDesc = var
            except Exception:
                pass
        # BUG FIX: this previously tested `' \return '`, a non-raw string in
        # which '\r' is a carriage return, so '\return' tags were never
        # matched and got appended to the last parameter's description.
        # Escape the backslash, consistent with the '\\param' test above.
        elif ' @return ' in doxyLine or ' \\return ' in doxyLine:
            lastParamDesc = ''
        elif lastParamDesc:
            # Continuation line: drop the leading comment token and append
            # the rest to the last parameter's description.
            try:
                doxyLine = doxyLine.strip()
                if ' ' not in doxyLine:
                    lastParamDesc = ''
                    continue
                doxyLine = doxyLine[doxyLine.find(' ') + 1:]
                doxyVarDesc[lastParamDesc] += ' ' + doxyLine
            except Exception:
                pass
    return doxyVarDesc
webhelpers2
webhelpers2//text.pyfile:/text.py:function:series/series
def series(*items, **kw):
    """Join strings using commas and a conjunction such as "and" or "or".

    The conjunction defaults to "and". Pass 'conj' as a keyword arg to change
    it. Pass 'strict=False' to omit the comma before the conjunction.

    Examples:

    >>> series("A", "B")
    'A and B'
    >>> series("A", "B", conj="or")
    'A or B'
    >>> series("A", "B", "C")
    'A, B, and C'
    >>> series("A", "B", "C", strict=False)
    'A, B and C'
    """
    # (Doctest above previously read `series "A", "B", "C", strict=False)` —
    # a SyntaxError; the missing '(' is now restored.)
    conjunction = kw.pop('conj', 'and')
    strict = kw.pop('strict', True)
    if kw:
        keys = sorted(kw)
        raise TypeError('unrecognized keyword args: {0}'.format(keys))
    items = list(items)
    length = len(items)
    if length == 0:
        return ''
    if length == 1:
        return items[0]
    if length == 2:
        # Two items never take an Oxford comma.
        strict = False
    nonlast = ', '.join(items[:-1])
    last = items[-1]
    # Replaces the old `strict and ',' or ''` trick (which only worked
    # because ',' is truthy) with an explicit conditional expression.
    comma = ',' if strict else ''
    return '%s%s %s %s' % (nonlast, comma, conjunction, last)
lenstronomy-1.5.0
lenstronomy-1.5.0//lenstronomy/Util/derivative_util.pyfile:/lenstronomy/Util/derivative_util.py:function:d_phi_dx/d_phi_dx
def d_phi_dx(x, y):
    """
    Angular derivative with respect to x when phi = arctan2(y, x).

    :param x: x-coordinate
    :param y: y-coordinate
    :return: d(phi)/dx = -y / (x**2 + y**2)
    """
    r_squared = x ** 2 + y ** 2
    return -y / r_squared
py_school_match
py_school_match//algorithms/da.pyclass:DA/definitely_unassign_student
@staticmethod def definitely_unassign_student(student): """Sets a student as rejected by all schools.""" student.assigned = True student.assigned_school = None
fake-bpy-module-2.79-20200428
fake-bpy-module-2.79-20200428//bpy/ops/view3d.pyfile:/bpy/ops/view3d.py:function:edit_mesh_extrude_move_normal/edit_mesh_extrude_move_normal
def edit_mesh_extrude_move_normal():
    """Extrude and move along normals.

    Stub from the fake Blender API: the real operator only exists inside
    Blender, so this does nothing and returns None.
    """
    pass
argv-0.0.3
argv-0.0.3//argv/tokens.pyfile:/argv/tokens.py:function:split_flag_token/split_flag_token
def split_flag_token(token): """ Split a single token from the command line into its individual flags. Guarantees: * flags will not contain an '=' * flags will not be '--' Since all output will be single flags, we yield strings, rather than tuples. | call | output | |:-----|:-------| | `split_token('-m')` | `['m']` | | `split_token('-czf')` | `['c', 'z', 'f']` | | `split_token('--last')` | `['last']` | N.b., those lists are actually iterables. """ if token.startswith('--'): yield token[2:] else: for letter in token[1:]: yield letter
datacustodian-1.2.1
datacustodian-1.2.1//datacustodian/msg.pyfile:/datacustodian/msg.py:function:set_verbosity/set_verbosity
def set_verbosity(level): """Sets the modules message verbosity level for *all* messages printed. :arg level: a positive integer (>0); higher levels including more detail. """ global verbosity verbosity = level
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/mturk.pyfile:/pyboto3/mturk.py:function:create_hit_with_hit_type/create_hit_with_hit_type
def create_hit_with_hit_type(HITTypeId=None, MaxAssignments=None,
    LifetimeInSeconds=None, Question=None, RequesterAnnotation=None,
    UniqueRequestToken=None, AssignmentReviewPolicy=None, HITReviewPolicy=
    None, HITLayoutId=None, HITLayoutParameters=None):
    """
    Offline stub of the MTurk ``CreateHITWithHITType`` operation (body is
    ``pass``); it mirrors the boto3 client call signature for documentation
    and autocompletion purposes only.

    The real operation creates a new Human Intelligence Task (HIT) using an
    existing HITTypeID generated by ``CreateHITType`` — the recommended
    approach for Requesters creating large numbers of HITs.  Question data
    may be supplied either via ``Question`` or via ``HITLayoutId`` +
    ``HITLayoutParameters``.

    :type HITTypeId: string
    :param HITTypeId: [REQUIRED] The HIT type ID to create this HIT with.
    :type MaxAssignments: integer
    :param MaxAssignments: How many times the HIT can be accepted and
        completed before it becomes unavailable.
    :type LifetimeInSeconds: integer
    :param LifetimeInSeconds: [REQUIRED] Seconds after which the HIT is no
        longer available for users to accept.
    :type Question: string
    :param Question: The data used to produce results: a QuestionForm,
        ExternalQuestion, or HTMLQuestion XML structure (max 64 KB).
        Either ``Question`` or ``HITLayoutId`` must be provided.
    :type RequesterAnnotation: string
    :param RequesterAnnotation: Arbitrary Requester-only tracking data
        attached to the HIT; never shown to Workers.
    :type UniqueRequestToken: string
    :param UniqueRequestToken: Idempotency token so a retried call does not
        create duplicate HITs (expires after 24 hours).
    :type AssignmentReviewPolicy: dict
    :param AssignmentReviewPolicy: Assignment-level review policy
        (``PolicyName`` plus ``Parameters`` with Key/Values/MapEntries).
    :type HITReviewPolicy: dict
    :param HITReviewPolicy: HIT-level review policy, same structure as
        ``AssignmentReviewPolicy``.
    :type HITLayoutId: string
    :param HITLayoutId: Pre-existing HIT layout whose placeholders are
        filled from ``HITLayoutParameters``.  Either ``Question`` or
        ``HITLayoutId`` must be provided.
    :type HITLayoutParameters: list
    :param HITLayoutParameters: List of ``{'Name': ..., 'Value': ...}``
        substitutions for the layout placeholders.
    :rtype: dict
    :return: ``{'HIT': {...}}`` describing the created HIT (IDs, status,
        timing, reward, qualification requirements, assignment counts) in
        the real API; this stub returns None.
    """
    pass
swimbundle_utils-2.5.0
swimbundle_utils-2.5.0//swimbundle_utils/helpers.pyfile:/swimbundle_utils/helpers.py:function:asset_parser/asset_parser
def asset_parser(context_asset, host_name='host', username='username',
        password='password', auth=None, **kwargs):
    """Take in a context asset and break it into params for an ``__init__``
    call on BasicRestEndpoint

    Args:
        context_asset: Context asset object
        host_name: host key name to grab from asset, defaults to ``host``
        username: username key name to grab from asset, defaults to ``username``
        password: password key name to grab from asset, defaults to ``password``
        auth: optional auth argument to override username/password. Set to
            None to disable auth
        kwargs: optional keyword args to overwrite the parameters with

    Returns:
        Dictionary of key args to use with \\*\\*{} in the ``super().__init__()``
        of a BasicRestEndpoint
    """
    endpoint_host = context_asset.get(host_name)
    if endpoint_host is not None:
        # Trim stray spaces and trailing slashes, then attach the port.
        endpoint_host = endpoint_host.strip(' /')
        if 'port' in context_asset:
            endpoint_host = '{}:{}'.format(endpoint_host, context_asset['port'])
    params = {
        'host': endpoint_host,
        'verify': context_asset.get('verify_ssl', True),
        'proxy': context_asset.get('http_proxy'),
    }
    if auth == 'basic':
        params['auth'] = (context_asset[username], context_asset[password])
    elif auth:
        params['auth'] = auth
    # Explicit keyword arguments win over everything derived from the asset.
    params.update(kwargs)
    return params
homeassistant-0.109.6
homeassistant-0.109.6//homeassistant/components/mikrotik/hub.pyclass:MikrotikData/load_mac
@staticmethod def load_mac(devices=None): """Load dictionary using MAC address as key.""" if not devices: return None mac_devices = {} for device in devices: if 'mac-address' in device: mac = device['mac-address'] mac_devices[mac] = device return mac_devices
pytzer
pytzer//parameters.pyfile:/parameters.py:function:mun2i_NH3_NH3_Na_CB89/mun2i_NH3_NH3_Na_CB89
def mun2i_NH3_NH3_Na_CB89(T, P):
    """n-n-c: ammonia ammonia sodium [CB89]."""
    # Constant interaction parameter; the fit is only valid at 298.15 K.
    return -0.000311, T == 298.15
LaueTools-3.0.0.34
LaueTools-3.0.0.34//LaueTools/CrystalParameters.pyfile:/LaueTools/CrystalParameters.py:function:E2L/E2L
def E2L(energy):
    """
    Convert photon energy in eV to wavelength in meters.
    """
    # 12398.0 eV*Angstrom divided by energy gives Angstroms; 1e-10 converts
    # Angstrom -> meter.
    wavelength_angstrom = 12398.0 / energy
    return wavelength_angstrom * 1e-10
sasmodels-1.0.2
sasmodels-1.0.2//sasmodels/models/_spherepy.pyfile:/sasmodels/models/_spherepy.py:function:radius_effective/radius_effective
def radius_effective(mode, radius):
    """Calculate R_eff for sphere"""
    # Any truthy mode selects the sphere radius; mode 0 means no R_eff.
    if mode:
        return radius
    return 0.0
movekit-0.1.8
movekit-0.1.8//src/movekit/feature_extraction.pyfile:/src/movekit/feature_extraction.py:function:compute_average_speed/compute_average_speed
def compute_average_speed(data_animal_id_groups, fps):
    """
    Compute each mover's average speed over a sliding window of ``fps`` frames.

    Formula used- Average Speed = Total Distance Travelled / Total Time taken,
    using the 'distance' column produced by compute_distance_and_direction().

    Input- Python dict (animal id -> DataFrame) and fps (frames per second)
    Returns- the same dict, each DataFrame gaining an 'average_speed' column
    """
    for animal_id, frame in data_animal_id_groups.items():
        rolling_distance = frame['distance'].rolling(window=fps, win_type=None).sum()
        frame['average_speed'] = rolling_distance / fps
    return data_animal_id_groups
music21-5.7.2
music21-5.7.2//music21/sieve.pyfile:/music21/sieve.py:function:unitNormEqual/unitNormEqual
def unitNormEqual(parts):
    """Divide the unit interval into `parts` equally spaced values.

    Returns a list of values between 0 and 1 inclusive with as many
    entries as `parts`. For `parts` of 0 or 1 a single entry ``[0]``
    is returned.
    """
    if parts <= 1:
        return [0]
    if parts == 2:
        return [0, 1]
    step = 1 / (parts - 1)
    return [k * step for k in range(parts - 1)] + [1]
cnfconverter-1.1.1
cnfconverter-1.1.1//cnf/cnfconverter.pyfile:/cnf/cnfconverter.py:function:p_expression_negative/p_expression_negative
def p_expression_negative(t):
    """expression : Not expression"""
    # The docstring above is the PLY grammar rule and must not change.
    # Wrap the negated sub-expression in a '~' node.
    t[0] = {'op': '~', 'child': t[2]}
flywheel
flywheel//models/enginemetadata_engine_upload_input.pyclass:EnginemetadataEngineUploadInput/positional_to_model
@staticmethod
def positional_to_model(value):
    """Converts a positional argument to a model value.

    This implementation is the identity; presumably subclasses or
    generated models override it when real conversion is required --
    TODO confirm against the generated model hierarchy.
    """
    return value
panda-pilot-2.5.4.13
panda-pilot-2.5.4.13//pilot/user/atlas/diagnose.pyfile:/pilot/user/atlas/diagnose.py:function:is_bad_alloc/is_bad_alloc
def is_bad_alloc(job_report_errors, log):
    """Check for bad_alloc errors.

    :param job_report_errors: list with errors extracted from the job report.
    :param log: job logger object.
    :return: bad_alloc (bool), diagnostics (string).
    """
    for message in job_report_errors:
        if 'bad_alloc' not in message:
            continue
        log.warning('encountered a bad_alloc error: %s' % message)
        # Stop at the first match; its text becomes the diagnostics.
        return True, message
    return False, ''
flask-restplus-0.13.0
flask-restplus-0.13.0//flask_restplus/swagger.pyfile:/flask_restplus/swagger.py:function:_v/_v
def _v(value): """Dereference values (callable)""" return value() if callable(value) else value
qisys
qisys//ui.pyfile:/ui.py:function:_get_console_size_tput/_get_console_size_tput
def _get_console_size_tput():
    """Query the terminal size via the ``tput`` utility.

    Returns (cols, rows) on success, or None when tput is unavailable or
    produces unparsable output.
    http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
    """
    try:
        import subprocess

        def _query(arg):
            # Run `tput <arg>` and parse its single-integer output.
            proc = subprocess.Popen(['tput', arg], stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
            captured = proc.communicate(input=None)
            return int(captured[0])

        return _query('cols'), _query('lines')
    except Exception:
        # Any failure (missing tput, bad output) degrades to None.
        return None
beebird-0.0.1
beebird-0.0.1//beebird/uis/console.pyfile:/beebird/uis/console.py:function:printProgressBar/printProgressBar
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd='\r'):
    """Call in a loop to create a terminal progress bar.

    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        printEnd    - Optional  : end character (e.g. "\\r", "\\r\\n") (Str)

    NOTE(review): ``total == 0`` raises ZeroDivisionError -- callers must
    pass total >= 1. Confirm whether that precondition is documented
    upstream.
    """
    # Percentage formatted with the requested number of decimals, e.g. '42.0'.
    percent = ('{0:.' + str(decimals) + 'f}').format(100 * (iteration / float(total)))
    filledLength = int(length * iteration // total)
    bar = fill * filledLength + '-' * (length - filledLength)
    # Leading '\r' returns the cursor to line start so repeated calls
    # redraw the bar in place.
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end=printEnd)
    if iteration == total:
        # Finished: emit a newline so following output starts fresh.
        print()
meetup2xibo-3.3.1
meetup2xibo-3.3.1//meetup2xibo/updater/event_location.pyclass:EventLocation/format_place_list
@staticmethod def format_place_list(places): """Format a list of places as an English phrase.""" if len(places) < 3: return ' and '.join(places) else: most_places = ', '.join(places[0:-1]) return '{}, and {}'.format(most_places, places[-1])
threedi-modelchecker-0.10
threedi-modelchecker-0.10//threedi_modelchecker/exporters.pyfile:/threedi_modelchecker/exporters.py:function:print_errors/print_errors
def print_errors(errors):
    """Write every error to stdout, one per line.

    :param errors: iterator of BaseModelError
    """
    for err in errors:
        print(err)
fhir_to_sdo-0.2.0
fhir_to_sdo-0.2.0//fhir_to_sdo/fhir_structured_definition.pyfile:/fhir_to_sdo/fhir_structured_definition.py:function:url_leaf/url_leaf
def url_leaf(path: str) ->str:
    """Return the 'leaf' of a URL: the part after the last '#' when a
    fragment is present, otherwise after the last '/'.

    :param path: url
    :return: leaf component
    """
    separator = '#' if '#' in path else '/'
    return path.split(separator)[-1]
shub-2.10.0
shub-2.10.0//shub/utils.pyfile:/shub/utils.py:function:_update_conf/_update_conf
def _update_conf(conf, target, project, repository): """Update configuration target with given ``project`` and ``repository``""" if project: conf.projects[target] = project if repository: if target == 'default': conf.images[target] = repository else: if not isinstance(conf.projects[target], dict): conf.projects[target] = {'id': conf.projects[target]} conf.projects[target]['image'] = repository
giftbox
giftbox//wrappers.pyfile:/wrappers.py:function:get_mime/get_mime
def get_mime(filepath):
    """
    Use python-magic to get the mime type of a file.

    Args:
        filepath (str): Path to the file to be sniffed by magic

    Returns:
        str: Returns a string representing the mime type of the file.
    """
    # Imported lazily so the module can be loaded without python-magic
    # installed; only this function requires it.
    import magic
    return magic.from_file(filepath, mime=True)
pynsot-1.4.1
pynsot-1.4.1//pynsot/util.pyfile:/pynsot/util.py:function:dict_to_cidr/dict_to_cidr
def dict_to_cidr(obj):
    """
    Take a dict of a Network object and return a cidr-formatted string.

    :param obj: Dict of a Network object with 'network_address' and
        'prefix_length' keys
    """
    return '{}/{}'.format(obj['network_address'], obj['prefix_length'])
rhasspy-hermes-0.2.0
rhasspy-hermes-0.2.0//rhasspyhermes/asr.pyclass:AsrStopListening/topic
@classmethod
def topic(cls, **kwargs) ->str:
    """Get MQTT topic for this message type.

    ``kwargs`` is accepted for signature uniformity with other message
    classes but is unused: the stopListening topic is a fixed string.
    """
    return 'hermes/asr/stopListening'
combi
combi//_python_toolbox/sequence_tools/misc.pyfile:/_python_toolbox/sequence_tools/misc.py:function:get_length/get_length
def get_length(sequence):
    """Get the length of a sequence.

    Objects exposing a ``length`` attribute report it directly;
    everything else falls back to ``len()``.
    """
    if hasattr(sequence, 'length'):
        return sequence.length
    return len(sequence)
chronix2grid
chronix2grid//generation/thermal/EDispatch_L2RPN2020/utils.pyfile:/generation/thermal/EDispatch_L2RPN2020/utils.py:function:update_gen_constrains/update_gen_constrains
def update_gen_constrains(gen_constraints_user):
    """Merge user-supplied generator constraints over the defaults.

    Parameters
    ----------
    gen_constraints_user : dict
        Valid keys: p_max_pu, p_min_pu

    Returns
    -------
    dict
        New dict with both keys present; unset ones default to None.
    """
    return {'p_max_pu': None, 'p_min_pu': None, **gen_constraints_user}
edtf-4.0.1
edtf-4.0.1//edtf/jdutil.pyfile:/edtf/jdutil.py:function:jd_to_mjd/jd_to_mjd
def jd_to_mjd(jd):
    """
    Convert Julian Day to Modified Julian Day.

    Parameters
    ----------
    jd : float
        Julian Day

    Returns
    -------
    mjd : float
        Modified Julian Day
    """
    # MJD epoch (1858-11-17 00:00 UT) expressed as a Julian Day.
    MJD_EPOCH_JD = 2400000.5
    return jd - MJD_EPOCH_JD
atomic-cloud-0.3.6
atomic-cloud-0.3.6//aws/ec2.pyfile:/aws/ec2.py:function:get_route_table_id/get_route_table_id
def get_route_table_id(rt: dict):
    """
    Extract RouteTableId from the route table dictionary definition.

    :param rt: dictionary of route table definition.
    :return: string route table ID, or None when the key is absent
        (``dict.get`` default).
    """
    return rt.get('RouteTableId')
von_anchor-1.15.1
von_anchor-1.15.1//von_anchor/a2a/docutil.pyfile:/von_anchor/a2a/docutil.py:function:resource/resource
def resource(ref: str, delimiter: str=None) ->str:
    """
    Given a (URI) reference, return everything before its delimiter
    (exclusively), or the whole reference if the delimiter is absent.

    :param ref: reference
    :param delimiter: delimiter character (default None maps to '#',
        or ';' introduces identifiers)
    """
    separator = delimiter if delimiter else '#'
    return ref.split(separator)[0]
jhTAlib-20200412.0
jhTAlib-20200412.0//jhtalib/momentum_indicators/momentum_indicators.pyfile:/jhtalib/momentum_indicators/momentum_indicators.py:function:VHF/VHF
def VHF(df, n, price='Close'):
    """ Vertical Horizontal Filter
    Returns: list of floats = jhta.VHF(df, n, price='Close')
    Source: https://www.fmlabs.com/reference/default.htm?url=VHF.htm
    """
    vhf_list = []
    # Per-bar relative price change (c0 - c1) / c1; NaN for warm-up bars
    # so that window sums stay index-aligned with the price series.
    c_list = []
    for i in range(len(df[price])):
        if i + 1 < n:
            # Not enough history yet: emit NaN, keep c_list aligned.
            vhf = float('NaN')
            c_list.append(float('NaN'))
        else:
            start = i + 1 - n
            end = i + 1
            highest = max(df[price][start:end])
            lowest = min(df[price][start:end])
            c0 = df[price][i]
            c1 = df[price][i - 1]
            c = (c0 - c1) / c1
            c_list.append(c)
            c_sum = sum(c_list[start:end])
            # NOTE(review): a perfectly flat window gives c_sum == 0 ->
            # ZeroDivisionError; also the denominator uses *signed*
            # changes -- the FMLabs VHF formula uses absolute changes.
            # Confirm against the reference before relying on this.
            vhf = (highest - lowest) / c_sum
            vhf_list.append(vhf)
    return vhf_list
bce-sdk-0.8.16
bce-sdk-0.8.16//baidubce/services/bmr/bmr_client.pyfile:/baidubce/services/bmr/bmr_client.py:function:java_step_properties/java_step_properties
def java_step_properties(jar, main_class, arguments=None):
    """
    Create java step properties.

    :param jar: the path of .jar file
    :type jar: string
    :param main_class: the package path for main class
    :type main_class: string
    :param arguments: arguments for the step
    :type arguments: string
    :return: step property map
    :rtype: map
    """
    step = {'jar': jar, 'mainClass': main_class}
    # Only include the key when arguments were actually supplied.
    if arguments is not None:
        step['arguments'] = arguments
    return step
fake-bpy-module-2.78-20200428
fake-bpy-module-2.78-20200428//bpy/ops/particle.pyfile:/bpy/ops/particle.py:function:hair_dynamics_preset_add/hair_dynamics_preset_add
def hair_dynamics_preset_add(name: str='', remove_active: bool=False):
    """Add or remove a Hair Dynamics Preset.

    Stub from a fake ``bpy`` API module: the body does nothing and exists
    only so IDEs can resolve the operator's signature.

    :param name: Name, Name of the preset, used to make the path name
    :type name: str
    :param remove_active: remove_active
    :type remove_active: bool
    """
    pass
sundry
sundry//date_time.pyfile:/date_time.py:function:_time_string_format/_time_string_format
def _time_string_format(dt, time_zone=None): """ format a datetime based on time zone :param dt: datetime object :param time_zone: a timezone object, or None for local time zone :return: """ return dt.astimezone(time_zone).isoformat()
zope
zope//schema/vocabulary.pyfile:/schema/vocabulary.py:function:_clear/_clear
def _clear():
    """Remove the registries (for use by tests).

    Resets the module-level ``_vocabularies`` registry to None so a
    subsequent access re-creates it; intended for test isolation only.
    """
    global _vocabularies
    _vocabularies = None
graphql-example-0.4.4
graphql-example-0.4.4//vendor/pip/_vendor/requests/utils.pyfile:/vendor/pip/_vendor/requests/utils.py:function:unquote_header_value/unquote_header_value
def unquote_header_value(value, is_filename=False):
    """Unquotes a header value. (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    :param is_filename: when True, UNC paths (leading double backslash)
        are returned verbatim, matching IE's quoting behaviour.
    :rtype: str
    """
    # Only strings wrapped in double quotes are candidates for unquoting.
    if not (value and value[0] == value[-1] == '"'):
        return value
    inner = value[1:-1]
    if is_filename and inner[:2] == '\\\\':
        # UNC filename: leave the escaped backslashes untouched.
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
argo
argo//workflows/dsl/_workflow.pyclass:Workflow/from_string
@classmethod
def from_string(cls, wf: str, validate: bool=True) ->'Workflow':
    """Create a Workflow from a YAML string.

    The raw YAML is wrapped in a ``{'data': ...}`` envelope before being
    handed to the (name-mangled) class deserializer.
    """
    envelope = {'data': wf}
    return cls.__deserialize(envelope, validate=validate)
plone.multilingual-1.2.1
plone.multilingual-1.2.1//src/plone/multilingual/interfaces.pyclass:ILanguageIndependentFieldsManager/copy_fields
def copy_fields(translation):
    """Copy language independent fields to translation.

    Interface declaration (zope.interface style): the body is
    intentionally empty; concrete adapters provide the copying logic.

    :param translation: target translation object that receives the
        language-independent field values.
    """
trollimage
trollimage//version.pyfile:/version.py:function:render_pep440_pre/render_pep440_pre
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces['closest-tag']
    distance = pieces['distance']
    if not tag:
        # Untagged history: synthesize a 0-based dev version.
        return '0.post.dev%d' % distance
    if distance:
        return tag + '.post.dev%d' % distance
    return tag
fake-bpy-module-2.80-20200428
fake-bpy-module-2.80-20200428//bpy_extras/object_utils.pyfile:/bpy_extras/object_utils.py:function:object_data_add/object_data_add
def object_data_add(context: 'bpy.types.Context', obdata: 'bpy.data', operator: 'bpy.types.Operator'=None, name: str=None) ->'bpy.types.Object':
    """Add an object using the view context and preference to initialize the
    location, rotation and layer.

    Stub from a fake ``bpy`` API module: the body does nothing and exists
    only so IDEs can resolve signatures and types.

    :param context: The context to use.
    :type context: 'bpy.types.Context'
    :param obdata: the data used for the new object.
    :type obdata: 'bpy.data'
    :param operator: The operator, checked for location and rotation properties.
    :type operator: 'bpy.types.Operator'
    :param name: Optional name
    :type name: str
    :return: the newly created object in the scene.
    """
    pass
django-account-modified-0.1.8
django-account-modified-0.1.8//account/util.pyfile:/account/util.py:function:build_redirect_url/build_redirect_url
def build_redirect_url(request, default_url):
    """
    Retrieve the post-login redirect URL from the session.

    Falls back to `default_url` when the stored URL is missing or looks
    unsafe (contains '//' or a space). The session entry is removed
    either way.
    """
    candidate = request.session.get('login_redirect_url')
    unsafe = not candidate or '//' in candidate or ' ' in candidate
    url = default_url if unsafe else candidate
    try:
        del request.session['login_redirect_url']
    except KeyError:
        pass
    return url
thriftpy2-0.4.11
thriftpy2-0.4.11//thriftpy2/parser/parser.pyfile:/thriftpy2/parser/parser.py:function:p_cpp_include/p_cpp_include
def p_cpp_include(p):
    # NOTE: the docstring below is the PLY grammar production for this
    # rule -- yacc parses it via __doc__, so it must not be reworded.
    # No semantic action is taken for cpp_include directives here.
    """cpp_include : CPP_INCLUDE LITERAL"""
gandi.cli-1.5
gandi.cli-1.5//gandi/cli/modules/iaas.pyclass:Iaas/from_hostname
@classmethod
def from_hostname(cls, hostname):
    """Retrieve the virtual machine id associated with a hostname.

    Returns None implicitly when no machine matches.
    """
    matches = cls.list({'hostname': str(hostname)})
    if matches:
        return matches[0]['id']
fake-blender-api-2.79-0.3.1
fake-blender-api-2.79-0.3.1//bpy/ops/sculpt.pyfile:/bpy/ops/sculpt.py:function:set_detail_size/set_detail_size
def set_detail_size():
    """Set the mesh detail (either relative or constant one, depending on
    current dyntopo mode)

    Stub from a fake ``bpy`` API module: the body does nothing and exists
    only so IDEs can resolve the operator's signature.
    """
    pass
concrete_settings
concrete_settings//core.pyclass:SettingsMeta/_is_setting_name
@classmethod
def _is_setting_name(mcs, name: str) ->bool:
    """Return True when `name` is written entirely in upper case and does
    not start with an underscore."""
    return name == name.upper() and not name.startswith('_')
CMax-5.14
CMax-5.14//cmax/util.pyfile:/cmax/util.py:function:line_indices/line_indices
def line_indices(xxx_todo_changeme, xxx_todo_changeme1):
    """
    Takes two cells in the grid (each described by a pair of integer
    indices), and returns the list of grid cells lying on the line
    segment between them (a DDA-style traversal along the major axis).
    """
    i0, j0 = xxx_todo_changeme
    i1, j1 = xxx_todo_changeme1
    for coord in (i0, j0, i1, j1):
        assert type(coord) == int, 'Args to lineIndices must be pairs of integers'
    cells = [(i0, j0)]
    di = i1 - i0
    dj = j1 - j0
    # Accumulator starts at 0.5 so int() truncation rounds to nearest.
    acc = 0.5
    if abs(di) > abs(dj):
        # i is the major axis: step i by +/-1, interpolate j.
        slope = float(dj) / float(di)
        acc += j0
        step = -1 if di < 0 else 1
        slope *= step
        while i0 != i1:
            i0 += step
            acc += slope
            cells.append((i0, int(acc)))
    elif dj != 0:
        # j is the major axis (or the axes tie): interpolate i.
        slope = float(di) / float(dj)
        acc += i0
        step = -1 if dj < 0 else 1
        slope *= step
        while j0 != j1:
            j0 += step
            acc += slope
            cells.append((int(acc), j0))
    return cells
fdc
fdc//hierarchy.pyfile:/hierarchy.py:function:build_dendrogram/build_dendrogram
def build_dendrogram(hierarchy, noise_range):
    """Constructs the linkage matrix for plotting using the scipy.hierarchy function

    Parameters
    ----------
    hierarchy : list of dictionaries, length of list = number of coarse graining steps
        First element of the list is the dictionary specifying the clusters at the finest scale
        Further elements of the list are coarsed grained. Each element of the list are for different delta values

    noise_range : array-like, length = number of coarse graining steps
        The value of the noise parameter at every scale/step

    Returns
    -------
    Z : array-like, shape=(n_coarse_grain,4) ; see scipy for more info
        Linkage matrix for plotting dendrogram
    """
    Z = []
    # Cluster centers at the finest scale: these become the dendrogram leaves.
    initial_idx_centers = list(hierarchy[0]['idx_centers'])
    # Maps an original center index -> id of the merged cluster it currently
    # belongs to; -1 means "still an unmerged leaf".
    dict_center_relative = {}
    for idx in initial_idx_centers:
        dict_center_relative[idx] = -1
    depth = len(hierarchy)
    n_init_centers = len(initial_idx_centers)
    # Ids of newly formed clusters start at n_init_centers, scipy-style.
    merge_count = 0
    # merged-cluster id -> number of leaf members it contains.
    member_count_dict = {}
    for d in range(depth - 1):
        pre_idx_centers = hierarchy[d]['idx_centers']
        cur_idx_centers = hierarchy[d + 1]['idx_centers']
        pre_cluster_labels = hierarchy[d]['cluster_labels']  # NOTE(review): unused below
        cur_cluster_labels = hierarchy[d + 1]['cluster_labels']
        for idx in pre_idx_centers:
            # A center absent at the coarser scale was absorbed into the
            # cluster of `new_idx`; record that as one linkage row.
            if idx not in cur_idx_centers:
                i = cur_cluster_labels[idx]
                new_idx = cur_idx_centers[i]
                # Linkage row: [left id, right id, merge height, member count]
                z = [-1, -1, -1, -1]
                if (dict_center_relative[idx] == -1) & (dict_center_relative[new_idx] == -1):
                    # Both sides are still leaves: link their leaf positions.
                    z[0] = initial_idx_centers.index(idx)
                    z[1] = initial_idx_centers.index(new_idx)
                    z[2] = noise_range[d + 1]
                    z[3] = 2
                elif (dict_center_relative[idx] == -1) & (dict_center_relative[new_idx] != -1):
                    # Leaf merges into an already-formed cluster.
                    z[0] = initial_idx_centers.index(idx)
                    z[1] = dict_center_relative[new_idx]
                    z[2] = noise_range[d + 1]
                    z[3] = 1 + member_count_dict[z[1]]
                elif (dict_center_relative[idx] != -1) & (dict_center_relative[new_idx] == -1):
                    # Already-formed cluster absorbs a leaf.
                    z[0] = dict_center_relative[idx]
                    z[1] = initial_idx_centers.index(new_idx)
                    z[2] = noise_range[d + 1]
                    z[3] = 1 + member_count_dict[z[0]]
                else:
                    # Two previously-merged clusters join.
                    z[0] = dict_center_relative[idx]
                    z[1] = dict_center_relative[new_idx]
                    z[2] = noise_range[d + 1]
                    z[3] = member_count_dict[z[0]] + member_count_dict[z[1]]
                # Register the newly formed cluster and repoint both parents
                # at it so later merges reference the combined cluster.
                new_cluster_idx = merge_count + n_init_centers
                dict_center_relative[idx] = new_cluster_idx
                dict_center_relative[new_idx] = new_cluster_idx
                member_count_dict[new_cluster_idx] = z[3]
                merge_count += 1
                Z.append(z)
    return Z
doctor-3.13.7
doctor-3.13.7//doctor/docs/base.pyclass:BaseDirective/purge_docs
@classmethod
def purge_docs(cls, app, env, docname):
    """Handler for Sphinx's env-purge-doc event.

    Sphinx emits this event when all traces of a source file should be
    removed from the environment (the file was deleted, or is about to
    be re-read). Extensions that cache state on the environment must
    drop their entries for `docname` here; we remove it from the set of
    documents tracked under our directive's state attribute.
    """
    state = getattr(env, cls.directive_name, None)
    if not state:
        return
    if docname in state.doc_names:
        state.doc_names.remove(docname)
deepforest-0.2.15
deepforest-0.2.15//keras_retinanet/utils/image.pyfile:/keras_retinanet/utils/image.py:function:_check_range/_check_range
def _check_range(val_range, min_val=None, max_val=None): """ Check whether the range is a valid range. Args val_range: A pair of lower and upper bound. min_val: Minimal value for the lower bound. max_val: Maximal value for the upper bound. """ if val_range[0] > val_range[1]: raise ValueError('interval lower bound > upper bound') if min_val is not None and val_range[0] < min_val: raise ValueError('invalid interval lower bound') if max_val is not None and val_range[1] > max_val: raise ValueError('invalid interval upper bound')
tdcsm-0.3.9.2
tdcsm-0.3.9.2//tdcsm/tdviz.pyclass:tdviz/get_siteid
def get_siteid(df, default='unknown'):
    """Look up a SiteID column in a dataframe (case-insensitive, ignoring
    underscores). When found, return the value from the first row;
    otherwise return `default`."""
    print('looking for SiteID in dataframe...')
    found = ''
    for column in df.columns:
        if column.lower().replace('_', '') == 'siteid':
            found = df[column].iloc[0]
            break
    if found == '':
        print('did not find, using default', default)
        return default
    print('found SiteID in dataframe', found)
    return found
bpy
bpy//props.pyfile:/props.py:function:CollectionProperty/CollectionProperty
def CollectionProperty(type=None, name: str='', description: str='', options: set={'ANIMATABLE'}, tags: set={}):
    """Returns a new collection property definition.

    Stub from a fake ``bpy`` API module: the body does nothing; the
    signature mirrors Blender's API for IDE autocompletion.
    NOTE(review): the mutable defaults (and ``{}`` for `tags`, which is a
    dict literal, not a set) are copied from the upstream signature --
    harmless here because the body never touches them.

    :param type: A subclass of bpy.types.PropertyGroup or bpy.types.ID.
    :param name: Name used in the user interface.
    :type name: str
    :param description: Text used for the tooltip and api documentation.
    :type description: str
    :param options: Enumerator in [‘HIDDEN’, ‘SKIP_SAVE’, ‘ANIMATABLE’, ‘LIBRARY_EDITABLE’, ‘PROPORTIONAL’,’TEXTEDIT_UPDATE’].
    :type options: set
    :param tags: Enumerator of tags that are defined by parent class.
    :type tags: set
    """
    pass
completion_aggregator
completion_aggregator//transformers.pyclass:AggregatorAnnotationTransformer/name
@classmethod
def name(cls):
    """
    Return the name of the transformer.

    Constant identifier string; presumably used as a registry key by the
    transformer framework -- TODO confirm before renaming.
    """
    return 'completion_aggregator_annotator'
COMPAS-0.15.6
COMPAS-0.15.6//src/compas/utilities/colors.pyfile:/src/compas/utilities/colors.py:function:i_to_blue/i_to_blue
def i_to_blue(i, normalize=False):
    """Convert a number between 0.0 and 1.0 to a shade of blue.

    Parameters
    ----------
    i : float
        A number between 0.0 and 1.0 (values outside are clamped).
    normalize : bool, optional
        When True, return floats in [0.0, 1.0]; otherwise integers
        in [0, 255].

    Returns
    -------
    tuple
        The (R, G, B) values of the corresponding shade of blue.

    Examples
    --------
    >>> i_to_blue(1.0)
    (0, 0, 255)
    >>> i_to_blue(0.0)
    (255, 255, 255)
    """
    # Clamp into the unit interval, then fade red/green with intensity.
    i = min(max(i, 0.0), 1.0)
    level = min((1 - i) * 255, 255)
    if normalize:
        return level / 255, level / 255, 1.0
    return int(level), int(level), 255
tlist-0.6
tlist-0.6//tlist/tlist.pyfile:/tlist/tlist.py:function:tlist2dlist/tlist2dlist
def tlist2dlist(tl):
    """Convert a list of (key, value) pairs into a list of single-entry
    dicts.

    >>> tlist2dlist([('k1', 'v1'), ('k2', 'v2')])
    [{'k1': 'v1'}, {'k2': 'v2'}]

    :param tl: sequence of 2-item pairs
    :return: list of one-key dicts, in the same order
    """
    # Comprehension replaces the original index-based append loop
    # (same behavior: only pair[0]/pair[1] are used).
    return [{pair[0]: pair[1]} for pair in tl]
pyromaths
pyromaths//ex/lycee/SecondDegre.pyfile:/ex/lycee/SecondDegre.py:function:signe/signe
def signe(nombre):
    """Return the sign of the argument as a string: '-' for negative
    numbers, '+' otherwise (zero included)."""
    return '-' if nombre < 0 else '+'
tyssue-0.7.1
tyssue-0.7.1//tyssue/geometry/base_geometry.pyclass:BaseGeometry/scale
@staticmethod def scale(sheet, delta, coords): """ Scales the coordinates `coords` by a factor `delta` """ sheet.vert_df[coords] = sheet.vert_df[coords] * delta
you_get
you_get//extractors/wanmen.pyfile:/extractors/wanmen.py:function:_wanmen_get_boke_id_by_json_topic_part/_wanmen_get_boke_id_by_json_topic_part
def _wanmen_get_boke_id_by_json_topic_part(json_content, tIndex, pIndex): """JSON, int, int, int->str Get one BokeCC video ID with courseid+topicID+partID.""" return json_content[0]['Topics'][tIndex]['Parts'][pIndex]['ccVideoLink']
PyQtWebEngine-5.14.0
PyQtWebEngine-5.14.0//configure.pyfile:/configure.py:function:_has_stubs/_has_stubs
def _has_stubs(pkg_config): """ See if a stub file for any of the modules will be generated. pkg_config is the package configuration. """ for module_config in pkg_config.modules: if module_config.pep484_stub_file: return True return False
msstitch-2.19
msstitch-2.19//src/app/actions/mslookup/proteinquant.pyfile:/src/app/actions/mslookup/proteinquant.py:function:map_psmnrcol_to_quantcol/map_psmnrcol_to_quantcol
def map_psmnrcol_to_quantcol(quantcols, psmcols, tablefn_map):
    """Yield tuples of (table filename, isobaric quant column) and, when
    PSM-count columns are supplied, the matching number-of-PSM column as
    a third element."""
    if psmcols:
        for fn in quantcols:
            for qcol, pcol in zip(quantcols[fn], psmcols[fn]):
                yield tablefn_map[fn], qcol, pcol
    else:
        for fn in quantcols:
            for qcol in quantcols[fn]:
                yield tablefn_map[fn], qcol
Telethon-1.13.0
Telethon-1.13.0//telethon/crypto/rsa.pyfile:/telethon/crypto/rsa.py:function:get_byte_array/get_byte_array
def get_byte_array(integer):
    """Return the minimal big-endian byte representation of a
    non-negative int (an empty bytes object for 0)."""
    nbytes = (integer.bit_length() + 7) // 8
    return integer.to_bytes(nbytes, byteorder='big', signed=False)
robottelo
robottelo//cli/auth.pyclass:Auth/status
@classmethod
def status(cls, options=None):
    """Show login status via the 'status' sub-command, returning the
    CLI result parsed as CSV."""
    cls.command_sub = 'status'
    command = cls._construct_command(options)
    return cls.execute(command, output_format='csv')
aiida_crystal17
aiida_crystal17//data/basis_set.pyfile:/data/basis_set.py:function:_parse_first_line/_parse_first_line
def _parse_first_line(line, fname):
    """
    parse the first line of the basis set

    :param line: the line string
    :param fname: the filename string
    :return: (atomic_number, basis_type, num_shells, newline)
    """
    from aiida.common.exceptions import ParsingError
    first_line = line.strip().split()
    # Header must be exactly "<atomic number code> <number of shells>".
    if not len(first_line) == 2:
        raise ParsingError(
            "The first line should contain only two fields: '{}' for file {}"
            .format(line, fname))
    atomic_number_str = first_line[0]
    if not atomic_number_str.isdigit():
        raise ParsingError(
            "The first field should be the atomic number '{}' for file {}".
            format(line, fname))
    anumber = int(atomic_number_str)
    atomic_number = None
    basis_type = None
    # Number-code conventions handled below:
    #   < 99        plain atomic number, all-electron basis
    #   201..998    valence-electron (ECP) basis -- not supported
    #   > 1000      all-electron basis encoded with Z in the last two digits
    # NOTE(review): codes 99-200 and 999-1000 fall through with
    # atomic_number still None and are rejected as illegal below --
    # confirm this gap is intentional.
    if anumber < 99:
        atomic_number = anumber
        basis_type = 'all-electron'
    elif 200 < anumber < 999:
        raise NotImplementedError(
            'valence electron basis sets not currently supported')
    elif anumber > 1000:
        atomic_number = anumber % 100
        basis_type = 'all-electron'
    if atomic_number is None:
        raise ParsingError('Illegal atomic number {} for file {}'.format(
            anumber, fname))
    num_shells_str = first_line[1]
    if not num_shells_str.isdigit():
        raise ParsingError(
            'The second field should be the number of shells {} for file {}'
            .format(line, fname))
    num_shells = int(num_shells_str)
    # Re-emit a normalised header: all-electron bases keep Z, anything
    # else is re-encoded as 200 + Z.
    newline = '{0} {1}\n'.format(atomic_number if basis_type ==
        'all-electron' else 200 + atomic_number, num_shells)
    return atomic_number, basis_type, num_shells, newline