Columns: repo (string, length 1 to 29), path (string, length 24 to 332), code (string, length 39 to 579k)
mlpy
mlpy//wavelet/padding.pyfile:/wavelet/padding.py:function:next_p2/next_p2
def next_p2(n):
    """Return the smallest power of 2 strictly greater than n
    (n positive and >= 1).
    """
    if n < 1:
        raise ValueError('n must be >= 1')
    v = 2
    while v <= n:
        v = v * 2
    return v
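A quick check of the strictly-greater semantics implied by the loop above (illustrative values, not from the original source):

    next_p2(5)   # -> 8
    next_p2(8)   # -> 16, not 8: the result is strictly greater than n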
txtorcon
txtorcon//interface.pyclass:ICircuitListener/circuit_new
def circuit_new(circuit):
    """A new circuit has been created. You'll always get one of these for
    every Circuit even if it doesn't go through the "launched" state.
    """
sregistry-0.2.35
sregistry-0.2.35//sregistry/utils/names.pyfile:/sregistry/utils/names.py:function:set_default/set_default
def set_default(item, default, use_default):
    """If the item provided is None and the boolean use_default is True,
    return the default. Otherwise, return the item.
    """
    if item is None and use_default:
        return default
    return item
GSAS-II-WONDER_osx-1.0.4
GSAS-II-WONDER_osx-1.0.4//GSAS-II-WONDER/GSASIIlattice.pyfile:/GSAS-II-WONDER/GSASIIlattice.py:function:Hx2Rh/Hx2Rh
def Hx2Rh(Hx):
    """Convert hexagonal Miller indices Hx to rhombohedral indices.

    Returns 0 if the conversion is not allowed, i.e. when -H + K + L is
    not divisible by 3.
    """
    Rh = [0, 0, 0]
    itk = -Hx[0] + Hx[1] + Hx[2]
    if itk % 3 != 0:
        return 0
    else:
        Rh[1] = itk // 3
        Rh[0] = Rh[1] + Hx[0]
        Rh[2] = Rh[1] - Hx[1]
        if Rh[0] < 0:
            for i in range(3):
                Rh[i] = -Rh[i]
        return Rh
pyoteapp
pyoteapp//csvreader.pyfile:/csvreader.py:function:limovieParser/limovieParser
def limovieParser(line, frame, time, value, ref1, ref2, ref3, extra):
    """Limovie sample line ---
    3.5,21381195,21381200,22,27,43.0000,,,,,2737.8,3897.32 ...
    """
    part = line.split(',')
    frame.append(part[0])
    time.append('[' + part[3] + ':' + part[4] + ':' + part[5] + ']')
    value.append(part[10])
    if part[11]:
        ref1.append(part[11])
    if part[12]:
        ref2.append(part[12])
Flask-Admin-1.5.6
Flask-Admin-1.5.6//flask_admin/contrib/mongoengine/tools.pyfile:/flask_admin/contrib/mongoengine/tools.py:function:parse_like_term/parse_like_term
def parse_like_term(term):
    """
    Parse search term into (operation, term) tuple. Recognizes operators
    at the beginning of the search term. Case insensitive is the default.

        * = case sensitive (can precede other operators)
        ^ = starts with
        = = exact

    :param term: Search term
    """
    case_sensitive = term.startswith('*')
    if case_sensitive:
        term = term[1:]
    if term.startswith('^'):
        oper = 'startswith'
        term = term[1:]
    elif term.startswith('='):
        oper = 'exact'
        term = term[1:]
    else:
        oper = 'contains'
    if not case_sensitive:
        oper = 'i' + oper
    return oper, term
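A few illustrative calls (hypothetical search terms, following the operator table in the docstring):

    parse_like_term('foo')    # -> ('icontains', 'foo')
    parse_like_term('^foo')   # -> ('istartswith', 'foo')
    parse_like_term('*=foo')  # -> ('exact', 'foo')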
confindr-0.7.2
confindr-0.7.2//confindr_src/confindr.pyfile:/confindr_src/confindr.py:function:base_dict_to_string/base_dict_to_string
def base_dict_to_string(base_dict):
    """
    Converts a dictionary of bases and counts to a string.
    {'C': 12, 'A': 4} gets converted to C:12;A:4

    :param base_dict: Dictionary of bases and counts created by find_if_multibase
    :return: String representing that dictionary.
    """
    outstr = ''
    base_list = sorted(base_dict.items(), key=lambda kv: kv[1], reverse=True)
    for base in base_list:
        outstr += '{}:{};'.format(base[0], base[1])
    return outstr[:-1]
cliez-2.1.1
cliez-2.1.1//cliez/component.pyclass:Component/add_arguments
@classmethod
def add_arguments(cls):
    """Define sub-command arguments here."""
    pass
nixml-0.2
nixml-0.2//nixml/snapshots.pyfile:/nixml/snapshots.py:function:snapshot_cache/snapshot_cache
def snapshot_cache(check_exists):
    """Returns the location of the snapshot file."""
    from os import path, makedirs
    cache_directory = path.expanduser('~/.cache/nixml/')
    cache_file = cache_directory + 'snapshots.tsv'
    if check_exists and not path.exists(cache_file):
        return None
    makedirs(cache_directory, exist_ok=True)
    return cache_file
Tailbone-0.8.92
Tailbone-0.8.92//tailbone/views/master.pyclass:MasterView/get_model_class
@classmethod
def get_model_class(cls, error=True):
    """
    Returns the data model class for which the master view exists.
    """
    if not hasattr(cls, 'model_class') and error:
        raise NotImplementedError('You must define the `model_class` for: {}'.format(cls))
    return getattr(cls, 'model_class', None)
hazelcast
hazelcast//protocol/codec/atomic_reference_get_and_alter_codec.pyfile:/protocol/codec/atomic_reference_get_and_alter_codec.py:function:decode_response/decode_response
def decode_response(client_message, to_object=None):
    """Decode response from client message."""
    parameters = dict(response=None)
    if not client_message.read_bool():
        parameters['response'] = to_object(client_message.read_data())
    return parameters
mrjob
mrjob//job.pyclass:MRJob/run
@classmethod
def run(cls):
    """Entry point for running job from the command-line.

    This is also the entry point when a mapper or reducer is run by
    Hadoop Streaming.

    Does one of:

    * Run a mapper (:option:`--mapper`). See :py:meth:`run_mapper`
    * Run a combiner (:option:`--combiner`). See :py:meth:`run_combiner`
    * Run a reducer (:option:`--reducer`). See :py:meth:`run_reducer`
    * Run the entire job. See :py:meth:`run_job`
    """
    cls().execute()
flywheel
flywheel//models/search_save_search_update.pyclass:SearchSaveSearchUpdate/positional_to_model
@staticmethod
def positional_to_model(value):
    """Converts a positional argument to a model value"""
    return value
blockade-toolkit-1.1.5
blockade-toolkit-1.1.5//blockade/cli/config.pyfile:/blockade/cli/config.py:function:show_config/show_config
def show_config(config):
    """Show the current configuration."""
    print('\nCurrent Configuration:\n')
    for k, v in sorted(config.config.items()):
        print('{0:15}: {1}'.format(k, v))
tycho-0.4.1
tycho-0.4.1//tycho/db/connection.pyfile:/tycho/db/connection.py:function:map_db_collection_with_class/map_db_collection_with_class
def map_db_collection_with_class(db, db_classes):
    """Map database collections to their wrapper classes."""
    for db_class in db_classes:
        name = db_class._collection
        if name:
            setattr(db, name, db_class(getattr(db, name)))
wflow
wflow//wflow_emwaq.pyfile:/wflow_emwaq.py:function:dw_WriteNrSegments/dw_WriteNrSegments
def dw_WriteNrSegments(fname, nr):
    """
    Writes the number of segments to the B3 file (B3_nrofseg.inc).
    """
    exfile = open(fname, 'w')
    print(';Written by dw_WriteNrSegments', file=exfile)
    print(str(nr) + ' ; nr of segments', file=exfile)
    exfile.close()
robottelo
robottelo//cli/contentview.pyclass:ContentView/puppet_module_remove
@classmethod
def puppet_module_remove(cls, options):
    """Remove a puppet module from the content view"""
    cls.command_sub = 'puppet-module remove'
    return cls.execute(cls._construct_command(options), output_format='csv')
fsleyes
fsleyes//parseargs.pyfile:/parseargs.py:function:_generateSpecial_VolumeOpts_overrideDataRange/_generateSpecial_VolumeOpts_overrideDataRange
def _generateSpecial_VolumeOpts_overrideDataRange(overlayList, displayCtx, source, longArg):
    """Generates the :attr:`.VolumeOpts.overrideDataRange` option.

    If the :attr:`.VolumeOpts.enableOverrideDataRange` property is ``False``,
    no arguments are generated.
    """
    if not source.enableOverrideDataRange:
        return []
    else:
        return False
aiida-core-1.2.1
aiida-core-1.2.1//aiida/manage/external/rmq.pyclass:ProcessLauncher/handle_continue_exception
@staticmethod
def handle_continue_exception(node, exception, message):
    """Handle exception raised in `_continue` call.

    If the process state of the node has not yet been put to excepted, the exception was
    raised before the process instance could be reconstructed, for example when the process
    class could not be loaded, thereby circumventing the exception handling of the state
    machine. Raising this exception will then acknowledge the process task with RabbitMQ,
    leaving an uncleaned node in the `CREATED` state forever. Therefore we have to perform
    the node cleaning manually.

    :param exception: the exception object
    :param message: string message to use for the log message
    """
    from aiida.engine import ProcessState
    if not node.is_excepted:
        node.logger.exception(message)
        node.set_exception(str(exception))
        node.set_process_state(ProcessState.EXCEPTED)
        node.seal()
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/elasticache.pyfile:/pyboto3/elasticache.py:function:modify_replication_group/modify_replication_group
def modify_replication_group(ReplicationGroupId=None, ReplicationGroupDescription=None,
        PrimaryClusterId=None, SnapshottingClusterId=None, AutomaticFailoverEnabled=None,
        CacheSecurityGroupNames=None, SecurityGroupIds=None, PreferredMaintenanceWindow=None,
        NotificationTopicArn=None, CacheParameterGroupName=None, NotificationTopicStatus=None,
        ApplyImmediately=None, EngineVersion=None, AutoMinorVersionUpgrade=None,
        SnapshotRetentionLimit=None, SnapshotWindow=None, CacheNodeType=None, NodeGroupId=None):
    """
    Modifies the settings for a replication group.
    See also: AWS API Documentation

    :example: response = client.modify_replication_group(
        ReplicationGroupId='string',
        ReplicationGroupDescription='string',
        PrimaryClusterId='string',
        SnapshottingClusterId='string',
        AutomaticFailoverEnabled=True|False,
        CacheSecurityGroupNames=['string'],
        SecurityGroupIds=['string'],
        PreferredMaintenanceWindow='string',
        NotificationTopicArn='string',
        CacheParameterGroupName='string',
        NotificationTopicStatus='string',
        ApplyImmediately=True|False,
        EngineVersion='string',
        AutoMinorVersionUpgrade=True|False,
        SnapshotRetentionLimit=123,
        SnapshotWindow='string',
        CacheNodeType='string',
        NodeGroupId='string'
    )

    :type ReplicationGroupId: string
    :param ReplicationGroupId: [REQUIRED] The identifier of the replication group to modify.

    :type ReplicationGroupDescription: string
    :param ReplicationGroupDescription: A description for the replication group. Maximum
        length is 255 characters.

    :type PrimaryClusterId: string
    :param PrimaryClusterId: For replication groups with a single primary, if this parameter
        is specified, ElastiCache promotes the specified cluster in the specified replication
        group to the primary role. The nodes of all other clusters in the replication group
        are read replicas.

    :type SnapshottingClusterId: string
    :param SnapshottingClusterId: The cache cluster ID that is used as the daily snapshot
        source for the replication group. This parameter cannot be set for Redis (cluster
        mode enabled) replication groups.

    :type AutomaticFailoverEnabled: boolean
    :param AutomaticFailoverEnabled: Determines whether a read replica is automatically
        promoted to read/write primary if the existing primary encounters a failure.
        Valid values: true | false
        Note: ElastiCache Multi-AZ replication groups are not supported on:
            Redis versions earlier than 2.8.6.
            Redis (cluster mode disabled): T1 and T2 cache node types.
            Redis (cluster mode enabled): T1 node types.

    :type CacheSecurityGroupNames: list
    :param CacheSecurityGroupNames: A list of cache security group names to authorize for
        the clusters in this replication group. This change is asynchronously applied as
        soon as possible. This parameter can be used only with replication group containing
        cache clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC).
        Constraints: Must contain no more than 255 alphanumeric characters. Must not be
        Default.
        (string) --

    :type SecurityGroupIds: list
    :param SecurityGroupIds: Specifies the VPC Security Groups associated with the cache
        clusters in the replication group. This parameter can be used only with replication
        group containing cache clusters running in an Amazon Virtual Private Cloud
        (Amazon VPC).
        (string) --

    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: Specifies the weekly time range during which
        maintenance on the cluster is performed. It is specified as a range in the format
        ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a
        60 minute period.
        Valid values for ddd are: sun mon tue wed thu fri sat
        Example: sun:23:00-mon:01:30

    :type NotificationTopicArn: string
    :param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to
        which notifications are sent.
        Note: The Amazon SNS topic owner must be same as the replication group owner.

    :type CacheParameterGroupName: string
    :param CacheParameterGroupName: The name of the cache parameter group to apply to all
        of the clusters in this replication group. This change is asynchronously applied as
        soon as possible for parameters when the ApplyImmediately parameter is specified as
        true for this request.

    :type NotificationTopicStatus: string
    :param NotificationTopicStatus: The status of the Amazon SNS notification topic for the
        replication group. Notifications are sent only if the status is active.
        Valid values: active | inactive

    :type ApplyImmediately: boolean
    :param ApplyImmediately: If true, this parameter causes the modifications in this
        request and any pending modifications to be applied, asynchronously and as soon as
        possible, regardless of the PreferredMaintenanceWindow setting for the replication
        group. If false, changes to the nodes in the replication group are applied on the
        next maintenance reboot, or the next failure reboot, whichever occurs first.
        Valid values: true | false
        Default: false

    :type EngineVersion: string
    :param EngineVersion: The upgraded version of the cache engine to be run on the cache
        clusters in the replication group.
        Important: You can upgrade to a newer engine version (see Selecting a Cache Engine
        and Version), but you cannot downgrade to an earlier engine version. If you want to
        use an earlier engine version, you must delete the existing replication group and
        create it anew with the earlier engine version.

    :type AutoMinorVersionUpgrade: boolean
    :param AutoMinorVersionUpgrade: This parameter is currently disabled.

    :type SnapshotRetentionLimit: integer
    :param SnapshotRetentionLimit: The number of days for which ElastiCache retains
        automatic node group (shard) snapshots before deleting them. For example, if you
        set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for
        5 days before being deleted.
        Important: If the value of SnapshotRetentionLimit is set to zero (0), backups are
        turned off.

    :type SnapshotWindow: string
    :param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins
        taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId.
        Example: 05:00-09:00
        If you do not specify this parameter, ElastiCache automatically chooses an
        appropriate time range.

    :type CacheNodeType: string
    :param CacheNodeType: A valid cache node type that you want to scale this replication
        group to.

    :type NodeGroupId: string
    :param NodeGroupId: The name of the Node Group (called shard in the console).

    :rtype: dict
    :return: {
        'ReplicationGroup': {
            'ReplicationGroupId': 'string',
            'Description': 'string',
            'Status': 'string',
            'PendingModifiedValues': {
                'PrimaryClusterId': 'string',
                'AutomaticFailoverStatus': 'enabled'|'disabled'
            },
            'MemberClusters': ['string'],
            'NodeGroups': [
                {
                    'NodeGroupId': 'string',
                    'Status': 'string',
                    'PrimaryEndpoint': {'Address': 'string', 'Port': 123},
                    'Slots': 'string',
                    'NodeGroupMembers': [
                        {
                            'CacheClusterId': 'string',
                            'CacheNodeId': 'string',
                            'ReadEndpoint': {'Address': 'string', 'Port': 123},
                            'PreferredAvailabilityZone': 'string',
                            'CurrentRole': 'string'
                        },
                    ]
                },
            ],
            'SnapshottingClusterId': 'string',
            'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
            'ConfigurationEndpoint': {'Address': 'string', 'Port': 123},
            'SnapshotRetentionLimit': 123,
            'SnapshotWindow': 'string',
            'ClusterEnabled': True|False,
            'CacheNodeType': 'string'
        }
    }

    :returns:
        Redis versions earlier than 2.8.6.
        Redis (cluster mode disabled): T1 and T2 cache node types.
        Redis (cluster mode enabled): T1 node types.
    """
    pass
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/sns.pyfile:/pyboto3/sns.py:function:list_topics/list_topics
def list_topics(NextToken=None):
    """
    Returns a list of the requester's topics. Each call returns a limited list of topics,
    up to 100. If there are more topics, a NextToken is also returned. Use the NextToken
    parameter in a new ListTopics call to get further results.
    See also: AWS API Documentation

    :example: response = client.list_topics(
        NextToken='string'
    )

    :type NextToken: string
    :param NextToken: Token returned by the previous ListTopics request.

    :rtype: dict
    :return: {
        'Topics': [
            {'TopicArn': 'string'},
        ],
        'NextToken': 'string'
    }
    """
    pass
srunner
srunner//scenariomanager/atomic_scenario_criteria.pyclass:KeepLaneTest/_count_lane_invasion
@staticmethod
def _count_lane_invasion(weak_self, event):
    """
    Callback to update lane invasion count
    """
    self = weak_self()
    if not self:
        return
    self.actual_value += 1
autoflpy-1.1.8
autoflpy-1.1.8//autoflpy/util/plotting.pyfile:/autoflpy/util/plotting.py:function:manual_time_offset/manual_time_offset
def manual_time_offset(values_list, time_x_offset, number_of_flights):
    """Applies a manually defined time offset to all "Time" columns in the values_list."""
    if not time_x_offset:
        return values_list
    elif len(time_x_offset) != number_of_flights:
        print('time_x_offset is not the same length as the number of flights. '
              'It has not been applied.')
        return values_list
    for flight in range(number_of_flights):
        for sensor_index in range(len(values_list[flight])):
            for data_set_index in range(len(values_list[flight][sensor_index])):
                new_time_data = []
                if values_list[flight][sensor_index][data_set_index][0] == 'Time':
                    for data_point in values_list[flight][sensor_index][data_set_index][2]:
                        new_time_data.append(float(data_point) + float(time_x_offset[flight]))
                    values_list[flight][sensor_index][data_set_index][2] = new_time_data
    return values_list
hurry.custom-0.6.2
hurry.custom-0.6.2//src/hurry/custom/interfaces.pyclass:IHurryCustomAPI/register_data_language
def register_data_language(parse_func, extension):
    """Register a data language for template input.

    parse_func - a function that takes a text and parses it into
                 a data structure.
    extension - the extension to register the data language under
                (example: .json).
    """
flaky
flaky//_flaky_plugin.pyclass:_FlakyPlugin/_increment_flaky_attribute
@classmethod
def _increment_flaky_attribute(cls, test_item, flaky_attribute):
    """
    Increments the value of an attribute on a flaky test.

    :param test_item: The test callable on which to set the attribute
    :type test_item: `callable` or :class:`nose.case.Test` or :class:`Function`
    :param flaky_attribute: The name of the attribute to set
    :type flaky_attribute: `unicode`
    """
    cls._set_flaky_attribute(test_item, flaky_attribute,
                             cls._get_flaky_attribute(test_item, flaky_attribute) + 1)
pelix
pelix//shell/report.pyclass:_ReportCommands/get_namespace
@staticmethod
def get_namespace():
    """
    Retrieves the namespace of this command handler
    """
    return 'report'
util_ds-0.21
util_ds-0.21//util_ds/nlp/rouge/rouge_score.pyfile:/util_ds/nlp/rouge/rouge_score.py:function:_get_ngrams/_get_ngrams
def _get_ngrams(n, text):
    """Calculates n-grams.

    Args:
        n: which n-grams to calculate
        text: An array of tokens

    Returns:
        A set of n-grams
    """
    ngram_set = set()
    text_length = len(text)
    max_index_ngram_start = text_length - n
    for i in range(max_index_ngram_start + 1):
        ngram_set.add(tuple(text[i:i + n]))
    return ngram_set
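A quick illustration (hypothetical token list):

    _get_ngrams(2, ['the', 'cat', 'sat'])  # -> {('the', 'cat'), ('cat', 'sat')}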
cfcal-0.1.5
cfcal-0.1.5//versioneer.pyfile:/versioneer.py:function:render_pep440_pre/render_pep440_pre
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    if pieces['closest-tag']:
        rendered = pieces['closest-tag']
        if pieces['distance']:
            rendered += '.post.dev%d' % pieces['distance']
    else:
        rendered = '0.post.dev%d' % pieces['distance']
    return rendered
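For instance (hypothetical pieces dict, with the two keys the function reads):

    render_pep440_pre({'closest-tag': '1.2', 'distance': 3})  # -> '1.2.post.dev3'
    render_pep440_pre({'closest-tag': None, 'distance': 3})   # -> '0.post.dev3'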
z3c.menu.ready2go-1.1.0
z3c.menu.ready2go-1.1.0//src/z3c/menu/ready2go/interfaces.pyclass:IMenuItem/getURLContext
def getURLContext():
    """Returns the context for the base url."""
nipy-0.4.2
nipy-0.4.2//nipy/algorithms/fwhm.pyfile:/nipy/algorithms/fwhm.py:function:_calc_detlam/_calc_detlam
def _calc_detlam(xx, yy, zz, yx, zx, zy):
    """
    Calculate the determinant of the symmetric 3x3 matrix

        [[xx, yx, zx],
         [yx, yy, zy],
         [zx, zy, zz]]
    """
    return zz * (yy * xx - yx ** 2) - zy * (zy * xx - zx * yx) + zx * (zy * yx - zx * yy)
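A quick cross-check of the closed-form expression against numpy (illustrative values; the numpy import is an assumption for the check, not part of the original module):

    import numpy as np
    xx, yy, zz, yx, zx, zy = 2.0, 3.0, 4.0, 0.5, 0.25, 0.75
    m = np.array([[xx, yx, zx], [yx, yy, zy], [zx, zy, zz]])
    assert np.isclose(_calc_detlam(xx, yy, zz, yx, zx, zy), np.linalg.det(m))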
statick-0.4.0
statick-0.4.0//statick_tool/exceptions.pyclass:Exceptions/print_exception_warning
@classmethod
def print_exception_warning(cls, tool: str) -> None:
    """
    Print warning about exception not being applied for an issue.

    Warning will only be printed once per tool.
    """
    print('[WARNING] File exceptions not available for {} tool plugin due to '
          'lack of absolute paths for issues.'.format(tool))
bamnostic-1.1.4
bamnostic-1.1.4//bamnostic/bgzf.pyfile:/bamnostic/bgzf.py:function:_format_warnings/_format_warnings
def _format_warnings(message, category, filename, lineno, file=None, line=None):
    """Warning formatter

    Args:
        message: warning message
        category (str): level of warning
        filename (str): path for warning output
        lineno (int): Where the warning originates

    Returns:
        Formatted warning for logging purposes
    """
    return ' {}:{}:{}: {}\n'.format(category.__name__, filename, lineno, message)
skelebot-1.18.4
skelebot-1.18.4//skelebot/systems/execution/executor.pyfile:/skelebot/systems/execution/executor.py:function:getCommands/getCommands
def getCommands(args):
    """Split (if needed) and obtain the list of commands that were sent into Skelebot"""
    commands = []
    command = []
    for arg in args:
        if arg == '+':
            commands.append(command)
            command = []
        else:
            command.append(arg)
    commands.append(command)
    return commands
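For example (hypothetical argument list, using the '+' separator handled above):

    getCommands(['build', '+', 'test', '-v'])  # -> [['build'], ['test', '-v']]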
pyramid
pyramid//interfaces.pyclass:IPEP302Loader/get_source
def get_source(fullname):
    """Return the source code for the module identified by 'fullname'.

    Return a string, using newline characters for line endings, or None
    if the source is not available.

    Raise ImportError if the module can't be found by the importer at all.
    """
hyperparameter_hunter-3.0.0
hyperparameter_hunter-3.0.0//hyperparameter_hunter/library_helpers/keras_optimization_helper.pyfile:/hyperparameter_hunter/library_helpers/keras_optimization_helper.py:function:clean_parenthesized_string/clean_parenthesized_string
def clean_parenthesized_string(string):
    """Produce a clipped substring of `string` comprising all characters from the beginning
    of `string` through the closing paren that matches the first opening paren in `string`

    Parameters
    ----------
    string: String
        A string that contains a parenthesized statement in its entirety, along with extra
        content to be removed. The target parenthesized statement may contain additional
        parentheses

    Returns
    -------
    clean_string: String
        A substring of `string`, extending from the beginning of `string`, through the
        closing paren that matches the first opening paren found, producing a valid
        parenthesized statement
    """
    close_paren = 0
    for i in range(len(string)):
        if string[i] == '(':
            close_paren += 1
        elif string[i] == ')':
            if close_paren > 1:
                close_paren -= 1
            else:
                return string[:i + 1]
    raise ValueError(
        f'Need closing paren:"""\n{string}\n"""\nRemaining close_paren: {close_paren}')
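To illustrate the matching (hypothetical input; content after the matched closing paren is clipped):

    clean_parenthesized_string("Dense(Activation('relu'), units=32).build")
    # -> "Dense(Activation('relu'), units=32)"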
thug-1.6.1
thug-1.6.1//thug/ThugAPI/IThugAPI.pyclass:IThugAPI/disable_honeyagent
def disable_honeyagent():
    """
    disable_honeyagent

    Disable HoneyAgent Java sandbox analysis

    @return: None
    """
abupy-0.4.0
abupy-0.4.0//abupy/UtilBu/ABuScalerUtil.pyfile:/abupy/UtilBu/ABuScalerUtil.py:function:scaler_one/scaler_one
def scaler_one(group):
    """
    Normalize the series by its first value, i.e. convert the series so that
    it starts at 1.0.

    eg:
        group
        2011-07-28    9820.0
        2011-07-29    9845.0
        2011-08-01    9685.0
        2011-08-02    9653.0
        2011-08-03    9549.0
        2011-08-04    9345.0
        2011-08-05    9114.8
        2011-08-08    8766.8
        2011-08-09    8750.0
        2011-08-10    8646.0
        ...
        2017-07-13    5864.5
        2017-07-14    5928.0
        2017-07-17    6004.5
        2017-07-18    6004.0
        2017-07-19    5966.0
        2017-07-20    5982.0
        2017-07-21    5997.0
        2017-07-24    6033.5
        2017-07-25    6261.0
        2017-07-26    6306.5

        scaler_one(group)
        2011-07-28    1.0000
        2011-07-29    1.0025
        2011-08-01    0.9863
        2011-08-02    0.9830
        2011-08-03    0.9724
        2011-08-04    0.9516
        2011-08-05    0.9282
        2011-08-08    0.8927
        2011-08-09    0.8910
        2011-08-10    0.8804
        ...
        2017-07-13    0.5972
        2017-07-14    0.6037
        2017-07-17    0.6115
        2017-07-18    0.6114
        2017-07-19    0.6075
        2017-07-20    0.6092
        2017-07-21    0.6107
        2017-07-24    0.6144
        2017-07-25    0.6376
        2017-07-26    0.6422

    :param group: pd.DataFrame, pd.Series or np.array object
    """
    return group / group[0]
rez
rez//build_system.pyclass:BuildSystem/name
@classmethod
def name(cls):
    """Return the name of the build system, eg 'make'."""
    raise NotImplementedError
dcicutils
dcicutils//ff_utils.pyfile:/ff_utils.py:function:process_add_on/process_add_on
def process_add_on(add_on):
    """Simple function to ensure that a query add-on string starts with "?"."""
    if add_on.startswith('&'):
        add_on = '?' + add_on[1:]
    if add_on and not add_on.startswith('?'):
        add_on = '?' + add_on
    return add_on
CifFile
CifFile//drel/drel_ast_yacc.pyfile:/drel/drel_ast_yacc.py:function:p_stringliteral/p_stringliteral
def p_stringliteral(p):
    """stringliteral : STRPREFIX SHORTSTRING
                     | STRPREFIX LONGSTRING
                     | SHORTSTRING
                     | LONGSTRING"""
    if len(p) == 3:
        p[0] = p[1] + p[2]
    else:
        p[0] = p[1]
Weblate-4.0.4
Weblate-4.0.4//weblate/addons/base.pyclass:BaseAddon/can_install
@classmethod
def can_install(cls, component, user):
    """Check whether addon is compatible with given component."""
    for key, values in cls.compat.items():
        if getattr(component, key) not in values:
            return False
    return True
fake-bpy-module-2.79-20200428
fake-bpy-module-2.79-20200428//bpy/ops/wm.pyfile:/bpy/ops/wm.py:function:revert_mainfile/revert_mainfile
def revert_mainfile(use_scripts: bool = True):
    """Reload the saved file

    :param use_scripts: Trusted Source, Allow .blend file to execute scripts
        automatically, default available from system preferences
    :type use_scripts: bool
    """
    pass
order-1.2.1
order-1.2.1//order/shift.pyclass:Shift/join_name
@classmethod
def join_name(cls, source, direction):
    """
    Joins a shift *source* and a shift *direction* to return a shift name. If either
    *source* or *direction* is *None*, *None* is returned. If *source* is *NOMINAL*,
    *direction* must be *NOMINAL* as well. Otherwise, *direction* must be either *UP*
    or *DOWN*. Example:

    .. code-block:: python

        join_name("nominal", "nominal")  # -> "nominal"
        join_name("nominal", "up")       # -> ValueError: pointless nominal shift direction
        join_name("pdf", "up")           # -> "pdf_up"
        join_name("pdf", "high")         # -> ValueError: invalid shift direction
    """
    if source == cls.NOMINAL:
        if direction != cls.NOMINAL:
            raise ValueError('pointless nominal shift direction: {}'.format(direction))
        else:
            return cls.NOMINAL
    elif direction in (cls.UP, cls.DOWN):
        return '{}_{}'.format(source, direction)
    else:
        raise ValueError('unknown shift direction: {}'.format(direction))
blender-1.4
blender-1.4//blender/2.79/scripts/addons/io_export_paper_model.pyfile:/blender/2.79/scripts/addons/io_export_paper_model.py:function:is_upsidedown_wrong/is_upsidedown_wrong
def is_upsidedown_wrong(name):
    """Tell if the string would get a different meaning if written upside down"""
    chars = set(name)
    mistakable = set('69NZMWpbqd')
    rotatable = set('80oOxXIl').union(mistakable)
    return chars.issubset(rotatable) and not chars.isdisjoint(mistakable)
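Illustrative checks (hypothetical labels): a name flags as risky only if every character is rotatable and at least one of them is mistakable:

    is_upsidedown_wrong('N8')   # -> True: both rotatable, 'N' is mistakable
    is_upsidedown_wrong('OXO')  # -> False: rotationally symmetric characters only
    is_upsidedown_wrong('AB')   # -> False: 'A' cannot be read upside down at all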
crossbar
crossbar//_compat.pyfile:/_compat.py:function:native_string/native_string
def native_string(string):
    """
    Make C{string} be the type of C{str}, decoding with ASCII if required.
    """
    if isinstance(string, bytes):
        return string.decode('ascii')
    else:
        raise ValueError('This is already a native string.')
autoarray-0.10.6
autoarray-0.10.6//autoarray/dataset/preprocess.pyfile:/autoarray/dataset/preprocess.py:function:psf_with_odd_dimensions_from_psf/psf_with_odd_dimensions_from_psf
def psf_with_odd_dimensions_from_psf(psf):
    """
    If the PSF kernel has one or two even-sized dimensions, return a PSF object where the
    kernel has odd-sized dimensions (odd-sized dimensions are required by a *Convolver*).
    A rescale factor of 1.0 rescales the kernel to the closest odd-sized dimensions
    (e.g. 20 -> 19).

    Kernels are rescaled using the scikit-image routine rescale, which performs rescaling
    via an interpolation routine. This may lead to loss of accuracy in the PSF kernel and
    it is advised that users, where possible, create their PSF on an odd-sized array using
    their data reduction pipelines that remove this approximation.
    """
    return psf.rescaled_with_odd_dimensions_from_rescale_factor(rescale_factor=1.0)
grtoolkit-20.2.17
grtoolkit-20.2.17//grtoolkit/File.pyfile:/grtoolkit/File.py:function:name/name
def name(path):
    """Extracts the file name without its extension."""
    dot = path.rfind('.')
    # rfind returns -1 when there is no '.'; return the path unchanged in that case
    return path[:dot] if dot != -1 else path
zonefile-0.1.1
zonefile-0.1.1//zone_file/record_processors.pyfile:/zone_file/record_processors.py:function:process_origin/process_origin
def process_origin(data, template):
    """
    Replace {$origin} in template with a serialized $ORIGIN record
    """
    record = ''
    if data is not None:
        record += '$ORIGIN %s' % data
    return template.replace('{$origin}', record)
toppra-0.2.2b0
toppra-0.2.2b0//toppra/interpolator.pyfile:/toppra/interpolator.py:function:_find_left_index/_find_left_index
def _find_left_index(gridpoints, s):
    """Find the index i of the grid interval containing s, i.e. such that
    gridpoints[i] <= s < gridpoints[i + 1].

    Parameters
    ----------
    gridpoints:
        Array of path positions.
    s:
        A path position.

    Returns
    -------
    out:
        The desired index.
    """
    for i in range(1, len(gridpoints)):
        if gridpoints[i - 1] <= s < gridpoints[i]:
            return i - 1
    return len(gridpoints) - 2
openapscontrib
openapscontrib//predict/predict.pyfile:/predict/predict.py:function:carb_effect_curve/carb_effect_curve
def carb_effect_curve(t, absorption_time):
    """Returns the fraction of total carbohydrate effect with a given absorption time on
    blood glucose at the specified number of minutes after eating.

    This is the integral of Carbs on Board (COB), defined by a Scheiner GI curve from
    Think Like a Pancreas, fig 7-8. This is based on an algorithm that first appeared
    in GlucoDyn. See: https://github.com/kenstack/GlucoDyn

    :param t: The time in minutes since the carbs were eaten
    :type t: float
    :param absorption_time: The total absorption time of the carbohydrates in minutes
    :type absorption_time: int
    :return: A percentage of the initial carb intake, from 0 to 1
    :rtype: float
    """
    if t <= 0:
        return 0.0
    elif t <= absorption_time / 2.0:
        return 2.0 / absorption_time ** 2 * t ** 2
    elif t < absorption_time:
        return -1.0 + 4.0 / absorption_time * (t - t ** 2 / (2.0 * absorption_time))
    else:
        return 1.0
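A sanity check of the quadratic branch at the midpoint, where exactly half of the effect should have occurred (illustrative numbers):

    carb_effect_curve(90, 180)   # -> 0.5, since 2.0 / 180**2 * 90**2 == 0.5
    carb_effect_curve(180, 180)  # -> 1.0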
kubeflow
kubeflow//fairing/utils.pyfile:/fairing/utils.py:function:get_current_k8s_namespace/get_current_k8s_namespace
def get_current_k8s_namespace():
    """Get the current namespace of kubernetes."""
    with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:
        return f.readline()
zict-2.0.0
zict-2.0.0//zict/common.pyfile:/zict/common.py:function:close/close
def close(z):
    """Close *z* if possible."""
    if hasattr(z, 'close'):
        z.close()
hottbox-0.3.2
hottbox-0.3.2//hottbox/core/structures.pyclass:BaseTensorTD/_validate_init_data
@staticmethod
def _validate_init_data(**kwargs):
    """Validate data for the constructor of a new object."""
    raise NotImplementedError('Not implemented in base (BaseTensorTD) class')
Grortir-0.1.8
Grortir-0.1.8//grortir/main/model/core/abstract_stage.pyclass:AbstractStage/get_output_of_stage
@staticmethod
def get_output_of_stage():
    """Result of processing input with current control params."""
    raise NotImplementedError
openpyxl
openpyxl//formula/translate.pyclass:Translator/strip_ws_name
@staticmethod
def strip_ws_name(range_str):
    """Splits out the worksheet reference, if any, from a range reference."""
    if '!' in range_str:
        sheet, range_str = range_str.rsplit('!', 1)
        return sheet + '!', range_str
    return '', range_str
flywheel-0.5.3
flywheel-0.5.3//flywheel/model_meta.pyfile:/flywheel/model_meta.py:function:merge_metadata/merge_metadata
def merge_metadata(cls):
    """
    Merge all the __metadata__ dicts in a class's hierarchy.

    Keys that do not begin with '_' will be inherited. Keys that begin
    with '_' will only apply to the object that defines them.
    """
    cls_meta = cls.__dict__.get('__metadata__', {})
    meta = {}
    for base in cls.__bases__:
        meta.update(getattr(base, '__metadata__', {}))
    for key in list(meta.keys()):
        if key.startswith('_'):
            del meta[key]
    meta.update(cls_meta)
    return meta
django-lets-go-2.9.6
django-lets-go-2.9.6//django_lets_go/common_functions.pyfile:/django_lets_go/common_functions.py:function:percentage/percentage
def percentage(value, total_sum):
    """Calculate a percentage."""
    if total_sum == 0:
        return 0
    else:
        return round(100 * float(value) / float(total_sum))
eve_rights-0.0.2
eve_rights-0.0.2//eve_rights/config.pyfile:/eve_rights/config.py:function:set_config_fields/set_config_fields
def set_config_fields(app):
    """
    Add custom who-can-read/who-can-write fields to the schema so we can know
    the document rights.

    :param app: the eve application
    """
    account_definition = {
        'type': 'list',
        'default': [],
        'schema': {
            'type': 'objectid',
            'data_relation': {
                'resource': app.config['ACCOUNT_RESOURCE'],
                'field': '_id',
            },
        },
    }
    domain = app.config['DOMAIN']
    for resource in domain:
        if (resource != app.config['ACCOUNT_RESOURCE']
                and resource not in app.config['RESOURCE_BLACKLIST']):
            domain[resource]['schema'][app.config['ACL_FIELDS_READ']] = account_definition
            domain[resource]['schema'][app.config['ACL_FIELDS_WRITE']] = account_definition
fury-0.5.1
fury-0.5.1//fury/data/fetcher.pyfile:/fury/data/fetcher.py:function:_already_there_msg/_already_there_msg
def _already_there_msg(folder):
    """Print a message indicating that the dataset is already in place."""
    msg = 'Dataset is already in place. If you want to fetch it again '
    msg += 'please first remove the folder %s ' % folder
    print(msg)
pylash
pylash//core.pyclass:UnityOfDictAndClass/has
@staticmethod
def has(obj, key):
    """
    Returns `True` if the `key` is a key or a defined attribute in `obj`.
    Otherwise, returns `False`.

    Parameters
    ----------
    obj : dict or object
        The target to check existence of a key-value pair or an attribute.
    key : str
        A `dict` key or an attribute name.

    Returns
    -------
    bool
        Existence of a key-value pair or an attribute.
    """
    if isinstance(obj, dict):
        return key in obj
    else:
        return hasattr(obj, key)
nmrglue-0.7
nmrglue-0.7//nmrglue/fileio/fileiobase.pyfile:/nmrglue/fileio/fileiobase.py:function:create_blank_udic/create_blank_udic
def create_blank_udic(ndim):
    """
    Create a blank universal dictionary for a spectrum of dimension ndim.
    """
    udic = dict()
    udic['ndim'] = ndim
    for i in range(ndim):
        d = dict()
        d['sw'] = 999.99
        d['complex'] = True
        d['obs'] = 999.99
        d['car'] = 999.99
        d['size'] = 1
        d['label'] = ['X', 'Y', 'Z', 'A'][i]
        if i == ndim - 1:
            d['encoding'] = 'direct'
        else:
            d['encoding'] = 'states'
        d['time'] = True
        d['freq'] = False
        udic[i] = d
    return udic
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/apigateway.pyfile:/pyboto3/apigateway.py:function:delete_rest_api/delete_rest_api
def delete_rest_api(restApiId=None):
    """
    Deletes the specified API.
    See also: AWS API Documentation

    :example: response = client.delete_rest_api(
        restApiId='string'
    )

    :type restApiId: string
    :param restApiId: [REQUIRED]
        The ID of the RestApi you want to delete.
    """
    pass
pyphs-0.5.1
pyphs-0.5.1//pyphs/numerics/cpp/tools.pyfile:/pyphs/numerics/cpp/tools.py:function:formatPath/formatPath
def formatPath(path):
    """
    Return a string representation of the path with:
    - escape characters doubled (\\ becomes \\\\);
    - the surrounding ' characters stripped.
    """
    return repr(path).strip("'")
Products.CMFCore-2.4.6
Products.CMFCore-2.4.6//Products/CMFCore/interfaces/_tools.pyclass:IOldstyleDiscussionTool/isDiscussionAllowedFor
def isDiscussionAllowedFor(content):
    """ Return True if discussion is allowed for 'content', else False.

    o Result may be looked up from an object-specific value, or by place,
      or from a site-wide policy.

    o Permission: Public
    """
datacustodian
datacustodian//agent/menu.pyfile:/agent/menu.py:function:compile_menu/compile_menu
def compile_menu(name):
    """Compiles the menu from application specifications.

    Args:
        name (str): name of the menu to compile.
    """
    return {}
psamm-1.0
psamm-1.0//psamm/bayesian_util.pyfile:/psamm/bayesian_util.py:function:id_equals/id_equals
def id_equals(id1, id2):
    """Return True if the two IDs are considered equal."""
    return id1.lower() == id2.lower()
spectral-libraries-1.0.9
spectral-libraries-1.0.9//spectral_libraries/externals/qps/externals/pyqtgraph/metaarray/MetaArray.pyclass:MetaArray/_readMeta
@staticmethod
def _readMeta(fd):
    """Read meta array from the top of a file.

    Read lines until a blank line is reached. This function should ideally
    work for ALL versions of MetaArray.
    """
    meta = u''
    while True:
        line = fd.readline().strip()
        if line == '':
            break
        meta += line
    ret = eval(meta)
    return ret
isodatetime
isodatetime//data.pyclass:Calendar/default
@classmethod
def default(cls):
    """Return the singleton instance. Create if necessary."""
    if cls._DEFAULT is None:
        cls._DEFAULT = cls()
    return cls._DEFAULT
thorpy
thorpy//_utils/functions.pyfile:/_utils/functions.py:function:compress_array/compress_array
def compress_array(array, new_size):
    """Compress an array by averaging over merged cells."""
    l = len(array)
    k = l // new_size  # integer step; float division would make range() below fail
    new_array = list()
    for i in range(0, l, k):
        val = sum(array[i:i + k]) / k
        new_array.append(val)
    return new_array
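For instance (illustrative input; the array length is assumed to be a multiple of new_size so every cell averages the same number of entries):

    compress_array([1, 3, 5, 7], 2)  # -> [2.0, 6.0]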
thriftpy2-0.4.11
thriftpy2-0.4.11//thriftpy2/parser/parser.pyfile:/thriftpy2/parser/parser.py:function:p_const_map/p_const_map
def p_const_map(p):
    """const_map : '{' const_map_seq '}' """
    p[0] = dict(p[2])
all_purpose_set-0.1.10
all_purpose_set-0.1.10//all_purpose_set/_vendor/tedent/_vendor/wrapt/wrappers.pyfile:/all_purpose_set/_vendor/tedent/_vendor/wrapt/wrappers.py:function:with_metaclass/with_metaclass
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    return meta('NewBase', bases, {})
pylice-0.1.0
pylice-0.1.0//pylice/pylice.pyfile:/pylice/pylice.py:function:__extract_license/__extract_license
def __extract_license(dist):
    """Extracts license information from PKG-INFO metadata.

    Returns "UNKNOWN" if no license can be found.

    :param dist: the distribution
    :type dist: pkg_resources.Distribution
    :return: license (str) or None
    """
    if not dist.has_metadata('PKG-INFO'):
        return 'UNKNOWN'
    for line in dist.get_metadata_lines('PKG-INFO'):
        try:
            meta_key, meta_value = line.split(': ', 1)
        except ValueError:
            continue
        if meta_key == 'License':
            return meta_value
mne
mne//forward/forward.pyfile:/forward/forward.py:function:_subject_from_forward/_subject_from_forward
def _subject_from_forward(forward):
    """Get subject id from forward operator."""
    return forward['src']._subject
dgl_cu92-0.4.3.post2.data
dgl_cu92-0.4.3.post2.data//purelib/dgl/backend/backend.pyfile:/purelib/dgl/backend/backend.py:function:tensor/tensor
def tensor(data, dtype=None):
    """Create a tensor given the data and data type.

    Parameters
    ----------
    data : input data
        The interface should at least support list and numpy array.
        The data is copied to a newly-allocated tensor.
    dtype : data type, optional
        It should be one of the values in the data type dict.
        If None, the type should be inferred from data.

    Returns
    -------
    Tensor
        A framework-specific tensor.
    """
    pass
ethjsonrpc-0.3.0
ethjsonrpc-0.3.0//ethjsonrpc/utils.pyfile:/ethjsonrpc/utils.py:function:hex_to_dec/hex_to_dec
def hex_to_dec(x):
    """
    Convert hex to decimal
    """
    return int(x, 16)
prettywebsite-0.0.3
prettywebsite-0.0.3//prettywebsite/brightness.pyfile:/prettywebsite/brightness.py:function:sRGB2RGB/sRGB2RGB
def sRGB2RGB(img):
    """
    This function converts an sRGB image to linear RGB values.
    It loops through each pixel and applies a conversion to pass from sRGB
    to linear RGB values.

    :param img: image to analyze, in sRGB
    :type img: numpy.ndarray
    :return: image to analyze, in RGB
    :rtype: numpy.ndarray
    """
    newimg = []
    for row in img:
        thisrow = []
        for pixel in row:
            thispixel = []
            for value in pixel:
                if value / 255 <= 0.04045:
                    thispixel.append(value / (255 * 12.92))
                else:
                    thispixel.append(((value / 255 + 0.055) / 1.055) ** 2.4)
            thisrow.append(thispixel)
        newimg.append(thisrow)
    return newimg
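A vectorized numpy equivalent of the triple loop above may be clearer and far faster (a sketch under the assumption that img is an 8-bit H x W x 3 ndarray; this helper name is not part of the original package):

    import numpy as np

    def srgb_to_linear(img):
        # same two-branch sRGB decoding as the loop above, applied elementwise
        x = img / 255.0
        return np.where(x <= 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4)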
pysqa-0.0.7
pysqa-0.0.7//versioneer.pyfile:/versioneer.py:function:scan_setup_py/scan_setup_py
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations."""
    found = set()
    setters = False
    errors = 0
    with open('setup.py', 'r') as f:
        for line in f.readlines():
            if 'import versioneer' in line:
                found.add('import')
            if 'versioneer.get_cmdclass()' in line:
                found.add('cmdclass')
            if 'versioneer.get_version()' in line:
                found.add('get_version')
            if 'versioneer.VCS' in line:
                setters = True
            if 'versioneer.versionfile_source' in line:
                setters = True
    if len(found) != 3:
        print('')
        print('Your setup.py appears to be missing some important items')
        print('(but I might be wrong). Please make sure it has something')
        print('roughly like the following:')
        print('')
        print(' import versioneer')
        print(' setup( version=versioneer.get_version(),')
        print(' cmdclass=versioneer.get_cmdclass(), ...)')
        print('')
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print('now lives in setup.cfg, and should be removed from setup.py')
        print('')
        errors += 1
    return errors
arithmos-canvas-core-0.1.12.1
arithmos-canvas-core-0.1.12.1//arithmoscanvas/gui/stackedwidget.pyfile:/arithmoscanvas/gui/stackedwidget.py:function:clipMinMax/clipMinMax
def clipMinMax(size, minSize, maxSize):
    """
    Clip the size so it is bigger than minSize but smaller than maxSize.
    """
    return size.expandedTo(minSize).boundedTo(maxSize)
APS_BlueSky_tools
APS_BlueSky_tools//plans.pyfile:/plans.py:function:tune_axes/tune_axes
def tune_axes(axes):
    """
    BlueSky plan to tune a list of axes in sequence.

    EXAMPLE

    Sequentially, tune a list of preconfigured axes::

        RE(tune_axes([mr, m2r, ar, a2r]))
    """
    for axis in axes:
        yield from axis.tune()
tacker-2.0.0
tacker-2.0.0//tacker/wsgi.pyclass:Router/factory
@classmethod
def factory(cls, global_config, **local_config):
    """Return an instance of the WSGI Router class."""
    return cls()
clifford
clifford//_conformal_layout.pyclass:ConformalLayout/_from_base_layout
@classmethod
def _from_base_layout(cls, layout, added_sig=[1, -1], **kw) -> 'ConformalLayout':
    """Helper to implement :func:`clifford.conformalize`."""
    sig_c = list(layout.sig) + added_sig
    return cls._from_sig(sig=sig_c, firstIdx=layout.firstIdx, layout=layout, **kw)
ext_pylib-0.1
ext_pylib-0.1//ext_pylib/meta/meta.pyclass:DynamicProperty/setter
@staticmethod
def setter(*args, **kwargs):
    """A stub. If it isn't overwritten, the property is read-only."""
    raise TypeError("Cannot modify property. It doesn't have a setter function.")
django_elasticsearch_dsl_drf
django_elasticsearch_dsl_drf//filter_backends/highlight.pyclass:HighlightBackend/prepare_highlight_fields
@classmethod
def prepare_highlight_fields(cls, view):
    """Prepare highlight fields.

    Prepares the following structure:

        >>> {
        >>>     'author.name': {
        >>>         'enabled': False,
        >>>         'options': {
        >>>             'fragment_size': 150,
        >>>             'number_of_fragments': 3
        >>>         }
        >>>     },
        >>>     'title': {
        >>>         'options': {
        >>>             'pre_tags': ["<em>"],
        >>>             'post_tags': ["</em>"]
        >>>         },
        >>>         'enabled': True,
        >>>     },
        >>> }

    :param view:
    :type view: rest_framework.viewsets.ReadOnlyModelViewSet
    :return: Highlight fields options.
    :rtype: dict
    """
    highlight_fields = view.highlight_fields
    for field, options in highlight_fields.items():
        if 'enabled' not in highlight_fields[field]:
            highlight_fields[field]['enabled'] = False
        if 'options' not in highlight_fields[field]:
            highlight_fields[field]['options'] = {}
    return highlight_fields
omicidx-0.6.0
omicidx-0.6.0//omicidx/geo/parser.pyfile:/omicidx/geo/parser.py:function:_split_geo_name/_split_geo_name
def _split_geo_name(v):
    """Split a name of the form first,middle,last into a dict."""
    return dict(zip('first middle last'.split(), v.split(',')))
aehostd
aehostd//hosts.pyfile:/hosts.py:function:hosts_convert/hosts_convert
def hosts_convert(entry):
    """
    Convert an LDAP entry dict to a hosts map tuple.
    """
    hostnames = entry['aeFqdn']
    return hostnames[0], hostnames[1:], entry['ipHostNumber']
slumber-0.7.1
slumber-0.7.1//slumber/utils.pyfile:/slumber/utils.py:function:iterator/iterator
def iterator(d):
    """
    Helper to get a proper dict iterator with both Py2k and Py3k.
    """
    try:
        return d.iteritems()
    except AttributeError:
        return d.items()
pyfn-1.3.7
pyfn-1.3.7//pyfn/utils/marshalling.pyfile:/pyfn/utils/marshalling.py:function:get_sent_dict/get_sent_dict
def get_sent_dict(sent_filepath):
    """Return a {sent_num: text} dictionary, given an absolute path to a
    .sentences file.
    """
    sent_dict = {}
    sent_iter = 0
    with open(sent_filepath, 'r', encoding='utf-8') as sent_stream:
        for line in sent_stream:
            line = line.rstrip()
            sent_dict[sent_iter] = line
            sent_iter += 1
    return sent_dict
circlemap
circlemap//utils.pyfile:/utils.py:function:merge_bed/merge_bed
def merge_bed(discordants_pd):
    """Function that takes a bed file as a pandas dataframe and returns a pandas Series
    of group ids indicating which intervals should be merged. Everything that overlaps
    by at least 1 bp on the same chromosome gets the same group id."""
    overlap = (discordants_pd.start - discordants_pd.shift().end - 1).lt(0)
    chr_overlap = discordants_pd.chrom == discordants_pd.shift().chrom
    return (overlap * 1 + chr_overlap * 1).lt(2).cumsum()
beampy
beampy//functions.pyfile:/functions.py:function:color_text/color_text
def color_text(textin, color):
    """
    Adds LaTeX color to a string.
    """
    if '#' in color:
        textin = '{\\color[HTML]{%s} %s }' % (color.replace('#', '').upper(), textin)
    else:
        textin = '{\\color{%s} %s }' % (color, textin)
    return textin
aiida_quantumespresso
aiida_quantumespresso//tools/dbexporters/tcod_plugins/pw.pyclass:PwTcodtranslator/get_hartree_energy
@classmethod
def get_hartree_energy(cls, calc, **kwargs):
    """Return Hartree energy in eV."""
    return cls._get_pw_energy_value(calc, 'energy_hartree')
energy_demand-0.80
energy_demand-0.80//energy_demand/plotting/plotting_styles.pyfile:/energy_demand/plotting/plotting_styles.py:function:color_list/color_list
def color_list():
    """List with colors."""
    color_list = [
        'darkturquoise', 'orange', 'firebrick', 'darkviolet', 'khaki',
        'olive', 'darkseagreen', 'darkcyan', 'indianred', 'darkblue',
        'orchid', 'gainsboro', 'mediumseagreen', 'lightgray',
        'mediumturquoise', 'lemonchiffon', 'cadetblue', 'lightyellow',
        'lavenderblush', 'coral', 'purple', 'aqua', 'mediumslateblue',
        'darkorange', 'mediumaquamarine', 'darksalmon', 'beige']
    return color_list
sql_tools-3.1.0
sql_tools-3.1.0//sql_tools/sqlite/advTools.pyfile:/sql_tools/sqlite/advTools.py:function:validate/validate
def validate(db='', returnDict=False, err=False, deep=True):
    """
    Validates whether the database is properly operable or not.
    """
    pass
lewis-1.2.1
lewis-1.2.1//src/lewis/core/approaches.pyfile:/src/lewis/core/approaches.py:function:linear/linear
def linear(current, target, rate, dt):
    """
    This function returns the new value after moving towards target at the given speed
    constantly for the time dt.

    If for example the current position is 10 and the target is -20, the returned value
    will be less than 10 if rate and dt are greater than 0:

    .. sourcecode:: Python

        new_pos = linear(10, -20, 10, 0.1)  # new_pos = 9

    The function makes sure that the returned value never overshoots:

    .. sourcecode:: Python

        new_pos = linear(10, -20, 10, 100)  # new_pos = -20

    :param current: The current value of the variable to be changed.
    :param target: The target value to approach.
    :param rate: The rate at which the parameter should move towards target.
    :param dt: The time for which to calculate the change.
    :return: The new variable value.
    """
    sign = (target > current) - (target < current)
    if not sign:
        return current
    new_value = current + sign * rate * dt
    if sign * new_value > sign * target:
        return target
    return new_value
firstpack-0.0.1
firstpack-0.0.1//firstpack/methods/HelperMethods.pyfile:/firstpack/methods/HelperMethods.py:function:HelperMethod1/HelperMethod1
def HelperMethod1(arg1, arg2, banana):
    """
    Helper method for doing something cool. Exactly what it will do,
    we don't know that just yet.

    Inputs:
        arg1
        arg2
        banana  # Because who doesn't like bananas?!?

    Outputs:
        A silly print statement
    """
    import random
    print('Method was called, did you call it? o_O')
    arguments = [arg1, arg2, banana]
    print('One day a {}, came into the office and was wearing a {} as a hat, '
          'thinking it was a {}'.format(random.choice(arguments),
                                        random.choice(arguments),
                                        random.choice(arguments)))
s-tui-1.0.0
s-tui-1.0.0//s_tui/helper_functions.pyfile:/s_tui/helper_functions.py:function:seconds_to_text/seconds_to_text
def seconds_to_text(secs):
    """Converts seconds to a string of hours:minutes:seconds."""
    hours = secs // 3600
    minutes = (secs - hours * 3600) // 60
    seconds = secs - hours * 3600 - minutes * 60
    return '%02d:%02d:%02d' % (hours, minutes, seconds)
abeliantensors
abeliantensors//tensor.pyclass:Tensor/check_form_match
@classmethod
def check_form_match(cls, tensor1=None, tensor2=None, qhape1=None, shape1=None,
        dirs1=None, qhape2=None, shape2=None, dirs2=None, qodulus=None):
    """Check that the given two tensors have the same form, i.e. that their
    indices have the same dimensions. Instead of giving two tensors, two
    shapes can also be given.
    """
    if tensor1 is not None:
        shape1 = tensor1.shape
    if tensor2 is not None:
        shape2 = tensor2.shape
    return shape1 == shape2
pygenometracks
pygenometracks//utilities.pyfile:/utilities.py:function:get_length_w/get_length_w
def get_length_w(fig_width, region_start, region_end, fontsize):
    """
    To improve the visualization of the labels it is good to have an estimation
    of their length in base pairs. In the following code I try to get the length
    of a 'W' in base pairs.
    """
    inches_per_pt = 1.0 / 72.27
    font_in_inches = fontsize * inches_per_pt
    region_len = region_end - region_start
    bp_per_inch = region_len / fig_width
    font_in_bp = font_in_inches * bp_per_inch
    return font_in_bp
corpkit-2.3.8
corpkit-2.3.8//corpkit/process.pyfile:/corpkit/process.py:function:lemmatiser/lemmatiser
def lemmatiser(list_of_words, tag, translated_option, lem_instance=False, preserve_case=False):
    """
    Take a list of unicode words and a tag and return a lemmatised list.
    """
    from corpkit.dictionaries.word_transforms import wordlist, taglemma
    if not lem_instance:
        from nltk.stem.wordnet import WordNetLemmatizer
        lem_instance = WordNetLemmatizer()
    output = []
    for word in list_of_words:
        if 'u' in translated_option:
            word = taglemma.get(word.lower(), 'Other')
        else:
            word = wordlist.get(word, lem_instance.lemmatize(word, tag))
        output.append(word)
    return output
callchain-0.2.6
callchain-0.2.6//callchain/services/reduce.pyclass:KMath/uncommon
def uncommon():
    """Least common incoming thing."""
chemml-0.6.0
chemml-0.6.0//chemml/models/keras/trained/engine.pyfile:/chemml/models/keras/trained/engine.py:function:adapt_shape_array/adapt_shape_array
def adapt_shape_array(X, shape_must_be):
    """
    This function takes care of the unspecified dimensions in the required shapes
    of data structures. The None elements in the shape tuple will be replaced with
    the actual shape of the input array.

    Parameters
    ----------
    X : ndarray
        The input numpy array.
    shape_must_be : tuple
        The required shape of the input array; it might contain None elements.

    Returns
    -------
    tuple
        The actual shape with no None elements.
    """
    if None in shape_must_be:
        copy_shape_must_be = list(shape_must_be)
        while None in copy_shape_must_be:
            ind = copy_shape_must_be.index(None)
            copy_shape_must_be[ind] = X.shape[ind]
        return tuple(copy_shape_must_be)
    else:
        return shape_must_be