Dataset schema (column name, type, value range):

    repo          string   (7 to 55 chars)
    path          string   (4 to 223 chars)
    url           string   (87 to 315 chars)
    code          string   (75 to 104k chars)
    docstring     string   (1 to 46.9k chars)
    language      string   (1 distinct value)
    partition     string   (3 distinct values)
    avg_line_len  float64  (7.91 to 980)
keleshev/schema
schema.py
https://github.com/keleshev/schema/blob/4a0bf6f509e6b69956a8f2fd4e1c3873fc419be8/schema.py#L421-L489
def json_schema(self, schema_id=None, is_main_schema=True):
    """Generate a draft-07 JSON schema dict representing the Schema.
    This method can only be called when the Schema's value is a dict.
    This method must be called with a schema_id. Calling it without one
    is used in a recursive context for sub schemas."""
    Schema = self.__class__
    s = self._schema
    i = self._ignore_extra_keys
    flavor = _priority(s)
    if flavor != DICT and is_main_schema:
        raise ValueError("The main schema must be a dict.")
    if flavor == TYPE:
        # Handle type
        return {"type": {int: "integer", float: "number", bool: "boolean"}.get(s, "string")}
    elif flavor == ITERABLE and len(s) == 1:
        # Handle arrays of a single type or dict schema
        return {"type": "array", "items": Schema(s[0]).json_schema(is_main_schema=False)}
    elif isinstance(s, Or):
        # Handle Or values
        values = [Schema(or_key).json_schema(is_main_schema=False) for or_key in s._args]
        any_of = []
        for value in values:
            if value not in any_of:
                any_of.append(value)
        return {"anyOf": any_of}
    if flavor != DICT:
        # If not handled, do not check
        return {}
    if is_main_schema and not schema_id:
        raise ValueError("schema_id is required.")
    # Handle dict
    required_keys = []
    expanded_schema = {}
    for key in s:
        if isinstance(key, Hook):
            continue
        if isinstance(s[key], Schema):
            sub_schema = s[key]
        else:
            sub_schema = Schema(s[key], ignore_extra_keys=i)
        sub_schema_json = sub_schema.json_schema(is_main_schema=False)
        is_optional = False
        if isinstance(key, Optional):
            key = key._schema
            is_optional = True
        if isinstance(key, str):
            if not is_optional:
                required_keys.append(key)
            expanded_schema[key] = sub_schema_json
        elif isinstance(key, Or):
            for or_key in key._args:
                expanded_schema[or_key] = sub_schema_json
    schema_dict = {
        "type": "object",
        "properties": expanded_schema,
        "required": required_keys,
        "additionalProperties": i,
    }
    if is_main_schema:
        schema_dict.update({"id": schema_id, "$schema": "http://json-schema.org/draft-07/schema#"})
    return schema_dict
Generate a draft-07 JSON schema dict representing the Schema. This method can only be called when the Schema's value is a dict. This method must be called with a schema_id. Calling it without one is used in a recursive context for sub schemas.
python
train
37.565217
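A minimal usage sketch for the method above, assuming the `schema` package's exported `Schema` and `Optional`; the output shown in the comment is approximate:

import json
from schema import Optional, Schema

user = Schema({"name": str, Optional("age"): int})
print(json.dumps(user.json_schema(schema_id="https://example.com/user.json")))
# Roughly:
# {"type": "object",
#  "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
#  "required": ["name"],
#  "additionalProperties": false,
#  "id": "https://example.com/user.json",
#  "$schema": "http://json-schema.org/draft-07/schema#"}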
Capitains/MyCapytain
MyCapytain/common/metadata.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/metadata.py#L235-L246
def getOr(subject, predicate, *args, **kwargs):
    """ Retrieve a metadata node or generate a new one

    :param subject: Subject to which the metadata node should be connected
    :param predicate: Predicate by which the metadata node should be connected
    :return: Metadata for given node
    :rtype: Metadata
    """
    if (subject, predicate, None) in get_graph():
        return Metadata(node=get_graph().objects(subject, predicate).__next__())
    return Metadata(*args, **kwargs)
Retrieve a metadata node or generate a new one :param subject: Subject to which the metadata node should be connected :param predicate: Predicate by which the metadata node should be connected :return: Metadata for given node :rtype: Metadata
python
train
43.083333
opencobra/cobrapy
cobra/core/dictlist.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/core/dictlist.py#L345-L357
def pop(self, *args):
    """remove and return item at index (default last)."""
    value = list.pop(self, *args)
    index = self._dict.pop(value.id)
    # If the pop occurred from a location other than the end of the list,
    # we will need to subtract 1 from every entry afterwards
    if len(args) == 0 or args == (-1,):  # removing from the end of the list
        # NOTE: *args is a tuple, so the original comparison `args == [-1]`
        # could never be true; compare against a tuple instead
        return value
    _dict = self._dict
    for i, j in iteritems(_dict):
        if j > index:
            _dict[i] = j - 1
    return value
remove and return item at index (default last).
python
valid
41.692308
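A toy illustration of that index bookkeeping, assuming cobrapy is installed; the `Item` class is a hypothetical stand-in for cobrapy objects, which expose the `.id` attribute `DictList` keys on:

from cobra.core.dictlist import DictList

class Item:  # hypothetical stand-in: DictList only needs an .id attribute
    def __init__(self, id):
        self.id = id

dl = DictList()
dl.extend([Item("a"), Item("b"), Item("c")])
dl.pop(0)               # pop from the front, not the end
print(dl.index("c"))    # -> 1: entries after the popped slot were shifted down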
saltstack/salt
salt/cloud/clouds/msazure.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L1597-L1628
def delete_disk(kwargs=None, conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0

    Delete a specific disk associated with the account

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f delete_disk my-azure name=my_disk
        salt-cloud -f delete_disk my-azure name=my_disk delete_vhd=True
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The delete_disk function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A name must be specified as "name"')

    if not conn:
        conn = get_conn()

    try:
        data = conn.delete_disk(kwargs['name'], kwargs.get('delete_vhd', False))
        return {'Success': 'The disk was successfully deleted'}
    except AzureMissingResourceHttpError as exc:
        raise SaltCloudSystemExit('{0}: {1}'.format(kwargs['name'], exc.message))
.. versionadded:: 2015.8.0 Delete a specific disk associated with the account CLI Examples: .. code-block:: bash salt-cloud -f delete_disk my-azure name=my_disk salt-cloud -f delete_disk my-azure name=my_disk delete_vhd=True
python
train
28.8125
astropy/photutils
photutils/aperture/bounding_box.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/bounding_box.py#L149-L157
def slices(self):
    """
    The bounding box as a tuple of `slice` objects.

    The slice tuple is in numpy axis order (i.e. ``(y, x)``) and
    therefore can be used to slice numpy arrays.
    """
    return (slice(self.iymin, self.iymax), slice(self.ixmin, self.ixmax))
The bounding box as a tuple of `slice` objects. The slice tuple is in numpy axis order (i.e. ``(y, x)``) and therefore can be used to slice numpy arrays.
python
train
32.333333
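A short sketch of the property in use, assuming `BoundingBox` is importable from `photutils.aperture` as the path above suggests:

import numpy as np
from photutils.aperture import BoundingBox

data = np.arange(100).reshape(10, 10)
bbox = BoundingBox(ixmin=2, ixmax=5, iymin=1, iymax=4)

cutout = data[bbox.slices]   # slices are (y, x), so this is data[1:4, 2:5]
print(cutout.shape)          # -> (3, 3)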
wglass/lighthouse
lighthouse/pluggable.py
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/pluggable.py#L36-L72
def get_installed_classes(cls):
    """
    Iterates over installed plugins associated with the `entry_point` and
    returns a dictionary of viable ones keyed off of their names.

    A viable installed plugin is one that is both loadable *and* a subclass
    of the Pluggable subclass in question.
    """
    installed_classes = {}
    for entry_point in pkg_resources.iter_entry_points(cls.entry_point):
        try:
            plugin = entry_point.load()
        except ImportError as e:
            logger.error(
                "Could not load plugin %s: %s", entry_point.name, str(e)
            )
            continue

        if not issubclass(plugin, cls):
            logger.error(
                "Could not load plugin %s:" +
                " %s class is not subclass of %s",
                entry_point.name, plugin.__class__.__name__, cls.__name__
            )
            continue

        if not plugin.validate_dependencies():
            logger.error(
                "Could not load plugin %s:" +
                " %s class dependencies not met",
                entry_point.name, plugin.__name__
            )
            continue

        installed_classes[entry_point.name] = plugin

    return installed_classes
Iterates over installed plugins associated with the `entry_point` and returns a dictionary of viable ones keyed off of their names. A viable installed plugin is one that is both loadable *and* a subclass of the Pluggable subclass in question.
python
train
35.864865
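For context, plugins reach `iter_entry_points` through setuptools entry points. A hypothetical third-party package would register one roughly like this; the distribution, module, and group names are illustrative, not lighthouse's documented ones:

# setup.py of a hypothetical plugin distribution
from setuptools import setup

setup(
    name="lighthouse-example-plugin",
    py_modules=["example_plugin"],
    entry_points={
        # the group must match the Pluggable subclass's `entry_point` attribute
        "lighthouse.checks": [
            "example = example_plugin:ExampleCheck",
        ],
    },
)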
mobinrg/rpi_spark_drives
JMRPiSpark/Drives/Key/RPiKeyButtons.py
https://github.com/mobinrg/rpi_spark_drives/blob/e1602d8268a5ef48e9e0a8b37de89e0233f946ea/JMRPiSpark/Drives/Key/RPiKeyButtons.py#L161-L189
def configKeyButtons(self, enableButtons=[], bounceTime=DEF_BOUNCE_TIME_NORMAL,
                     pullUpDown=GPIO.PUD_UP, event=GPIO.BOTH):
    """!
    \~english
    Configure multiple key-button IOs and events at the same time

    @param enableButtons: an array of key button configs, e.g. <br>
           [{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
    @param bounceTime: defaults to DEF_BOUNCE_TIME_NORMAL
    @param pullUpDown: defaults to GPIO.PUD_UP
    @param event: defaults to GPIO.BOTH; it can be one of
           { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }

    @see DEF_BOUNCE_TIME_SHORT_MON (10ms)
    @see DEF_BOUNCE_TIME_SHORT (50ms)
    @see DEF_BOUNCE_TIME_NORMAL (100ms)
    @see DEF_BOUNCE_TIME_LONG (200ms)
    """
    for key in enableButtons:
        self.setKeyButton(key["id"], key["callback"], bounceTime,
                          pullUpDown, event)
! \~english Configure multiple key-button IOs and events at the same time @param enableButtons: an array of key button configs, e.g. <br> [{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ] @param bounceTime: defaults to DEF_BOUNCE_TIME_NORMAL @param pullUpDown: defaults to GPIO.PUD_UP @param event: defaults to GPIO.BOTH; it can be one of { GPIO.RISING | GPIO.FALLING | GPIO.BOTH } @see DEF_BOUNCE_TIME_SHORT_MON (10ms) @see DEF_BOUNCE_TIME_SHORT (50ms) @see DEF_BOUNCE_TIME_NORMAL (100ms) @see DEF_BOUNCE_TIME_LONG (200ms)
python
train
42.586207
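A hedged usage sketch: `keypad` stands for an already-initialized `RPiKeyButtons` instance, `BUTTON_ACT_A` and `DEF_BOUNCE_TIME_SHORT` are the module constants referenced in the docstring, and the callback follows the RPi.GPIO convention of receiving the channel number:

def on_button_a(channel):
    # called by RPi.GPIO's event-detect thread with the GPIO channel number
    print("Button A event on GPIO channel", channel)

# keypad is assumed to be an initialized RPiKeyButtons instance
keypad.configKeyButtons(
    enableButtons=[{"id": BUTTON_ACT_A, "callback": on_button_a}],
    bounceTime=DEF_BOUNCE_TIME_SHORT,
)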
erinxocon/spotify-local
src/spotify_local/core.py
https://github.com/erinxocon/spotify-local/blob/8188eef221e3d8b9f408ff430d80e74560360459/src/spotify_local/core.py#L94-L102
def remove_all_listeners(self, event=None):
    """Remove all functions for all events, or one event if one is specified.

    :param event: Optional event you wish to remove all functions from
    """
    if event is not None:
        self._registered_events[event] = OrderedDict()
    else:
        self._registered_events = defaultdict(OrderedDict)
Remove all functions for all events, or one event if one is specified. :param event: Optional event you wish to remove all functions from
python
train
41.111111
rwl/godot
godot/xdot_parser.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/xdot_parser.py#L354-L360
def proc_polyline(self, tokens):
    """ Returns the components of a polyline. """
    pts = [(p["x"], p["y"]) for p in tokens["points"]]
    component = Polyline(pen=self.pen, points=pts)
    return component
Returns the components of a polyline.
python
test
31.571429
bharadwaj-raju/libdesktop
libdesktop/wallpaper.py
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/wallpaper.py#L241-L462
def set_wallpaper(image):
    '''Set the desktop wallpaper.

    Sets the desktop wallpaper to an image.

    Args:
        image (str): The path to the image to be set as wallpaper.
    '''
    desktop_env = system.get_name()

    if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']:
        uri = 'file://%s' % image
        SCHEMA = 'org.gnome.desktop.background'
        KEY = 'picture-uri'
        if desktop_env == 'mate':
            uri = image
            SCHEMA = 'org.mate.background'
            KEY = 'picture-filename'
        try:
            from gi.repository import Gio
            gsettings = Gio.Settings.new(SCHEMA)
            gsettings.set_string(KEY, uri)
        except ImportError:
            try:
                gsettings_proc = sp.Popen(
                    ['gsettings', 'set', SCHEMA, KEY, uri])
            except:  # MATE < 1.6
                sp.Popen(['mateconftool-2', '-t', 'string', '--set',
                          '/desktop/mate/background/picture_filename',
                          '%s' % image], stdout=sp.PIPE)
            finally:
                gsettings_proc.communicate()
                if gsettings_proc.returncode != 0:
                    sp.Popen(['mateconftool-2', '-t', 'string', '--set',
                              '/desktop/mate/background/picture_filename',
                              '%s' % image])

    elif desktop_env == 'gnome2':
        sp.Popen(['gconftool-2', '-t', 'string', '--set',
                  '/desktop/gnome/background/picture_filename', image])

    elif desktop_env == 'kde':
        # This probably only works in Plasma 5+
        kde_script = dedent('''\
            var Desktops = desktops();
            for (i=0;i<Desktops.length;i++) {{
                d = Desktops[i];
                d.wallpaperPlugin = "org.kde.image";
                d.currentConfigGroup = Array("Wallpaper",
                                             "org.kde.image",
                                             "General");
                d.writeConfig("Image", "file://{}")
            }}
            ''').format(image)
        sp.Popen(['dbus-send', '--session',
                  '--dest=org.kde.plasmashell', '--type=method_call',
                  '/PlasmaShell', 'org.kde.PlasmaShell.evaluateScript',
                  'string:{}'.format(kde_script)])

    elif desktop_env in ['kde3', 'trinity']:
        args = 'dcop kdesktop KBackgroundIface setWallpaper 0 "%s" 6' % image
        sp.Popen(args, shell=True)

    elif desktop_env == 'xfce4':
        # XFCE4's image property is not image-path but last-image (What?)
        list_of_properties = system.get_cmd_out(
            ['xfconf-query', '-R', '-l', '-c', 'xfce4-desktop', '-p', '/backdrop'])
        for i in list_of_properties.split('\n'):
            if i.endswith('last-image'):
                # The property given is a background property
                sp.Popen(
                    ['xfconf-query -c xfce4-desktop -p %s -s "%s"' % (i, image)],
                    shell=True)
        sp.Popen(['xfdesktop --reload'], shell=True)

    elif desktop_env == 'razor-qt':
        desktop_conf = configparser.ConfigParser()
        # Development version
        desktop_conf_file = os.path.join(
            get_config_dir('razor')[0], 'desktop.conf')
        if os.path.isfile(desktop_conf_file):
            config_option = r'screens\1\desktops\1\wallpaper'
        else:
            desktop_conf_file = os.path.join(
                os.path.expanduser('~'), '.razor/desktop.conf')
            config_option = r'desktops\1\wallpaper'
        desktop_conf.read(os.path.join(desktop_conf_file))
        try:
            if desktop_conf.has_option('razor', config_option):
                desktop_conf.set('razor', config_option, image)
                with codecs.open(desktop_conf_file, 'w', encoding='utf-8',
                                 errors='replace') as f:
                    desktop_conf.write(f)
        except:
            pass

    elif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']:
        try:
            args = ['feh', '--bg-scale', image]
            sp.Popen(args)
        except:
            sys.stderr.write('Error: Failed to set wallpaper with feh!')
            sys.stderr.write('Please make sure that you have feh installed.')

    elif desktop_env == 'icewm':
        args = ['icewmbg', image]
        sp.Popen(args)

    elif desktop_env == 'blackbox':
        args = ['bsetbg', '-full', image]
        sp.Popen(args)

    elif desktop_env == 'lxde':
        args = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image
        sp.Popen(args, shell=True)

    elif desktop_env == 'lxqt':
        args = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image
        sp.Popen(args, shell=True)

    elif desktop_env == 'windowmaker':
        args = 'wmsetbg -s -u %s' % image
        sp.Popen(args, shell=True)

    elif desktop_env == 'enlightenment':
        args = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image
        sp.Popen(args, shell=True)

    elif desktop_env == 'awesome':
        with sp.Popen("awesome-client", stdin=sp.PIPE) as awesome_client:
            command = ('local gears = require("gears"); for s = 1,'
                       ' screen.count() do gears.wallpaper.maximized'
                       '("%s", s, true); end;') % image
            awesome_client.communicate(input=bytes(command, 'UTF-8'))

    elif desktop_env == 'windows':
        WINDOWS_SCRIPT = dedent('''
            reg add "HKEY_CURRENT_USER\Control Panel\Desktop" \
            /v Wallpaper /t REG_SZ /d %s /f

            rundll32.exe user32.dll,UpdatePerUserSystemParameters
            ''') % image
        windows_script_file = os.path.join(
            tempfile.gettempdir(), 'wallscript.bat')
        with open(windows_script_file, 'w') as f:
            f.write(WINDOWS_SCRIPT)
        sp.Popen([windows_script_file], shell=True)
        # Sometimes the method above works
        # and sometimes the one below
        SPI_SETDESKWALLPAPER = 20
        ctypes.windll.user32.SystemParametersInfoA(
            SPI_SETDESKWALLPAPER, 0, image, 0)

    elif desktop_env == 'mac':
        try:
            from appscript import app, mactypes
            app('Finder').desktop_picture.set(mactypes.File(image))
        except ImportError:
            OSX_SCRIPT = dedent('''tell application "System Events"
                set desktopCount to count of desktops
                repeat with desktopNumber from 1 to desktopCount
                    tell desktop desktopNumber
                        set picture to POSIX file "%s"
                    end tell
                end repeat
            end tell''') % image
            sp.Popen(['osascript', OSX_SCRIPT])

    else:
        try:
            # feh is nearly a catch-all for Linux WMs
            sp.Popen(['feh', '--bg-scale', image])
        except:
            pass
Set the desktop wallpaper. Sets the desktop wallpaper to an image. Args: image (str): The path to the image to be set as wallpaper.
python
train
25.166667
gbowerman/azurerm
azurerm/networkrp.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/networkrp.py#L643-L657
def list_vnets(access_token, subscription_id):
    '''List the VNETs in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON body of VNets list with properties.
    '''
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.Network/',
                        '/virtualNetworks?api-version=', NETWORK_API])
    return do_get(endpoint, access_token)
List the VNETs in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of VNets list with properties.
python
train
37.466667
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/location/location_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/location/location_client.py#L72-L90
def get_resource_area_by_host(self, area_id, host_id):
    """GetResourceAreaByHost.

    [Preview API]
    :param str area_id:
    :param str host_id:
    :rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>`
    """
    route_values = {}
    if area_id is not None:
        route_values['areaId'] = self._serialize.url('area_id', area_id, 'str')
    query_parameters = {}
    if host_id is not None:
        query_parameters['hostId'] = self._serialize.query('host_id', host_id, 'str')
    response = self._send(http_method='GET',
                          location_id='e81700f7-3be2-46de-8624-2eb35882fcaa',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('ResourceAreaInfo', response)
GetResourceAreaByHost. [Preview API] :param str area_id: :param str host_id: :rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>`
python
train
48.526316
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L2811-L2814
def do_quit(self, _: argparse.Namespace) -> bool:
    """Exit this application"""
    self._should_quit = True
    return self._STOP_AND_EXIT
Exit this application
python
train
37.5
observerss/yamo
yamo/document.py
https://github.com/observerss/yamo/blob/ef0ab1ab7be2ecbc452d55ac9b367eb4c0d88646/yamo/document.py#L253-L263
def cached(cls, timeout=60, cache_none=False):
    """ Cache queries

    :param timeout: cache timeout
    :param cache_none: cache None result

    Usage::

        >>> Model.cached(60).query({...})
    """
    return CachedModel(cls=cls, timeout=timeout, cache_none=cache_none)
Cache queries :param timeout: cache timeout :param cache_none: cache None result Usage:: >>> Model.cached(60).query({...})
python
train
26.727273
radjkarl/imgProcessor
imgProcessor/camera/CameraCalibration.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L178-L185
def setCamera(self, camera_name, bit_depth=16):
    '''
    Args:
        camera_name (str): Name of the camera
        bit_depth (int): depth (bit) of the camera sensor
    '''
    self.coeffs['name'] = camera_name
    self.coeffs['depth'] = bit_depth
Args: camera_name (str): Name of the camera bit_depth (int): depth (bit) of the camera sensor
python
train
35
silenc3r/dikicli
dikicli/core.py
https://github.com/silenc3r/dikicli/blob/53721cdf75db04e2edca5ed3f99beae7c079d980/dikicli/core.py#L239-L266
def _cache_lookup(word, data_dir, native=False):
    """Checks if word is in cache.

    Parameters
    ----------
    word : str
        Word to check in cache.
    data_dir : pathlib.Path
        Cache directory location.

    Returns
    -------
    translation : str or None
        Translation of given word.
    """
    trans_dir = "translations"
    if native:
        trans_dir += "_native"
    logger.debug("Cache lookup: %s", word)
    filename = data_dir.joinpath(trans_dir, "{}.html".format(word))
    if filename.is_file():
        with open(filename, mode="r") as f:
            logger.debug("Cache found: %s", word)
            # TODO: not sure if we should parse data here
            translation = _parse_cached(f.read())
            return translation
    logger.debug("Cache miss: %s", word)
    return None
Checks if word is in cache. Parameters ---------- word : str Word to check in cache. data_dir : pathlib.Path Cache directory location. Returns ------- translation : str or None Translation of given word.
python
train
28.535714
jtwhite79/pyemu
pyemu/pst/pst_handler.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L2543-L2559
def less_than_obs_constraints(self):
    """get the names of the observations that are listed as less than
    inequality constraints. Zero-weighted obs are skipped

    Returns
    -------
    pandas.Series : obsnme of observations that are non-zero weighted
        less than constraints
    """
    obs = self.observation_data
    lt_obs = obs.loc[obs.apply(lambda x: self._is_less_const(x.obgnme)
                               and x.weight != 0.0, axis=1), "obsnme"]
    return lt_obs
get the names of the observations that are listed as less than inequality constraints. Zero-weighted obs are skipped Returns ------- pandas.Series : obsnme of observations that are non-zero weighted less than constraints
python
train
32.470588
bugra/angel-list
angel/angel.py
https://github.com/bugra/angel-list/blob/75ac453e873727675ba18e1f45b5bc0cfda26fd7/angel/angel.py#L465-L472
def get_review_id(self, id_):
    """
    Get a particular review id, independent from the user_id and
    startup_id
    """
    return _get_request(_REVIEW_ID.format(c_api=_C_API_BEGINNING,
                                          api=_API_VERSION,
                                          id_=id_,
                                          at=self.access_token))
Get a particular review id, independent from the user_id and startup_id
python
train
52
juiceinc/recipe
recipe/shelf.py
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L669-L702
def find(self, obj, filter_to_class=Ingredient, constructor=None):
    """
    Find an Ingredient, optionally using the shelf.

    :param obj: A string or Ingredient
    :param filter_to_class: The Ingredient subclass that obj must be an
        instance of
    :param constructor: An optional callable for building Ingredients
        from obj
    :return: An Ingredient of subclass `filter_to_class`
    """
    if callable(constructor):
        obj = constructor(obj, shelf=self)

    if isinstance(obj, basestring):
        set_descending = obj.startswith('-')
        if set_descending:
            obj = obj[1:]

        if obj not in self:
            raise BadRecipe("{} doesn't exist on the shelf".format(obj))

        ingredient = self[obj]
        if not isinstance(ingredient, filter_to_class):
            raise BadRecipe('{} is not a {}'.format(obj, filter_to_class))

        if set_descending:
            ingredient.ordering = 'desc'

        return ingredient
    elif isinstance(obj, filter_to_class):
        return obj
    else:
        raise BadRecipe('{} is not a {}'.format(obj, filter_to_class))
Find an Ingredient, optionally using the shelf. :param obj: A string or Ingredient :param filter_to_class: The Ingredient subclass that obj must be an instance of :param constructor: An optional callable for building Ingredients from obj :return: An Ingredient of subclass `filter_to_class`
python
train
35.029412
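A sketch of the lookup, including the `'-'` prefix handling; the shelf contents and the `MyTable.age` column are hypothetical:

from recipe import Dimension, Shelf

shelf = Shelf({'age': Dimension(MyTable.age)})   # MyTable is a hypothetical SQLAlchemy model

d1 = shelf.find('age', Dimension)    # plain lookup by name
d2 = shelf.find('-age', Dimension)   # same ingredient, with ordering set to 'desc'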
hydpy-dev/hydpy
hydpy/core/netcdftools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/netcdftools.py#L1396-L1399
def subdevicenames(self) -> Tuple[str, ...]:
    """A |tuple| containing the device names."""
    self: NetCDFVariableBase
    return tuple(self.sequences.keys())
A |tuple| containing the device names.
python
train
42.75
cga-harvard/Hypermap-Registry
hypermap/search/pycsw_plugin.py
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search/pycsw_plugin.py#L190-L194
def query_source(self, source):
    """
    Query by source
    """
    return self._get_repo_filter(Layer.objects).filter(url=source)
Query by source
python
train
29.2
elastic/elasticsearch-dsl-py
examples/alias_migration.py
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/examples/alias_migration.py#L70-L106
def migrate(move_data=True, update_alias=True):
    """
    Upgrade function that creates a new index for the data. Optionally it also
    can (and by default will) reindex previous copy of the data into the new
    index (specify ``move_data=False`` to skip this step) and update the alias
    to point to the latest index (set ``update_alias=False`` to skip).

    Note that while this function is running the application can still perform
    any and all searches without any loss of functionality. It should, however,
    not perform any writes at this time as those might be lost.
    """
    # construct a new index name by appending current timestamp
    next_index = PATTERN.replace('*', datetime.now().strftime('%Y%m%d%H%M%S%f'))

    # get the low level connection
    es = connections.get_connection()

    # create new index, it will use the settings from the template
    es.indices.create(index=next_index)

    if move_data:
        # move data from current alias to the new index
        es.reindex(
            body={"source": {"index": ALIAS}, "dest": {"index": next_index}},
            request_timeout=3600
        )
        # refresh the index to make the changes visible
        es.indices.refresh(index=next_index)

    if update_alias:
        # repoint the alias to point to the newly created index
        es.indices.update_aliases(body={
            'actions': [
                {"remove": {"alias": ALIAS, "index": PATTERN}},
                {"add": {"alias": ALIAS, "index": next_index}},
            ]
        })
Upgrade function that creates a new index for the data. Optionally it also can (and by default will) reindex previous copy of the data into the new index (specify ``move_data=False`` to skip this step) and update the alias to point to the latest index (set ``update_alias=False`` to skip). Note that while this function is running the application can still perform any and all searches without any loss of functionality. It should, however, not perform any writes at this time as those might be lost.
python
train
40.702703
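For reference, a sketch of how the module-level names used above fit together; the values are illustrative, the real example file defines its own:

ALIAS = 'test-blog'        # stable name the application always reads and writes
PATTERN = ALIAS + '-*'     # matches the timestamped indices migrate() creates

# Both alias actions are applied in a single update_aliases call, so searches
# against ALIAS move atomically from the old index to the new one.
migrate(move_data=True, update_alias=True)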
ceph/ceph-deploy
ceph_deploy/hosts/remotes.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L14-L50
def platform_information(_linux_distribution=None):
    """ detect platform information from remote host """
    linux_distribution = _linux_distribution or platform.linux_distribution
    distro, release, codename = linux_distribution()
    if not distro:
        distro, release, codename = parse_os_release()
    if not codename and 'debian' in distro.lower():
        # this could be an empty string in Debian
        debian_codenames = {
            '10': 'buster',
            '9': 'stretch',
            '8': 'jessie',
            '7': 'wheezy',
            '6': 'squeeze',
        }
        major_version = release.split('.')[0]
        codename = debian_codenames.get(major_version, '')

        # In order to support newer jessie/sid or wheezy/sid strings we test this
        # if sid is buried in the minor, we should use sid anyway.
        if not codename and '/' in release:
            major, minor = release.split('/')
            if minor == 'sid':
                codename = minor
            else:
                codename = major
    if not codename and 'oracle' in distro.lower():
        # this could be an empty string in Oracle linux
        codename = 'oracle'
    if not codename and 'virtuozzo linux' in distro.lower():
        # this could be an empty string in Virtuozzo linux
        codename = 'virtuozzo'
    if not codename and 'arch' in distro.lower():
        # this could be an empty string in Arch linux
        codename = 'arch'

    return (
        str(distro).rstrip(),
        str(release).rstrip(),
        str(codename).rstrip()
    )
detect platform information from remote host
python
train
40.945946
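The `_linux_distribution` parameter is a test seam. A sketch that exercises the Debian `major/sid` branch by injecting a fake; the tuple values are illustrative:

fake = lambda: ('debian', '9/sid', '')   # (distro, release, codename)
print(platform_information(_linux_distribution=fake))
# -> ('debian', '9/sid', 'sid'): with no codename and 'sid' in the minor part
#    of the release, the function falls back to 'sid'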
HewlettPackard/python-hpOneView
hpOneView/resources/settings/backups.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/settings/backups.py#L141-L155
def update_remote_archive(self, save_uri, timeout=-1):
    """
    Saves a backup of the appliance to a previously-configured remote location.

    Args:
        save_uri (dict): The URI for saving the backup to a previously
            configured location.
        timeout:
            Timeout in seconds. Wait for task completion by default. The
            timeout does not abort the operation in OneView, just stop
            waiting for its completion.

    Returns:
        dict: Backup details.
    """
    return self._client.update_with_zero_body(uri=save_uri, timeout=timeout)
Saves a backup of the appliance to a previously-configured remote location. Args: save_uri (dict): The URI for saving the backup to a previously configured location. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. Returns: dict: Backup details.
python
train
39.6
gwpy/gwpy
gwpy/io/gwf.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/gwf.py#L99-L127
def write_frames(filename, frames, compression=257, compression_level=6):
    """Write a list of frame objects to a file

    **Requires:** |LDAStools.frameCPP|_

    Parameters
    ----------
    filename : `str`
        path to write into

    frames : `list` of `LDAStools.frameCPP.FrameH`
        list of frames to write into file

    compression : `int`, optional
        enum value for compression scheme, default is ``GZIP``

    compression_level : `int`, optional
        compression level for given scheme
    """
    from LDAStools import frameCPP

    # open stream
    stream = open_gwf(filename, 'w')

    # write frames one-by-one
    if isinstance(frames, frameCPP.FrameH):
        frames = [frames]
    for frame in frames:
        stream.WriteFrame(frame, compression, compression_level)
Write a list of frame objects to a file **Requires:** |LDAStools.frameCPP|_ Parameters ---------- filename : `str` path to write into frames : `list` of `LDAStools.frameCPP.FrameH` list of frames to write into file compression : `int`, optional enum value for compression scheme, default is ``GZIP`` compression_level : `int`, optional compression level for given scheme
[ "Write", "a", "list", "of", "frame", "objects", "to", "a", "file" ]
python
train
26.827586
PyThaiNLP/pythainlp
pythainlp/spell/pn.py
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/spell/pn.py#L138-L156
def spell(self, word: str) -> List[str]: """ Return a list of possible words, according to edit distance of 1 and 2, sorted by frequency of word occurrence in the spelling dictionary :param str word: A word to check its spelling """ if not word: return [] candidates = ( self.known([word]) or self.known(_edits1(word)) or self.known(_edits2(word)) or [word] ) candidates.sort(key=self.freq, reverse=True) return candidates
[ "def", "spell", "(", "self", ",", "word", ":", "str", ")", "->", "List", "[", "str", "]", ":", "if", "not", "word", ":", "return", "\"\"", "candidates", "=", "(", "self", ".", "known", "(", "[", "word", "]", ")", "or", "self", ".", "known", "(", "_edits1", "(", "word", ")", ")", "or", "self", ".", "known", "(", "_edits2", "(", "word", ")", ")", "or", "[", "word", "]", ")", "candidates", ".", "sort", "(", "key", "=", "self", ".", "freq", ",", "reverse", "=", "True", ")", "return", "candidates" ]
Return a list of possible words, according to edit distance of 1 and 2, sorted by frequency of word occurrence in the spelling dictionary :param str word: A word to check its spelling
[ "Return", "a", "list", "of", "possible", "words", "according", "to", "edit", "distance", "of", "1", "and", "2", "sorted", "by", "frequency", "of", "word", "occurrance", "in", "the", "spelling", "dictionary" ]
python
train
28.789474
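The candidate ranking above is easiest to see with a toy frequency table. This standalone sketch mirrors the same known-words-then-edits cascade; the names and data are illustrative, not the pythainlp API:

    from collections import Counter

    FREQ = Counter({"hello": 120, "help": 40})

    def known(words):
        return [w for w in words if w in FREQ]

    def edits1(word):
        # single-character deletions only, to keep the sketch short
        return {word[:i] + word[i + 1:] for i in range(len(word))}

    def spell(word):
        candidates = known([word]) or known(edits1(word)) or [word]
        return sorted(candidates, key=lambda w: FREQ[w], reverse=True)

    print(spell("hhello"))  # ['hello'] via one deletion
    print(spell("zzz"))     # ['zzz'] falls back to the input itself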
xhtml2pdf/xhtml2pdf
xhtml2pdf/reportlab_paragraph.py
https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/reportlab_paragraph.py#L1657-L1675
def getPlainText(self, identify=None): """ Convenience function for templates which want access to the raw text, without XML tags. """ frags = getattr(self, 'frags', None) if frags: plains = [] for frag in frags: if hasattr(frag, 'text'): plains.append(frag.text) return ''.join(plains) elif identify: text = getattr(self, 'text', None) if text is None: text = repr(self) return text else: return ''
[ "def", "getPlainText", "(", "self", ",", "identify", "=", "None", ")", ":", "frags", "=", "getattr", "(", "self", ",", "'frags'", ",", "None", ")", "if", "frags", ":", "plains", "=", "[", "]", "for", "frag", "in", "frags", ":", "if", "hasattr", "(", "frag", ",", "'text'", ")", ":", "plains", ".", "append", "(", "frag", ".", "text", ")", "return", "''", ".", "join", "(", "plains", ")", "elif", "identify", ":", "text", "=", "getattr", "(", "self", ",", "'text'", ",", "None", ")", "if", "text", "is", "None", ":", "text", "=", "repr", "(", "self", ")", "return", "text", "else", ":", "return", "''" ]
Convenience function for templates which want access to the raw text, without XML tags.
[ "Convenience", "function", "for", "templates", "which", "want", "access", "to", "the", "raw", "text", "without", "XML", "tags", "." ]
python
train
29.789474
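To see the two branches above in action, here is a minimal stand-in with the same logic; `Frag` and `ParaSketch` are illustrative names, not xhtml2pdf classes:

    class Frag:
        def __init__(self, text):
            self.text = text

    class ParaSketch:
        # mirrors getPlainText above: join frag texts, else fall back to repr
        def __init__(self, frags):
            self.frags = frags

        def getPlainText(self, identify=None):
            frags = getattr(self, 'frags', None)
            if frags:
                return ''.join(f.text for f in frags if hasattr(f, 'text'))
            elif identify:
                text = getattr(self, 'text', None)
                return repr(self) if text is None else text
            return ''

    print(ParaSketch([Frag('Hello, '), Frag('world')]).getPlainText())  # Hello, world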
pmacosta/ptrie
ptrie/ptrie.py
https://github.com/pmacosta/ptrie/blob/c176d3ee810b7b5243c7ff2bbf2f1af0b0fff2a8/ptrie/ptrie.py#L937-L975
def get_subtree(self, name): # noqa: D302 r""" Get all node names in a sub-tree. :param name: Sub-tree root node name :type name: :ref:`NodeName` :rtype: list of :ref:`NodeName` :raises: * RuntimeError (Argument \`name\` is not valid) * RuntimeError (Node *[name]* not in tree) Using the same example tree created in :py:meth:`ptrie.Trie.add_nodes`:: >>> from __future__ import print_function >>> import docs.support.ptrie_example, pprint >>> tobj = docs.support.ptrie_example.create_tree() >>> print(tobj) root ├branch1 (*) │├leaf1 ││└subleaf1 (*) │└leaf2 (*) │ └subleaf2 └branch2 >>> pprint.pprint(tobj.get_subtree('root.branch1')) ['root.branch1', 'root.branch1.leaf1', 'root.branch1.leaf1.subleaf1', 'root.branch1.leaf2', 'root.branch1.leaf2.subleaf2'] """ if self._validate_node_name(name): raise RuntimeError("Argument `name` is not valid") self._node_in_tree(name) return self._get_subtree(name)
[ "def", "get_subtree", "(", "self", ",", "name", ")", ":", "# noqa: D302", "if", "self", ".", "_validate_node_name", "(", "name", ")", ":", "raise", "RuntimeError", "(", "\"Argument `name` is not valid\"", ")", "self", ".", "_node_in_tree", "(", "name", ")", "return", "self", ".", "_get_subtree", "(", "name", ")" ]
r""" Get all node names in a sub-tree. :param name: Sub-tree root node name :type name: :ref:`NodeName` :rtype: list of :ref:`NodeName` :raises: * RuntimeError (Argument \`name\` is not valid) * RuntimeError (Node *[name]* not in tree) Using the same example tree created in :py:meth:`ptrie.Trie.add_nodes`:: >>> from __future__ import print_function >>> import docs.support.ptrie_example, pprint >>> tobj = docs.support.ptrie_example.create_tree() >>> print(tobj) root ├branch1 (*) │├leaf1 ││└subleaf1 (*) │└leaf2 (*) │ └subleaf2 └branch2 >>> pprint.pprint(tobj.get_subtree('root.branch1')) ['root.branch1', 'root.branch1.leaf1', 'root.branch1.leaf1.subleaf1', 'root.branch1.leaf2', 'root.branch1.leaf2.subleaf2']
[ "r", "Get", "all", "node", "names", "in", "a", "sub", "-", "tree", "." ]
python
train
30.923077
kelproject/pykube
pykube/config.py
https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L260-L269
def filename(self): """ Returns the provided data as a file location. """ if self._filename: return self._filename else: with tempfile.NamedTemporaryFile(delete=False) as f: f.write(self._bytes) return f.name
[ "def", "filename", "(", "self", ")", ":", "if", "self", ".", "_filename", ":", "return", "self", ".", "_filename", "else", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "as", "f", ":", "f", ".", "write", "(", "self", ".", "_bytes", ")", "return", "f", ".", "name" ]
Returns the provided data as a file location.
[ "Returns", "the", "provided", "data", "as", "a", "file", "location", "." ]
python
train
29.1
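The pattern above, exposing in-memory bytes as a file path by materializing a temp file on demand, can be sketched standalone; the class name and sample bytes are illustrative:

    import tempfile

    class BytesOrFile:
        def __init__(self, data=None, filename=None):
            self._bytes = data
            self._filename = filename

        @property
        def filename(self):
            # prefer an existing path; otherwise persist the bytes to a temp file
            if self._filename:
                return self._filename
            with tempfile.NamedTemporaryFile(delete=False) as f:
                f.write(self._bytes)
                return f.name

    print(BytesOrFile(data=b'certificate-data').filename)  # e.g. a /tmp/... path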
gwastro/pycbc-glue
pycbc_glue/ligolw/lsctables.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/lsctables.py#L4278-L4284
def apply_to_segmentlist(self, seglist): """ Apply our low and high windows to the segments in a segmentlist. """ for i, seg in enumerate(seglist): seglist[i] = seg.__class__(seg[0] - self.low_window, seg[1] + self.high_window)
[ "def", "apply_to_segmentlist", "(", "self", ",", "seglist", ")", ":", "for", "i", ",", "seg", "in", "enumerate", "(", "seglist", ")", ":", "seglist", "[", "i", "]", "=", "seg", ".", "__class__", "(", "seg", "[", "0", "]", "-", "self", ".", "low_window", ",", "seg", "[", "1", "]", "+", "self", ".", "high_window", ")" ]
Apply our low and high windows to the segments in a segmentlist.
[ "Apply", "our", "low", "and", "high", "windows", "to", "the", "segments", "in", "a", "segmentlist", "." ]
python
train
33.428571
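Because the method above relies only on indexing and `seg.__class__(start, end)`, a tuple subclass is enough to demonstrate the in-place padding; all names here are illustrative stand-ins for the glue segment types:

    class Segment(tuple):
        def __new__(cls, start, end):
            return super().__new__(cls, (start, end))

    class Padding:
        def __init__(self, low_window, high_window):
            self.low_window = low_window
            self.high_window = high_window

        def apply_to_segmentlist(self, seglist):
            # widen each segment by the configured windows, in place
            for i, seg in enumerate(seglist):
                seglist[i] = seg.__class__(seg[0] - self.low_window,
                                           seg[1] + self.high_window)

    segs = [Segment(10, 20), Segment(40, 45)]
    Padding(2, 3).apply_to_segmentlist(segs)
    print(segs)  # [(8, 23), (38, 48)]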
ASKIDA/Selenium2LibraryExtension
src/Selenium2LibraryExtension/keywords/__init__.py
https://github.com/ASKIDA/Selenium2LibraryExtension/blob/5ca3fa776063c6046dff317cb2575e4772d7541f/src/Selenium2LibraryExtension/keywords/__init__.py#L332-L339
def element_focus_should_be_set(self, locator): """Verifies the element identified by `locator` has focus. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id |""" self._info("Verifying element '%s' focus is set" % locator) self._check_element_focus(True, locator)
[ "def", "element_focus_should_be_set", "(", "self", ",", "locator", ")", ":", "self", ".", "_info", "(", "\"Verifying element '%s' focus is set\"", "%", "locator", ")", "self", ".", "_check_element_focus", "(", "True", ",", "locator", ")" ]
Verifies the element identified by `locator` has focus. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id |
[ "Verifies", "the", "element", "identified", "by", "locator", "has", "focus", "." ]
python
train
38.75
iterative/dvc
dvc/version.py
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/version.py#L13-L27
def _generate_version(base_version): """Generate a version with information about the git repository""" pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) if not _is_git_repo(pkg_dir) or not _have_git(): return base_version if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir): return base_version return "{base_version}+{short_sha}{dirty}".format( base_version=base_version, short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6], dirty=".mod" if _is_dirty(pkg_dir) else "", )
[ "def", "_generate_version", "(", "base_version", ")", ":", "pkg_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", ")", "if", "not", "_is_git_repo", "(", "pkg_dir", ")", "or", "not", "_have_git", "(", ")", ":", "return", "base_version", "if", "_is_release", "(", "pkg_dir", ",", "base_version", ")", "and", "not", "_is_dirty", "(", "pkg_dir", ")", ":", "return", "base_version", "return", "\"{base_version}+{short_sha}{dirty}\"", ".", "format", "(", "base_version", "=", "base_version", ",", "short_sha", "=", "_git_revision", "(", "pkg_dir", ")", ".", "decode", "(", "\"utf-8\"", ")", "[", "0", ":", "6", "]", ",", "dirty", "=", "\".mod\"", "if", "_is_dirty", "(", "pkg_dir", ")", "else", "\"\"", ",", ")" ]
Generate a version with information about the git repository
[ "Generate", "a", "version", "with", "information", "about", "the", "git", "repository" ]
python
train
37.333333
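The version string assembled above has the shape `base+shortsha[.mod]`; the values below are made up to show the formatting only:

    base_version = '0.35.7'
    short_sha = '1a2b3c'   # first 6 chars of a (fictional) git revision
    dirty = '.mod'         # appended when the working tree is dirty
    print('{base_version}+{short_sha}{dirty}'.format(
        base_version=base_version, short_sha=short_sha, dirty=dirty))
    # 0.35.7+1a2b3c.mod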
Azure/azure-sdk-for-python
azure-mgmt-reservations/azure/mgmt/reservations/operations/reservation_operations.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-reservations/azure/mgmt/reservations/operations/reservation_operations.py#L194-L243
def merge( self, reservation_order_id, sources=None, custom_headers=None, raw=False, polling=True, **operation_config): """Merges two `Reservation`s. Merge the specified `Reservation`s into a new `Reservation`. The two `Reservation`s being merged must have the same properties. :param reservation_order_id: Order Id of the reservation :type reservation_order_id: str :param sources: Format of the resource id should be /providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId} :type sources: list[str] :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns list or ClientRawResponse<list> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.reservations.models.ReservationResponse]] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.reservations.models.ReservationResponse]]] :raises: :class:`ErrorException<azure.mgmt.reservations.models.ErrorException>` """ raw_result = self._merge_initial( reservation_order_id=reservation_order_id, sources=sources, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): deserialized = self._deserialize('[ReservationResponse]', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
[ "def", "merge", "(", "self", ",", "reservation_order_id", ",", "sources", "=", "None", ",", "custom_headers", "=", "None", ",", "raw", "=", "False", ",", "polling", "=", "True", ",", "*", "*", "operation_config", ")", ":", "raw_result", "=", "self", ".", "_merge_initial", "(", "reservation_order_id", "=", "reservation_order_id", ",", "sources", "=", "sources", ",", "custom_headers", "=", "custom_headers", ",", "raw", "=", "True", ",", "*", "*", "operation_config", ")", "def", "get_long_running_output", "(", "response", ")", ":", "deserialized", "=", "self", ".", "_deserialize", "(", "'[ReservationResponse]'", ",", "response", ")", "if", "raw", ":", "client_raw_response", "=", "ClientRawResponse", "(", "deserialized", ",", "response", ")", "return", "client_raw_response", "return", "deserialized", "lro_delay", "=", "operation_config", ".", "get", "(", "'long_running_operation_timeout'", ",", "self", ".", "config", ".", "long_running_operation_timeout", ")", "if", "polling", "is", "True", ":", "polling_method", "=", "ARMPolling", "(", "lro_delay", ",", "*", "*", "operation_config", ")", "elif", "polling", "is", "False", ":", "polling_method", "=", "NoPolling", "(", ")", "else", ":", "polling_method", "=", "polling", "return", "LROPoller", "(", "self", ".", "_client", ",", "raw_result", ",", "get_long_running_output", ",", "polling_method", ")" ]
Merges two `Reservation`s. Merge the specified `Reservation`s into a new `Reservation`. The two `Reservation`s being merged must have the same properties. :param reservation_order_id: Order Id of the reservation :type reservation_order_id: str :param sources: Format of the resource id should be /providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId} :type sources: list[str] :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns list or ClientRawResponse<list> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.reservations.models.ReservationResponse]] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.reservations.models.ReservationResponse]]] :raises: :class:`ErrorException<azure.mgmt.reservations.models.ErrorException>`
[ "Merges", "two", "Reservation", "s", "." ]
python
test
47.68
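A hedged calling sketch for the long-running operation above; the client attribute and all ids are placeholders, assuming an authenticated azure-mgmt-reservations client:

    # Illustrative only: 'client' and the ids below are assumptions.
    poller = client.reservation.merge(
        reservation_order_id='<order-id>',
        sources=[
            '/providers/Microsoft.Capacity/reservationOrders/<order-id>/reservations/<res-1>',
            '/providers/Microsoft.Capacity/reservationOrders/<order-id>/reservations/<res-2>',
        ])
    merged = poller.result()  # blocks until the LRO finishes, returns the list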
elastic/apm-agent-python
elasticapm/instrumentation/packages/base.py
https://github.com/elastic/apm-agent-python/blob/2975663d7bd22282dc39336b2c37b37c12c7a774/elasticapm/instrumentation/packages/base.py#L143-L157
def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction): """ Method called for unsampled wrapped calls. This can e.g. be used to add traceparent headers to the underlying http call for HTTP instrumentations. :param module: :param method: :param wrapped: :param instance: :param args: :param kwargs: :param transaction: :return: """ return args, kwargs
[ "def", "mutate_unsampled_call_args", "(", "self", ",", "module", ",", "method", ",", "wrapped", ",", "instance", ",", "args", ",", "kwargs", ",", "transaction", ")", ":", "return", "args", ",", "kwargs" ]
Method called for unsampled wrapped calls. This can e.g. be used to add traceparent headers to the underlying http call for HTTP instrumentations. :param module: :param method: :param wrapped: :param instance: :param args: :param kwargs: :param transaction: :return:
[ "Method", "called", "for", "unsampled", "wrapped", "calls", ".", "This", "can", "e", ".", "g", ".", "be", "used", "to", "add", "traceparent", "headers", "to", "the", "underlying", "http", "call", "for", "HTTP", "instrumentations", "." ]
python
train
32.333333
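A subclass would override this hook to rewrite the outgoing call. The sketch below shows the shape of such an override with an illustrative header value; it is not the agent's real traceparent logic:

    class MyHttpInstrumentation:  # stands in for an instrumentation subclass
        def mutate_unsampled_call_args(self, module, method, wrapped, instance,
                                       args, kwargs, transaction):
            # inject a placeholder traceparent header into the wrapped HTTP call
            headers = kwargs.setdefault('headers', {})
            headers.setdefault('traceparent', '00-<trace-id>-<span-id>-00')
            return args, kwargs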
Alignak-monitoring/alignak
alignak/daterange.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daterange.py#L993-L1042
def get_start_and_end_time(self, ref=None): """Specific function to get start time and end time for MonthDayDaterange :param ref: time in seconds :type ref: int :return: tuple with start and end time :rtype: tuple (int, int) """ now = time.localtime(ref) if self.syear == 0: self.syear = now.tm_year month_start_id = now.tm_mon day_start = find_day_by_offset(self.syear, month_start_id, self.smday) start_time = get_start_of_day(self.syear, month_start_id, day_start) if self.eyear == 0: self.eyear = now.tm_year month_end_id = now.tm_mon day_end = find_day_by_offset(self.eyear, month_end_id, self.emday) end_time = get_end_of_day(self.eyear, month_end_id, day_end) now_epoch = time.mktime(now) if start_time > end_time: month_start_id -= 1 if month_start_id < 1: month_start_id = 12 self.syear -= 1 day_start = find_day_by_offset(self.syear, month_start_id, self.smday) start_time = get_start_of_day(self.syear, month_start_id, day_start) if end_time < now_epoch: month_end_id += 1 month_start_id += 1 if month_end_id > 12: month_end_id = 1 self.eyear += 1 if month_start_id > 12: month_start_id = 1 self.syear += 1 # For the start day_start = find_day_by_offset(self.syear, month_start_id, self.smday) start_time = get_start_of_day(self.syear, month_start_id, day_start) # For the end day_end = find_day_by_offset(self.eyear, month_end_id, self.emday) end_time = get_end_of_day(self.eyear, month_end_id, day_end) return (start_time, end_time)
[ "def", "get_start_and_end_time", "(", "self", ",", "ref", "=", "None", ")", ":", "now", "=", "time", ".", "localtime", "(", "ref", ")", "if", "self", ".", "syear", "==", "0", ":", "self", ".", "syear", "=", "now", ".", "tm_year", "month_start_id", "=", "now", ".", "tm_mon", "day_start", "=", "find_day_by_offset", "(", "self", ".", "syear", ",", "month_start_id", ",", "self", ".", "smday", ")", "start_time", "=", "get_start_of_day", "(", "self", ".", "syear", ",", "month_start_id", ",", "day_start", ")", "if", "self", ".", "eyear", "==", "0", ":", "self", ".", "eyear", "=", "now", ".", "tm_year", "month_end_id", "=", "now", ".", "tm_mon", "day_end", "=", "find_day_by_offset", "(", "self", ".", "eyear", ",", "month_end_id", ",", "self", ".", "emday", ")", "end_time", "=", "get_end_of_day", "(", "self", ".", "eyear", ",", "month_end_id", ",", "day_end", ")", "now_epoch", "=", "time", ".", "mktime", "(", "now", ")", "if", "start_time", ">", "end_time", ":", "month_start_id", "-=", "1", "if", "month_start_id", "<", "1", ":", "month_start_id", "=", "12", "self", ".", "syear", "-=", "1", "day_start", "=", "find_day_by_offset", "(", "self", ".", "syear", ",", "month_start_id", ",", "self", ".", "smday", ")", "start_time", "=", "get_start_of_day", "(", "self", ".", "syear", ",", "month_start_id", ",", "day_start", ")", "if", "end_time", "<", "now_epoch", ":", "month_end_id", "+=", "1", "month_start_id", "+=", "1", "if", "month_end_id", ">", "12", ":", "month_end_id", "=", "1", "self", ".", "eyear", "+=", "1", "if", "month_start_id", ">", "12", ":", "month_start_id", "=", "1", "self", ".", "syear", "+=", "1", "# For the start", "day_start", "=", "find_day_by_offset", "(", "self", ".", "syear", ",", "month_start_id", ",", "self", ".", "smday", ")", "start_time", "=", "get_start_of_day", "(", "self", ".", "syear", ",", "month_start_id", ",", "day_start", ")", "# For the end", "day_end", "=", "find_day_by_offset", "(", "self", ".", "eyear", ",", "month_end_id", ",", "self", ".", "emday", ")", "end_time", "=", "get_end_of_day", "(", "self", ".", "eyear", ",", "month_end_id", ",", "day_end", ")", "return", "(", "start_time", ",", "end_time", ")" ]
Specific function to get start time and end time for MonthDayDaterange :param ref: time in seconds :type ref: int :return: tuple with start and end time :rtype: tuple (int, int)
[ "Specific", "function", "to", "get", "start", "time", "and", "end", "time", "for", "MonthDayDaterange" ]
python
train
36.72
juju/charm-helpers
charmhelpers/contrib/storage/linux/ceph.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/storage/linux/ceph.py#L1351-L1365
def get_previous_request(rid): """Return the last ceph broker request sent on a given relation @param rid: Relation id to query for request """ request = None broker_req = relation_get(attribute='broker_req', rid=rid, unit=local_unit()) if broker_req: request_data = json.loads(broker_req) request = CephBrokerRq(api_version=request_data['api-version'], request_id=request_data['request-id']) request.set_ops(request_data['ops']) return request
[ "def", "get_previous_request", "(", "rid", ")", ":", "request", "=", "None", "broker_req", "=", "relation_get", "(", "attribute", "=", "'broker_req'", ",", "rid", "=", "rid", ",", "unit", "=", "local_unit", "(", ")", ")", "if", "broker_req", ":", "request_data", "=", "json", ".", "loads", "(", "broker_req", ")", "request", "=", "CephBrokerRq", "(", "api_version", "=", "request_data", "[", "'api-version'", "]", ",", "request_id", "=", "request_data", "[", "'request-id'", "]", ")", "request", ".", "set_ops", "(", "request_data", "[", "'ops'", "]", ")", "return", "request" ]
Return the last ceph broker request sent on a given relation @param rid: Relation id to query for request
[ "Return", "the", "last", "ceph", "broker", "request", "sent", "on", "a", "given", "relation" ]
python
train
36.333333
Esri/ArcREST
src/arcrest/ags/server.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/server.py#L57-L74
def _validateurl(self, url): """assembles the server url""" parsed = urlparse(url) path = parsed.path.strip("/") if path: parts = path.split("/") url_types = ("admin", "manager", "rest") if any(i in parts for i in url_types): while parts.pop() not in url_types: next elif "services" in parts: while parts.pop() not in "services": next path = "/".join(parts) else: path = "arcgis" self._adminUrl = "%s://%s/%s/admin" % (parsed.scheme, parsed.netloc, path) return "%s://%s/%s/rest/services" % (parsed.scheme, parsed.netloc, path)
[ "def", "_validateurl", "(", "self", ",", "url", ")", ":", "parsed", "=", "urlparse", "(", "url", ")", "path", "=", "parsed", ".", "path", ".", "strip", "(", "\"/\"", ")", "if", "path", ":", "parts", "=", "path", ".", "split", "(", "\"/\"", ")", "url_types", "=", "(", "\"admin\"", ",", "\"manager\"", ",", "\"rest\"", ")", "if", "any", "(", "i", "in", "parts", "for", "i", "in", "url_types", ")", ":", "while", "parts", ".", "pop", "(", ")", "not", "in", "url_types", ":", "next", "elif", "\"services\"", "in", "parts", ":", "while", "parts", ".", "pop", "(", ")", "not", "in", "\"services\"", ":", "next", "path", "=", "\"/\"", ".", "join", "(", "parts", ")", "else", ":", "path", "=", "\"arcgis\"", "self", ".", "_adminUrl", "=", "\"%s://%s/%s/admin\"", "%", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ",", "path", ")", "return", "\"%s://%s/%s/rest/services\"", "%", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ",", "path", ")" ]
assembles the server url
[ "assembles", "the", "server", "url" ]
python
train
39.444444
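The normalization above can be replayed standalone. Note one deliberate change: the original tests `parts.pop() not in "services"`, which is a substring check against a string; the sketch uses an equality test instead:

    from urllib.parse import urlparse

    def rest_url(url):
        parsed = urlparse(url)
        path = parsed.path.strip('/')
        parts = path.split('/') if path else []
        url_types = ('admin', 'manager', 'rest')
        if any(p in parts for p in url_types):
            while parts.pop() not in url_types:
                pass
        elif 'services' in parts:
            while parts.pop() != 'services':
                pass
        path = '/'.join(parts) or 'arcgis'
        return '%s://%s/%s/rest/services' % (parsed.scheme, parsed.netloc, path)

    print(rest_url('https://example.com/arcgis/rest/services/Folder/MapServer'))
    # https://example.com/arcgis/rest/services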
ThreatConnect-Inc/tcex
tcex/tcex_resources.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_resources.py#L1621-L1629
def indicator(self, data): """Update the request URI to include the Indicator for specific indicator retrieval. Args: data (string): The indicator value """ # handle hashes in form md5 : sha1 : sha256 data = self.get_first_hash(data) super(File, self).indicator(data)
[ "def", "indicator", "(", "self", ",", "data", ")", ":", "# handle hashes in form md5 : sha1 : sha256", "data", "=", "self", ".", "get_first_hash", "(", "data", ")", "super", "(", "File", ",", "self", ")", ".", "indicator", "(", "data", ")" ]
Update the request URI to include the Indicator for specific indicator retrieval. Args: data (string): The indicator value
[ "Update", "the", "request", "URI", "to", "include", "the", "Indicator", "for", "specific", "indicator", "retrieval", "." ]
python
train
35.555556
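The comment above refers to indicators arriving as a colon-delimited hash triple. This hypothetical re-implementation shows the assumed behavior of `get_first_hash` (the real tcex helper may differ); the hash values are made up:

    def get_first_hash(data):
        # keep only the first entry of an "md5 : sha1 : sha256" string
        return data.split(' : ')[0].strip()

    print(get_first_hash('aaa111 : bbb222 : ccc333'))  # aaa111
    print(get_first_hash('aaa111'))                    # unchanged single hash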
fhcrc/taxtastic
taxtastic/taxonomy.py
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxonomy.py#L457-L481
def verify_rank_integrity(self, tax_id, rank, parent_id, children): """Confirm that for each node the parent ranks and children ranks are coherent """ def _lower(n1, n2): return self.ranks.index(n1) < self.ranks.index(n2) if rank not in self.ranks: raise TaxonIntegrityError('rank "{}" is undefined'.format(rank)) parent_rank = self.rank(parent_id) # undefined ranks can be placed anywhere in a lineage if not _lower(rank, parent_rank) and rank != self.NO_RANK: msg = ('New node "{}", rank "{}" has same or ' 'higher rank than parent node "{}", rank "{}"') msg = msg.format(tax_id, rank, parent_id, parent_rank) raise TaxonIntegrityError(msg) for child in children: if not _lower(self.rank(child), rank): msg = 'Child node {} has same or lower rank as new node {}' msg = msg.format(tax_id, child) raise TaxonIntegrityError(msg) return True
[ "def", "verify_rank_integrity", "(", "self", ",", "tax_id", ",", "rank", ",", "parent_id", ",", "children", ")", ":", "def", "_lower", "(", "n1", ",", "n2", ")", ":", "return", "self", ".", "ranks", ".", "index", "(", "n1", ")", "<", "self", ".", "ranks", ".", "index", "(", "n2", ")", "if", "rank", "not", "in", "self", ".", "ranks", ":", "raise", "TaxonIntegrityError", "(", "'rank \"{}\" is undefined'", ".", "format", "(", "rank", ")", ")", "parent_rank", "=", "self", ".", "rank", "(", "parent_id", ")", "# undefined ranks can be placed anywhere in a lineage", "if", "not", "_lower", "(", "rank", ",", "parent_rank", ")", "and", "rank", "!=", "self", ".", "NO_RANK", ":", "msg", "=", "(", "'New node \"{}\", rank \"{}\" has same or '", "'higher rank than parent node \"{}\", rank \"{}\"'", ")", "msg", "=", "msg", ".", "format", "(", "tax_id", ",", "rank", ",", "parent_id", ",", "parent_rank", ")", "raise", "TaxonIntegrityError", "(", "msg", ")", "for", "child", "in", "children", ":", "if", "not", "_lower", "(", "self", ".", "rank", "(", "child", ")", ",", "rank", ")", ":", "msg", "=", "'Child node {} has same or lower rank as new node {}'", "msg", "=", "msg", ".", "format", "(", "tax_id", ",", "child", ")", "raise", "TaxonIntegrityError", "(", "msg", ")", "return", "True" ]
Confirm that for each node the parent ranks and children ranks are coherent
[ "Confirm", "that", "for", "each", "node", "the", "parent", "ranks", "and", "children", "ranks", "are", "coherent" ]
python
train
41.52
bachiraoun/pysimplelog
SimpleLog.py
https://github.com/bachiraoun/pysimplelog/blob/2681ed5b1b8d7e66c3fff3ec3cca2b14ac571238/SimpleLog.py#L1210-L1212
def warn(self, message, *args, **kwargs): """alias to message at warning level""" self.log("warn", message, *args, **kwargs)
[ "def", "warn", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "log", "(", "\"warn\"", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
alias to message at warning level
[ "alias", "to", "message", "at", "warning", "level" ]
python
train
46
opendatateam/udata
udata/utils.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/utils.py#L145-L154
def to_iso(dt): ''' Format a date or datetime into an ISO-8601 string Support dates before 1900. ''' if isinstance(dt, datetime): return to_iso_datetime(dt) elif isinstance(dt, date): return to_iso_date(dt)
[ "def", "to_iso", "(", "dt", ")", ":", "if", "isinstance", "(", "dt", ",", "datetime", ")", ":", "return", "to_iso_datetime", "(", "dt", ")", "elif", "isinstance", "(", "dt", ",", "date", ")", ":", "return", "to_iso_date", "(", "dt", ")" ]
Format a date or datetime into an ISO-8601 string Support dates before 1900.
[ "Format", "a", "date", "or", "datetime", "into", "an", "ISO", "-", "8601", "string" ]
python
train
23.8
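The dispatch above is easy to mirror with the stdlib; this sketch uses `isoformat` directly (udata's own helpers also handle pre-1900 dates, which older `strftime` implementations rejected):

    from datetime import date, datetime

    def to_iso_sketch(dt):
        # datetime must be checked first: datetime is a subclass of date
        if isinstance(dt, datetime):
            return dt.isoformat()
        elif isinstance(dt, date):
            return dt.isoformat()

    print(to_iso_sketch(datetime(1845, 3, 1, 12, 30)))  # 1845-03-01T12:30:00
    print(to_iso_sketch(date(1845, 3, 1)))              # 1845-03-01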
EntilZha/PyFunctional
functional/transformations.py
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/transformations.py#L526-L535
def count_by_key_impl(sequence): """ Implementation for count_by_key_t :param sequence: sequence of (key, value) pairs :return: counts by key """ counter = collections.Counter() for key, _ in sequence: counter[key] += 1 return six.viewitems(counter)
[ "def", "count_by_key_impl", "(", "sequence", ")", ":", "counter", "=", "collections", ".", "Counter", "(", ")", "for", "key", ",", "_", "in", "sequence", ":", "counter", "[", "key", "]", "+=", "1", "return", "six", ".", "viewitems", "(", "counter", ")" ]
Implementation for count_by_key_t :param sequence: sequence of (key, value) pairs :return: counts by key
[ "Implementation", "for", "count_by_key_t", ":", "param", "sequence", ":", "sequence", "of", "(", "key", "value", ")", "pairs", ":", "return", ":", "counts", "by", "key" ]
python
train
28
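A Python-3-only version of the same counting; `six.viewitems` simply returns `dict.items()` on Python 3:

    import collections

    def count_by_key(sequence):
        counter = collections.Counter()
        for key, _ in sequence:
            counter[key] += 1
        return counter.items()

    print(sorted(count_by_key([('a', 1), ('b', 2), ('a', 3)])))
    # [('a', 2), ('b', 1)]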
chaoss/grimoirelab-sortinghat
sortinghat/cmd/autogender.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/autogender.py#L91-L146
def autogender(self, api_token=None, genderize_all=False): """Autocomplete gender information of unique identities. Autocomplete unique identities gender using genderize.io API. Only those unique identities without an assigned gender will be updated unless `genderize_all` option is given. """ name_cache = {} no_gender = not genderize_all pattern = re.compile(r"(^\w+)\s\w+") profiles = api.search_profiles(self.db, no_gender=no_gender) for profile in profiles: if not profile.name: continue name = profile.name.strip() m = pattern.match(name) if not m: continue firstname = m.group(1).lower() if firstname in name_cache: gender_data = name_cache[firstname] else: try: gender, acc = genderize(firstname, api_token) except (requests.exceptions.RequestException, requests.exceptions.RetryError) as e: msg = "Skipping '%s' name (%s) due to a connection error. Error: %s" msg = msg % (firstname, profile.uuid, str(e)) self.warning(msg) continue gender_data = { 'gender': gender, 'gender_acc': acc } name_cache[firstname] = gender_data if not gender_data['gender']: continue try: api.edit_profile(self.db, profile.uuid, **gender_data) self.display('autogender.tmpl', uuid=profile.uuid, name=profile.name, gender_data=gender_data) except (NotFoundError, InvalidValueError) as e: self.error(str(e)) return e.code return CMD_SUCCESS
[ "def", "autogender", "(", "self", ",", "api_token", "=", "None", ",", "genderize_all", "=", "False", ")", ":", "name_cache", "=", "{", "}", "no_gender", "=", "not", "genderize_all", "pattern", "=", "re", ".", "compile", "(", "r\"(^\\w+)\\s\\w+\"", ")", "profiles", "=", "api", ".", "search_profiles", "(", "self", ".", "db", ",", "no_gender", "=", "no_gender", ")", "for", "profile", "in", "profiles", ":", "if", "not", "profile", ".", "name", ":", "continue", "name", "=", "profile", ".", "name", ".", "strip", "(", ")", "m", "=", "pattern", ".", "match", "(", "name", ")", "if", "not", "m", ":", "continue", "firstname", "=", "m", ".", "group", "(", "1", ")", ".", "lower", "(", ")", "if", "firstname", "in", "name_cache", ":", "gender_data", "=", "name_cache", "[", "firstname", "]", "else", ":", "try", ":", "gender", ",", "acc", "=", "genderize", "(", "firstname", ",", "api_token", ")", "except", "(", "requests", ".", "exceptions", ".", "RequestException", ",", "requests", ".", "exceptions", ".", "RetryError", ")", "as", "e", ":", "msg", "=", "\"Skipping '%s' name (%s) due to a connection error. Error: %s\"", "msg", "=", "msg", "%", "(", "firstname", ",", "profile", ".", "uuid", ",", "str", "(", "e", ")", ")", "self", ".", "warning", "(", "msg", ")", "continue", "gender_data", "=", "{", "'gender'", ":", "gender", ",", "'gender_acc'", ":", "acc", "}", "name_cache", "[", "firstname", "]", "=", "gender_data", "if", "not", "gender_data", "[", "'gender'", "]", ":", "continue", "try", ":", "api", ".", "edit_profile", "(", "self", ".", "db", ",", "profile", ".", "uuid", ",", "*", "*", "gender_data", ")", "self", ".", "display", "(", "'autogender.tmpl'", ",", "uuid", "=", "profile", ".", "uuid", ",", "name", "=", "profile", ".", "name", ",", "gender_data", "=", "gender_data", ")", "except", "(", "NotFoundError", ",", "InvalidValueError", ")", "as", "e", ":", "self", ".", "error", "(", "str", "(", "e", ")", ")", "return", "e", ".", "code", "return", "CMD_SUCCESS" ]
Autocomplete gender information of unique identities. Autocomplete unique identities gender using genderize.io API. Only those unique identities without an assigned gender will be updated unless `genderize_all` option is given.
[ "Autocomplete", "gender", "information", "of", "unique", "identities", "." ]
python
train
34.267857
onicagroup/runway
runway/commands/modules_command.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/commands/modules_command.py#L382-L394
def reverse_deployments(deployments=None): """Reverse deployments and the modules/regions in them.""" if deployments is None: deployments = [] reversed_deployments = [] for i in deployments[::-1]: deployment = copy.deepcopy(i) for config in ['modules', 'regions']: if deployment.get(config): deployment[config] = deployment[config][::-1] reversed_deployments.append(deployment) return reversed_deployments
[ "def", "reverse_deployments", "(", "deployments", "=", "None", ")", ":", "if", "deployments", "is", "None", ":", "deployments", "=", "[", "]", "reversed_deployments", "=", "[", "]", "for", "i", "in", "deployments", "[", ":", ":", "-", "1", "]", ":", "deployment", "=", "copy", ".", "deepcopy", "(", "i", ")", "for", "config", "in", "[", "'modules'", ",", "'regions'", "]", ":", "if", "deployment", ".", "get", "(", "config", ")", ":", "deployment", "[", "config", "]", "=", "deployment", "[", "config", "]", "[", ":", ":", "-", "1", "]", "reversed_deployments", ".", "append", "(", "deployment", ")", "return", "reversed_deployments" ]
Reverse deployments and the modules/regions in them.
[ "Reverse", "deployments", "and", "the", "modules", "/", "regions", "in", "them", "." ]
python
train
39.846154
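Assuming `reverse_deployments` from this record is importable, a small input makes the double reversal (deployment order, then the per-deployment module and region lists) concrete:

    deployments = [
        {'modules': ['m1', 'm2'], 'regions': ['us-east-1', 'us-west-2']},
        {'modules': ['m3']},
    ]
    print(reverse_deployments(deployments))
    # [{'modules': ['m3']},
    #  {'modules': ['m2', 'm1'], 'regions': ['us-west-2', 'us-east-1']}]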
RediSearch/redisearch-py
redisearch/aggregation.py
https://github.com/RediSearch/redisearch-py/blob/f65d1dd078713cbe9b83584e86655a254d0531ab/redisearch/aggregation.py#L169-L182
def apply(self, **kwexpr): """ Specify one or more projection expressions to add to each result ### Parameters - **kwexpr**: One or more key-value pairs for a projection. The key is the alias for the projection, and the value is the projection expression itself, for example `apply(square_root="sqrt(@foo)")` """ for alias, expr in kwexpr.items(): self._projections.append([alias, expr]) return self
[ "def", "apply", "(", "self", ",", "*", "*", "kwexpr", ")", ":", "for", "alias", ",", "expr", "in", "kwexpr", ".", "items", "(", ")", ":", "self", ".", "_projections", ".", "append", "(", "[", "alias", ",", "expr", "]", ")", "return", "self" ]
Specify one or more projection expressions to add to each result ### Parameters - **kwexpr**: One or more key-value pairs for a projection. The key is the alias for the projection, and the value is the projection expression itself, for example `apply(square_root="sqrt(@foo)")`
[ "Specify", "one", "or", "more", "projection", "expressions", "to", "add", "to", "each", "result" ]
python
valid
34.357143
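The docstring's own `apply(square_root="sqrt(@foo)")` example slots into the builder like this; the query and field names are illustrative, and the import path is an assumption about redisearch-py's layout:

    from redisearch.aggregation import AggregateRequest  # assumed import path

    req = AggregateRequest('*').apply(square_root='sqrt(@foo)')
    # 'square_root' becomes the alias, 'sqrt(@foo)' the projection expression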
rsgalloway/grit
grit/repo/version.py
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/repo/version.py#L193-L203
def save(self, msg=None): """ Modify item data and commit to repo. Git objects are immutable; to save means adding a new item. :param msg: Commit message. """ if msg is None: msg = 'Saving %s' % self.name log.debug(msg) self.repo.addItem(self, msg)
[ "def", "save", "(", "self", ",", "msg", "=", "None", ")", ":", "if", "msg", "is", "None", ":", "msg", "=", "'Saving %s'", "%", "self", ".", "name", "log", ".", "debug", "(", "msg", ")", "self", ".", "repo", ".", "addItem", "(", "self", ",", "msg", ")" ]
Modify item data and commit to repo. Git objects are immutable; to save means adding a new item. :param msg: Commit message.
[ "Modify", "item", "data", "and", "commit", "to", "repo", ".", "Git", "objects", "are", "immutable", "to", "save", "means", "adding", "a", "new", "item" ]
python
train
28.636364
PhilippeFerreiraDeSousa/bitext-matching
lib/enpc_aligner/IBM2_func.py
https://github.com/PhilippeFerreiraDeSousa/bitext-matching/blob/195c3e98775cfa5e63e4bb0bb1da6f741880d980/lib/enpc_aligner/IBM2_func.py#L104-L131
def matrix( m, n, lst, m_text: list=None, n_text: list=None): """ m: row n: column lst: items >>> print(matrix(2, 3, [(1, 1), (2, 3)])) |x| | | | | |x| """ fmt = "" if n_text: fmt += " {}\n".format(" ".join(n_text)) for i in range(1, m+1): if m_text: fmt += "{:<4.4} ".format(m_text[i-1]) fmt += "|" for j in range(1, n+1): if (i, j) in lst: fmt += "x|" else: fmt += " |" fmt += "\n" return fmt
[ "def", "matrix", "(", "m", ",", "n", ",", "lst", ",", "m_text", ":", "list", "=", "None", ",", "n_text", ":", "list", "=", "None", ")", ":", "fmt", "=", "\"\"", "if", "n_text", ":", "fmt", "+=", "\" {}\\n\"", ".", "format", "(", "\" \"", ".", "join", "(", "n_text", ")", ")", "for", "i", "in", "range", "(", "1", ",", "m", "+", "1", ")", ":", "if", "m_text", ":", "fmt", "+=", "\"{:<4.4} \"", ".", "format", "(", "m_text", "[", "i", "-", "1", "]", ")", "fmt", "+=", "\"|\"", "for", "j", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "if", "(", "i", ",", "j", ")", "in", "lst", ":", "fmt", "+=", "\"x|\"", "else", ":", "fmt", "+=", "\" |\"", "fmt", "+=", "\"\\n\"", "return", "fmt" ]
m: row n: column lst: items >>> print(matrix(2, 3, [(1, 1), (2, 3)])) |x| | | | | |x|
[ "m", ":", "row", "n", ":", "column", "lst", ":", "items", ">>>", "print", "(", "_matrix", "(", "2", "3", "[", "(", "1", "1", ")", "(", "2", "3", ")", "]", "))", "|x|", "|", "|", "|", "|", "|x|" ]
python
train
20.75
Fantomas42/django-blog-zinnia
zinnia/views/mixins/callable_queryset.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/views/mixins/callable_queryset.py#L13-L20
def get_queryset(self): """ Check that the queryset is defined and call it. """ if self.queryset is None: raise ImproperlyConfigured( "'%s' must define 'queryset'" % self.__class__.__name__) return self.queryset()
[ "def", "get_queryset", "(", "self", ")", ":", "if", "self", ".", "queryset", "is", "None", ":", "raise", "ImproperlyConfigured", "(", "\"'%s' must define 'queryset'\"", "%", "self", ".", "__class__", ".", "__name__", ")", "return", "self", ".", "queryset", "(", ")" ]
Check that the queryset is defined and call it.
[ "Check", "that", "the", "queryset", "is", "defined", "and", "call", "it", "." ]
python
train
34.25
obulpathi/cdn-fastly-python
fastly/__init__.py
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L436-L439
def check_domain(self, service_id, version_number, name): """Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly setup to use Fastly.""" content = self._fetch("/service/%s/version/%d/domain/%s/check" % (service_id, version_number, name)) return FastlyDomainCheck(self, content)
[ "def", "check_domain", "(", "self", ",", "service_id", ",", "version_number", ",", "name", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/domain/%s/check\"", "%", "(", "service_id", ",", "version_number", ",", "name", ")", ")", "return", "FastlyDomainCheck", "(", "self", ",", "content", ")" ]
Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly setup to use Fastly.
[ "Checks", "the", "status", "of", "a", "domain", "s", "DNS", "record", ".", "Returns", "an", "array", "of", "3", "items", ".", "The", "first", "is", "the", "details", "for", "the", "domain", ".", "The", "second", "is", "the", "current", "CNAME", "of", "the", "domain", ".", "The", "third", "is", "a", "boolean", "indicating", "whether", "or", "not", "it", "has", "been", "properly", "setup", "to", "use", "Fastly", "." ]
python
train
114.75
MrYsLab/pymata-aio
pymata_aio/pymata_iot.py
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata_iot.py#L303-L318
async def get_pinstate_report(self, command): """ This method retrieves a Firmata pin_state report for a pin. See: http://firmata.org/wiki/Protocol#Pin_State_Query :param command: {"method": "get_pin_state", "params": [PIN]} :returns: {"method": "get_pin_state_reply", "params": [PIN_NUMBER, PIN_MODE, PIN_STATE]} """ pin = int(command[0]) value = await self.core.get_pin_state(pin) if value: reply = json.dumps({"method": "pin_state_reply", "params": value}) else: reply = json.dumps({"method": "pin_state_reply", "params": "Unknown"}) await self.websocket.send(reply)
[ "async", "def", "get_pinstate_report", "(", "self", ",", "command", ")", ":", "pin", "=", "int", "(", "command", "[", "0", "]", ")", "value", "=", "await", "self", ".", "core", ".", "get_pin_state", "(", "pin", ")", "if", "value", ":", "reply", "=", "json", ".", "dumps", "(", "{", "\"method\"", ":", "\"pin_state_reply\"", ",", "\"params\"", ":", "value", "}", ")", "else", ":", "reply", "=", "json", ".", "dumps", "(", "{", "\"method\"", ":", "\"pin_state_reply\"", ",", "\"params\"", ":", "\"Unknown\"", "}", ")", "await", "self", ".", "websocket", ".", "send", "(", "reply", ")" ]
This method retrieves a Firmata pin_state report for a pin. See: http://firmata.org/wiki/Protocol#Pin_State_Query :param command: {"method": "get_pin_state", "params": [PIN]} :returns: {"method": "get_pin_state_reply", "params": [PIN_NUMBER, PIN_MODE, PIN_STATE]}
[ "This", "method", "retrieves", "a", "Firmata", "pin_state", "report", "for", "a", "pin", ".." ]
python
train
41.8125
DLR-RM/RAFCON
source/rafcon/gui/helpers/label.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/helpers/label.py#L208-L217
def get_notebook_tab_title(notebook, page_num): """Helper function that gets a notebook's tab title given its page number :param notebook: The GTK notebook :param page_num: The page number of the tab, for which the title is required :return: The title of the tab """ child = notebook.get_nth_page(page_num) tab_label_eventbox = notebook.get_tab_label(child) return get_widget_title(tab_label_eventbox.get_tooltip_text())
[ "def", "get_notebook_tab_title", "(", "notebook", ",", "page_num", ")", ":", "child", "=", "notebook", ".", "get_nth_page", "(", "page_num", ")", "tab_label_eventbox", "=", "notebook", ".", "get_tab_label", "(", "child", ")", "return", "get_widget_title", "(", "tab_label_eventbox", ".", "get_tooltip_text", "(", ")", ")" ]
Helper function that gets a notebook's tab title given its page number :param notebook: The GTK notebook :param page_num: The page number of the tab, for which the title is required :return: The title of the tab
[ "Helper", "function", "that", "gets", "a", "notebook", "s", "tab", "title", "given", "its", "page", "number" ]
python
train
44.4
alex-kostirin/pyatomac
atomac/ldtpd/page_tab_list.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/page_tab_list.py#L73-L97
def selecttabindex(self, window_name, object_name, tab_index): """ Select tab based on index. @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param tab_index: tab to select @type tab_index: integer @return: 1 on success. @rtype: integer """ children = self._get_tab_children(window_name, object_name) length = len(children) if tab_index < 0 or tab_index >= length: raise LdtpServerException(u"Invalid tab index %s" % tab_index) tab_handle = children[tab_index] if not tab_handle.AXEnabled: raise LdtpServerException(u"Object %s state disabled" % object_name) tab_handle.Press() return 1
[ "def", "selecttabindex", "(", "self", ",", "window_name", ",", "object_name", ",", "tab_index", ")", ":", "children", "=", "self", ".", "_get_tab_children", "(", "window_name", ",", "object_name", ")", "length", "=", "len", "(", "children", ")", "if", "tab_index", "<", "0", "or", "tab_index", ">", "length", ":", "raise", "LdtpServerException", "(", "u\"Invalid tab index %s\"", "%", "tab_index", ")", "tab_handle", "=", "children", "[", "tab_index", "]", "if", "not", "tab_handle", ".", "AXEnabled", ":", "raise", "LdtpServerException", "(", "u\"Object %s state disabled\"", "%", "object_name", ")", "tab_handle", ".", "Press", "(", ")", "return", "1" ]
Select tab based on index. @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param tab_index: tab to select @type tab_index: integer @return: 1 on success. @rtype: integer
[ "Select", "tab", "based", "on", "index", ".", "@param", "window_name", ":", "Window", "name", "to", "type", "in", "either", "full", "name", "LDTP", "s", "name", "convention", "or", "a", "Unix", "glob", ".", "@type", "window_name", ":", "string", "@param", "object_name", ":", "Object", "name", "to", "type", "in", "either", "full", "name", "LDTP", "s", "name", "convention", "or", "a", "Unix", "glob", ".", "@type", "object_name", ":", "string", "@param", "tab_index", ":", "tab", "to", "select", "@type", "data", ":", "integer" ]
python
valid
38.4
hasgeek/coaster
coaster/sqlalchemy/mixins.py
https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/sqlalchemy/mixins.py#L396-L414
def make_name(self, reserved=[]): """ Autogenerates a :attr:`name` from the :attr:`title`. If the auto-generated name is already in use in this model, :meth:`make_name` tries again by suffixing numbers starting with 2 until an available name is found. :param reserved: List or set of reserved names unavailable for use """ if self.title: if inspect(self).has_identity: def checkused(c): return bool(c in reserved or c in self.reserved_names or self.__class__.query.filter(self.__class__.id != self.id).filter_by(name=c).notempty()) else: def checkused(c): return bool(c in reserved or c in self.reserved_names or self.__class__.query.filter_by(name=c).notempty()) with self.__class__.query.session.no_autoflush: self.name = six.text_type(make_name(self.title_for_name, maxlength=self.__name_length__, checkused=checkused))
[ "def", "make_name", "(", "self", ",", "reserved", "=", "[", "]", ")", ":", "if", "self", ".", "title", ":", "if", "inspect", "(", "self", ")", ".", "has_identity", ":", "def", "checkused", "(", "c", ")", ":", "return", "bool", "(", "c", "in", "reserved", "or", "c", "in", "self", ".", "reserved_names", "or", "self", ".", "__class__", ".", "query", ".", "filter", "(", "self", ".", "__class__", ".", "id", "!=", "self", ".", "id", ")", ".", "filter_by", "(", "name", "=", "c", ")", ".", "notempty", "(", ")", ")", "else", ":", "def", "checkused", "(", "c", ")", ":", "return", "bool", "(", "c", "in", "reserved", "or", "c", "in", "self", ".", "reserved_names", "or", "self", ".", "__class__", ".", "query", ".", "filter_by", "(", "name", "=", "c", ")", ".", "notempty", "(", ")", ")", "with", "self", ".", "__class__", ".", "query", ".", "session", ".", "no_autoflush", ":", "self", ".", "name", "=", "six", ".", "text_type", "(", "make_name", "(", "self", ".", "title_for_name", ",", "maxlength", "=", "self", ".", "__name_length__", ",", "checkused", "=", "checkused", ")", ")" ]
Autogenerates a :attr:`name` from the :attr:`title`. If the auto-generated name is already in use in this model, :meth:`make_name` tries again by suffixing numbers starting with 2 until an available name is found. :param reserved: List or set of reserved names unavailable for use
[ "Autogenerates", "a", ":", "attr", ":", "name", "from", "the", ":", "attr", ":", "title", ".", "If", "the", "auto", "-", "generated", "name", "is", "already", "in", "use", "in", "this", "model", ":", "meth", ":", "make_name", "tries", "again", "by", "suffixing", "numbers", "starting", "with", "2", "until", "an", "available", "name", "is", "found", "." ]
python
train
54.368421
Guake/guake
guake/guake_app.py
https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/guake_app.py#L849-L855
def accel_move_tab_left(self, *args): # TODO KEYBINDINGS ONLY """ Callback to move a tab to the left """ pos = self.get_notebook().get_current_page() if pos != 0: self.move_tab(pos, pos - 1) return True
[ "def", "accel_move_tab_left", "(", "self", ",", "*", "args", ")", ":", "# TODO KEYBINDINGS ONLY", "pos", "=", "self", ".", "get_notebook", "(", ")", ".", "get_current_page", "(", ")", "if", "pos", "!=", "0", ":", "self", ".", "move_tab", "(", "pos", ",", "pos", "-", "1", ")", "return", "True" ]
Callback to move a tab to the left
[ "Callback", "to", "move", "a", "tab", "to", "the", "left" ]
python
train
35.428571
Dallinger/Dallinger
dallinger/models.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/models.py#L232-L247
def infos(self, type=None, failed=False): """Get all infos created by the participant's nodes. Return a list of infos produced by nodes associated with the participant. If specified, ``type`` filters by class. By default, failed infos are excluded, to include only failed nodes use ``failed=True``, for all nodes use ``failed=all``. Note that failed filters the infos, not the nodes - infos from all nodes (whether failed or not) can be returned. """ nodes = self.nodes(failed="all") infos = [] for n in nodes: infos.extend(n.infos(type=type, failed=failed)) return infos
[ "def", "infos", "(", "self", ",", "type", "=", "None", ",", "failed", "=", "False", ")", ":", "nodes", "=", "self", ".", "nodes", "(", "failed", "=", "\"all\"", ")", "infos", "=", "[", "]", "for", "n", "in", "nodes", ":", "infos", ".", "extend", "(", "n", ".", "infos", "(", "type", "=", "type", ",", "failed", "=", "failed", ")", ")", "return", "infos" ]
Get all infos created by the participant's nodes. Return a list of infos produced by nodes associated with the participant. If specified, ``type`` filters by class. By default, failed infos are excluded, to include only failed nodes use ``failed=True``, for all nodes use ``failed=all``. Note that failed filters the infos, not the nodes - infos from all nodes (whether failed or not) can be returned.
[ "Get", "all", "infos", "created", "by", "the", "participants", "nodes", "." ]
python
train
41.5625
jantman/awslimitchecker
awslimitchecker/services/cloudtrail.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/cloudtrail.py#L71-L106
def _find_usage_cloudtrail(self): """Calculate current usage for CloudTrail related metrics""" trail_list = self.conn.describe_trails()['trailList'] trail_count = len(trail_list) if trail_list else 0 for trail in trail_list: data_resource_count = 0 if self.conn._client_config.region_name == trail['HomeRegion']: response = self.conn.get_event_selectors( TrailName=trail['Name'] ) event_selectors = response['EventSelectors'] for event_selector in event_selectors: data_resource_count += len( event_selector.get('DataResources', []) ) self.limits['Event Selectors Per Trail']._add_current_usage( len(event_selectors), aws_type='AWS::CloudTrail::EventSelector', resource_id=trail['Name'] ) self.limits['Data Resources Per Trail']._add_current_usage( data_resource_count, aws_type='AWS::CloudTrail::DataResource', resource_id=trail['Name'] ) else: logger.debug( 'Ignoring event selectors and data resources for ' 'CloudTrail %s in non-home region' % trail['Name'] ) self.limits['Trails Per Region']._add_current_usage( trail_count, aws_type=self.aws_type )
[ "def", "_find_usage_cloudtrail", "(", "self", ")", ":", "trail_list", "=", "self", ".", "conn", ".", "describe_trails", "(", ")", "[", "'trailList'", "]", "trail_count", "=", "len", "(", "trail_list", ")", "if", "trail_list", "else", "0", "for", "trail", "in", "trail_list", ":", "data_resource_count", "=", "0", "if", "self", ".", "conn", ".", "_client_config", ".", "region_name", "==", "trail", "[", "'HomeRegion'", "]", ":", "response", "=", "self", ".", "conn", ".", "get_event_selectors", "(", "TrailName", "=", "trail", "[", "'Name'", "]", ")", "event_selectors", "=", "response", "[", "'EventSelectors'", "]", "for", "event_selector", "in", "event_selectors", ":", "data_resource_count", "+=", "len", "(", "event_selector", ".", "get", "(", "'DataResources'", ",", "[", "]", ")", ")", "self", ".", "limits", "[", "'Event Selectors Per Trail'", "]", ".", "_add_current_usage", "(", "len", "(", "event_selectors", ")", ",", "aws_type", "=", "'AWS::CloudTrail::EventSelector'", ",", "resource_id", "=", "trail", "[", "'Name'", "]", ")", "self", ".", "limits", "[", "'Data Resources Per Trail'", "]", ".", "_add_current_usage", "(", "data_resource_count", ",", "aws_type", "=", "'AWS::CloudTrail::DataResource'", ",", "resource_id", "=", "trail", "[", "'Name'", "]", ")", "else", ":", "logger", ".", "debug", "(", "'Ignoring event selectors and data resources for '", "'CloudTrail %s in non-home region'", "%", "trail", "[", "'Name'", "]", ")", "self", ".", "limits", "[", "'Trails Per Region'", "]", ".", "_add_current_usage", "(", "trail_count", ",", "aws_type", "=", "self", ".", "aws_type", ")" ]
Calculate current usage for CloudTrail related metrics
[ "Calculate", "current", "usage", "for", "CloudTrail", "related", "metrics" ]
python
train
42.583333
estnltk/estnltk
estnltk/vabamorf/morf.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/vabamorf/morf.py#L302-L308
def postprocess_result(morphresult, trim_phonetic, trim_compound): """Postprocess vabamorf wrapper output.""" word, analysis = morphresult return { 'text': deconvert(word), 'analysis': [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis] }
[ "def", "postprocess_result", "(", "morphresult", ",", "trim_phonetic", ",", "trim_compound", ")", ":", "word", ",", "analysis", "=", "morphresult", "return", "{", "'text'", ":", "deconvert", "(", "word", ")", ",", "'analysis'", ":", "[", "postprocess_analysis", "(", "a", ",", "trim_phonetic", ",", "trim_compound", ")", "for", "a", "in", "analysis", "]", "}" ]
Postprocess vabamorf wrapper output.
[ "Postprocess", "vabamorf", "wrapper", "output", "." ]
python
train
40.857143
quantopian/zipline
zipline/finance/position.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/position.py#L91-L129
def handle_split(self, asset, ratio): """ Update the position by the split ratio, and return the resulting fractional share that will be converted into cash. Returns the unused cash. """ if self.asset != asset: raise Exception("updating split with the wrong asset!") # adjust the # of shares by the ratio # (if we had 100 shares, and the ratio is 3, # we now have 33 shares) # (old_share_count / ratio = new_share_count) # (old_price * ratio = new_price) # e.g., 33.333 raw_share_count = self.amount / float(ratio) # e.g., 33 full_share_count = np.floor(raw_share_count) # e.g., 0.333 fractional_share_count = raw_share_count - full_share_count # adjust the cost basis to the nearest cent, e.g., 60.0 new_cost_basis = round(self.cost_basis * ratio, 2) self.cost_basis = new_cost_basis self.amount = full_share_count return_cash = round(float(fractional_share_count * new_cost_basis), 2) log.info("after split: " + str(self)) log.info("returning cash: " + str(return_cash)) # return the leftover cash, which will be converted into cash # (rounded to the nearest cent) return return_cash
[ "def", "handle_split", "(", "self", ",", "asset", ",", "ratio", ")", ":", "if", "self", ".", "asset", "!=", "asset", ":", "raise", "Exception", "(", "\"updating split with the wrong asset!\"", ")", "# adjust the # of shares by the ratio", "# (if we had 100 shares, and the ratio is 3,", "# we now have 33 shares)", "# (old_share_count / ratio = new_share_count)", "# (old_price * ratio = new_price)", "# e.g., 33.333", "raw_share_count", "=", "self", ".", "amount", "/", "float", "(", "ratio", ")", "# e.g., 33", "full_share_count", "=", "np", ".", "floor", "(", "raw_share_count", ")", "# e.g., 0.333", "fractional_share_count", "=", "raw_share_count", "-", "full_share_count", "# adjust the cost basis to the nearest cent, e.g., 60.0", "new_cost_basis", "=", "round", "(", "self", ".", "cost_basis", "*", "ratio", ",", "2", ")", "self", ".", "cost_basis", "=", "new_cost_basis", "self", ".", "amount", "=", "full_share_count", "return_cash", "=", "round", "(", "float", "(", "fractional_share_count", "*", "new_cost_basis", ")", ",", "2", ")", "log", ".", "info", "(", "\"after split: \"", "+", "str", "(", "self", ")", ")", "log", ".", "info", "(", "\"returning cash: \"", "+", "str", "(", "return_cash", ")", ")", "# return the leftover cash, which will be converted into cash", "# (rounded to the nearest cent)", "return", "return_cash" ]
Update the position by the split ratio, and return the resulting fractional share that will be converted into cash. Returns the unused cash.
[ "Update", "the", "position", "by", "the", "split", "ratio", "and", "return", "the", "resulting", "fractional", "share", "that", "will", "be", "converted", "into", "cash", "." ]
python
train
32.974359
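A quick numeric replay of the split arithmetic in the record above, using the figures from its inline comments (100 shares, ratio 3) plus a hypothetical $20.00 cost basis; this is a standalone sketch, not zipline's API:

import numpy as np

amount, cost_basis, ratio = 100, 20.00, 3
raw_shares = amount / float(ratio)          # 33.333... shares
full_shares = np.floor(raw_shares)          # 33.0 whole shares survive
fractional = raw_shares - full_shares       # 0.333... shares cashed out
new_basis = round(cost_basis * ratio, 2)    # 60.0: per-share price scales up by ratio
leftover_cash = round(float(fractional * new_basis), 2)
print(full_shares, new_basis, leftover_cash)  # 33.0 60.0 20.0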
chrisjsewell/jsonextended
jsonextended/edict.py
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L841-L871
def flatten2d(d, key_as_tuple=True, delim='.', list_of_dicts=None): """ get nested dict as {key:dict,...}, where key is tuple/string of all-1 nested keys NB: is same as flattennd(d,1,key_as_tuple,delim) Parameters ---------- d : dict key_as_tuple : bool whether keys are list of nested keys or delimited string of nested keys delim : str if key_as_tuple=False, delimiter for keys list_of_dicts: str or None if not None, flatten lists of dicts using this prefix Examples -------- >>> from pprint import pprint >>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}} >>> pprint(flatten2d(d)) {(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}} >>> pprint(flatten2d(d,key_as_tuple=False,delim=',')) {'1,2': {4: 'D'}, '1,2,3': {'b': 'B', 'c': 'C'}} """ return flattennd(d, 1, key_as_tuple, delim, list_of_dicts=list_of_dicts)
[ "def", "flatten2d", "(", "d", ",", "key_as_tuple", "=", "True", ",", "delim", "=", "'.'", ",", "list_of_dicts", "=", "None", ")", ":", "return", "flattennd", "(", "d", ",", "1", ",", "key_as_tuple", ",", "delim", ",", "list_of_dicts", "=", "list_of_dicts", ")" ]
get nested dict as {key:dict,...}, where key is tuple/string of all-1 nested keys NB: is same as flattennd(d,1,key_as_tuple,delim) Parameters ---------- d : dict key_as_tuple : bool whether keys are list of nested keys or delimited string of nested keys delim : str if key_as_tuple=False, delimiter for keys list_of_dicts: str or None if not None, flatten lists of dicts using this prefix Examples -------- >>> from pprint import pprint >>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}} >>> pprint(flatten2d(d)) {(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}} >>> pprint(flatten2d(d,key_as_tuple=False,delim=',')) {'1,2': {4: 'D'}, '1,2,3': {'b': 'B', 'c': 'C'}}
[ "get", "nested", "dict", "as", "{", "key", ":", "dict", "...", "}", "where", "key", "is", "tuple", "/", "string", "of", "all", "-", "1", "nested", "keys" ]
python
train
29.032258
Opentrons/opentrons
api/src/opentrons/deck_calibration/dc_main.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/deck_calibration/dc_main.py#L267-L286
def save_point(self) -> str: """ Indexes the measured data with the current point as a key and saves the current position to the 'actual points' vector once the 'Enter' key is pressed. """ if self._current_mount is left: msg = self.save_mount_offset() self._current_mount = right elif self._current_mount is types.Mount.LEFT: msg = self.save_mount_offset() self._current_mount = types.Mount.RIGHT else: pos = self._position()[:-1] self.actual_points[self._current_point] = pos log.debug("Saving {} for point {}".format( pos, self._current_point)) msg = 'saved #{}: {}'.format( self._current_point, self.actual_points[self._current_point]) return msg
[ "def", "save_point", "(", "self", ")", "->", "str", ":", "if", "self", ".", "_current_mount", "is", "left", ":", "msg", "=", "self", ".", "save_mount_offset", "(", ")", "self", ".", "_current_mount", "=", "right", "elif", "self", ".", "_current_mount", "is", "types", ".", "Mount", ".", "LEFT", ":", "msg", "=", "self", ".", "save_mount_offset", "(", ")", "self", ".", "_current_mount", "=", "types", ".", "Mount", ".", "RIGHT", "else", ":", "pos", "=", "self", ".", "_position", "(", ")", "[", ":", "-", "1", "]", "self", ".", "actual_points", "[", "self", ".", "_current_point", "]", "=", "pos", "log", ".", "debug", "(", "\"Saving {} for point {}\"", ".", "format", "(", "pos", ",", "self", ".", "_current_point", ")", ")", "msg", "=", "'saved #{}: {}'", ".", "format", "(", "self", ".", "_current_point", ",", "self", ".", "actual_points", "[", "self", ".", "_current_point", "]", ")", "return", "msg" ]
Indexes the measured data with the current point as a key and saves the current position to the 'actual points' vector once the 'Enter' key is pressed.
[ "Indexes", "the", "measured", "data", "with", "the", "current", "point", "as", "a", "key", "and", "saves", "the", "current", "position", "once", "the", "Enter", "key", "is", "pressed", "to", "the", "actual", "points", "vector", "." ]
python
train
41.5
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L4800-L4835
def create_storage_policy(policy_name, policy_dict, service_instance=None): ''' Creates a storage policy. Supported capability types: scalar, set, range. policy_name Name of the policy to create. The value of the argument will override any existing name in ``policy_dict``. policy_dict Dictionary containing the changes to apply to the policy. (example in salt.states.pbm) service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.create_storage_policy policy_name='policy name' policy_dict="$policy_dict" ''' log.trace('create storage policy \'%s\', dict = %s', policy_name, policy_dict) profile_manager = salt.utils.pbm.get_profile_manager(service_instance) policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec() # Hardcode the storage profile resource type policy_create_spec.resourceType = pbm.profile.ResourceType( resourceType=pbm.profile.ResourceTypeEnum.STORAGE) # Set name argument policy_dict['name'] = policy_name log.trace('Setting policy values in policy_update_spec') _apply_policy_config(policy_create_spec, policy_dict) salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec) return {'create_storage_policy': True}
[ "def", "create_storage_policy", "(", "policy_name", ",", "policy_dict", ",", "service_instance", "=", "None", ")", ":", "log", ".", "trace", "(", "'create storage policy \\'%s\\', dict = %s'", ",", "policy_name", ",", "policy_dict", ")", "profile_manager", "=", "salt", ".", "utils", ".", "pbm", ".", "get_profile_manager", "(", "service_instance", ")", "policy_create_spec", "=", "pbm", ".", "profile", ".", "CapabilityBasedProfileCreateSpec", "(", ")", "# Hardcode the storage profile resource type", "policy_create_spec", ".", "resourceType", "=", "pbm", ".", "profile", ".", "ResourceType", "(", "resourceType", "=", "pbm", ".", "profile", ".", "ResourceTypeEnum", ".", "STORAGE", ")", "# Set name argument", "policy_dict", "[", "'name'", "]", "=", "policy_name", "log", ".", "trace", "(", "'Setting policy values in policy_update_spec'", ")", "_apply_policy_config", "(", "policy_create_spec", ",", "policy_dict", ")", "salt", ".", "utils", ".", "pbm", ".", "create_storage_policy", "(", "profile_manager", ",", "policy_create_spec", ")", "return", "{", "'create_storage_policy'", ":", "True", "}" ]
Creates a storage policy. Supported capability types: scalar, set, range. policy_name Name of the policy to create. The value of the argument will override any existing name in ``policy_dict``. policy_dict Dictionary containing the changes to apply to the policy. (example in salt.states.pbm) service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.create_storage_policy policy_name='policy name' policy_dict="$policy_dict"
[ "Creates", "a", "storage", "policy", "." ]
python
train
37.805556
woolfson-group/isambard
isambard/external_programs/dssp.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/external_programs/dssp.py#L128-L177
def extract_solvent_accessibility_dssp(in_dssp, path=True): """Uses DSSP to extract solvent accessibility information on every residue. Notes ----- For more information on the solvent accessibility metrics used in dssp, see: http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC In the dssp files the value is labeled 'ACC'. Parameters ---------- in_dssp : str Path to DSSP file. path : bool Indicates if in_dssp is a path or a string. Returns ------- dssp_residues : list Each internal list contains: [0] int Residue number [1] str Chain identifier [2] str Residue type [3] int dssp solvent accessibility """ if path: with open(in_dssp, 'r') as inf: dssp_out = inf.read() else: dssp_out = in_dssp[:] dssp_residues = [] go = False for line in dssp_out.splitlines(): if go: try: res_num = int(line[5:10].strip()) chain = line[10:12].strip() residue = line[13] acc = int(line[35:38].strip()) # It is IMPORTANT that acc remains the final value of the # returned list, due to its usage in # isambard.ampal.base_ampal.tag_dssp_solvent_accessibility dssp_residues.append([res_num, chain, residue, acc]) except ValueError: pass else: if line[2] == '#': go = True pass return dssp_residues
[ "def", "extract_solvent_accessibility_dssp", "(", "in_dssp", ",", "path", "=", "True", ")", ":", "if", "path", ":", "with", "open", "(", "in_dssp", ",", "'r'", ")", "as", "inf", ":", "dssp_out", "=", "inf", ".", "read", "(", ")", "else", ":", "dssp_out", "=", "in_dssp", "[", ":", "]", "dssp_residues", "=", "[", "]", "go", "=", "False", "for", "line", "in", "dssp_out", ".", "splitlines", "(", ")", ":", "if", "go", ":", "try", ":", "res_num", "=", "int", "(", "line", "[", "5", ":", "10", "]", ".", "strip", "(", ")", ")", "chain", "=", "line", "[", "10", ":", "12", "]", ".", "strip", "(", ")", "residue", "=", "line", "[", "13", "]", "acc", "=", "int", "(", "line", "[", "35", ":", "38", "]", ".", "strip", "(", ")", ")", "# It is IMPORTANT that acc remains the final value of the", "# returned list, due to its usage in", "# isambard.ampal.base_ampal.tag_dssp_solvent_accessibility", "dssp_residues", ".", "append", "(", "[", "res_num", ",", "chain", ",", "residue", ",", "acc", "]", ")", "except", "ValueError", ":", "pass", "else", ":", "if", "line", "[", "2", "]", "==", "'#'", ":", "go", "=", "True", "pass", "return", "dssp_residues" ]
Uses DSSP to extract solvent accessibility information on every residue. Notes ----- For more information on the solvent accessibility metrics used in dssp, see: http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC In the dssp files the value is labeled 'ACC'. Parameters ---------- in_dssp : str Path to DSSP file. path : bool Indicates if in_dssp is a path or a string. Returns ------- dssp_residues : list Each internal list contains: [0] int Residue number [1] str Chain identifier [2] str Residue type [3] int dssp solvent accessibility
[ "Uses", "DSSP", "to", "extract", "solvent", "accessibilty", "information", "on", "every", "residue", "." ]
python
train
30.66
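To make the fixed-column slicing in the record above concrete, here is a synthetic check; the data row is fabricated and padded so the fields land at the offsets the parser reads (5:10, 10:12, 13, 35:38), not a real DSSP line:

line = " " * 5 + "   42" + " A" + " M" + " " * 21 + "113"
res_num = int(line[5:10].strip())      # 42
chain = line[10:12].strip()            # 'A'
residue = line[13]                     # 'M'
acc = int(line[35:38].strip())         # 113
print([res_num, chain, residue, acc])  # [42, 'A', 'M', 113]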
leandroarndt/djangospam
djangospam/akismet/moderator.py
https://github.com/leandroarndt/djangospam/blob/57fa9cfbf54a40f0e0652d0155dbb3451c14b69d/djangospam/akismet/moderator.py#L96-L126
def allow(self, comment, content_object, request): """Moderates comments.""" POST = urlencode({ "blog": settings.AKISMET_BLOG.encode("utf-8"), "user_ip": comment.ip_address, "user_agent": request.META.get('HTTP_USER_AGENT', ""). encode("utf-8"), "referrer": request.META.get('HTTP_REFERRER', ""). encode("utf-8"), "comment_author": comment.user_name.encode("utf-8"), "comment_author_email": comment.user_email.encode("utf-8"), "comment_author_url": comment.user_url.encode("utf-8"), "comment_content": comment.comment.encode("utf-8")}) connection = HTTPConnection(AKISMET_URL, AKISMET_PORT) connection.request("POST", AKISMET_PATH, POST, {"User-Agent": AKISMET_USERAGENT, "Content-type":"application/x-www-form-urlencoded" }) response = connection.getresponse() status, result = response.status, response.read() if result == "false": return True elif result == "true" and settings.DISCARD_SPAM: return False elif result == "true": comment.is_removed = True comment.is_public = False return True else: raise AkismetError(status, result)
[ "def", "allow", "(", "self", ",", "comment", ",", "content_object", ",", "request", ")", ":", "POST", "=", "urlencode", "(", "{", "\"blog\"", ":", "settings", ".", "AKISMET_BLOG", ".", "encode", "(", "\"utf-8\"", ")", ",", "\"user_ip\"", ":", "comment", ".", "ip_address", ",", "\"user_agent\"", ":", "request", ".", "META", ".", "get", "(", "'HTTP_USER_AGENT'", ",", "\"\"", ")", ".", "encode", "(", "\"utf-8\"", ")", ",", "\"referrer\"", ":", "request", ".", "META", ".", "get", "(", "'HTTP_REFERRER'", ",", "\"\"", ")", ".", "encode", "(", "\"utf-8\"", ")", ",", "\"comment_author\"", ":", "comment", ".", "user_name", ".", "encode", "(", "\"utf-8\"", ")", ",", "\"comment_author_email\"", ":", "comment", ".", "user_email", ".", "encode", "(", "\"utf-8\"", ")", ",", "\"comment_author_url\"", ":", "comment", ".", "user_url", ".", "encode", "(", "\"utf-8\"", ")", ",", "\"comment_content\"", ":", "comment", ".", "comment", ".", "encode", "(", "\"utf-8\"", ")", "}", ")", "connection", "=", "HTTPConnection", "(", "AKISMET_URL", ",", "AKISMET_PORT", ")", "connection", ".", "request", "(", "\"POST\"", ",", "AKISMET_PATH", ",", "POST", ",", "{", "\"User-Agent\"", ":", "AKISMET_USERAGENT", ",", "\"Content-type\"", ":", "\"application/x-www-form-urlencoded\"", "}", ")", "response", "=", "connection", ".", "getresponse", "(", ")", "status", ",", "result", "=", "response", ".", "status", ",", "response", ".", "read", "(", ")", "if", "result", "==", "\"false\"", ":", "return", "True", "elif", "result", "==", "\"true\"", "and", "settings", ".", "DISCARD_SPAM", ":", "return", "False", "elif", "result", "==", "\"true\"", ":", "comment", ".", "is_removed", "=", "True", "comment", ".", "is_public", "=", "False", "return", "True", "else", ":", "raise", "AkismetError", "(", "status", ",", "result", ")" ]
Moderates comments.
[ "Moderates", "comments", "." ]
python
train
47.516129
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L435-L451
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None): """Calculate accuracy for a set, given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: accuracy (scalar), weights """ with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]): del weights_fn predictions = tf.nn.sigmoid(logits) labels = tf.argmax(labels, -1) predictions = tf.argmax(predictions, -1) _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions) return accuracy, tf.constant(1.0)
[ "def", "sigmoid_accuracy_one_hot", "(", "logits", ",", "labels", ",", "weights_fn", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"sigmoid_accuracy_one_hot\"", ",", "values", "=", "[", "logits", ",", "labels", "]", ")", ":", "del", "weights_fn", "predictions", "=", "tf", ".", "nn", ".", "sigmoid", "(", "logits", ")", "labels", "=", "tf", ".", "argmax", "(", "labels", ",", "-", "1", ")", "predictions", "=", "tf", ".", "argmax", "(", "predictions", ",", "-", "1", ")", "_", ",", "accuracy", "=", "tf", ".", "metrics", ".", "accuracy", "(", "labels", "=", "labels", ",", "predictions", "=", "predictions", ")", "return", "accuracy", ",", "tf", ".", "constant", "(", "1.0", ")" ]
Calculate accuracy for a set, given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: accuracy (scalar), weights
[ "Calculate", "accuracy", "for", "a", "set", "given", "one", "-", "hot", "labels", "and", "logits", "." ]
python
train
41.529412
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/msvc9_support.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/msvc9_support.py#L8-L25
def patch_for_specialized_compiler(): """ Patch functions in distutils.msvc9compiler to use the standalone compiler build for Python (Windows only). Fall back to original behavior when the standalone compiler is not available. """ if 'distutils' not in globals(): # The module isn't available to be patched return if unpatched: # Already patched return unpatched.update(vars(distutils.msvc9compiler)) distutils.msvc9compiler.find_vcvarsall = find_vcvarsall distutils.msvc9compiler.query_vcvarsall = query_vcvarsall
[ "def", "patch_for_specialized_compiler", "(", ")", ":", "if", "'distutils'", "not", "in", "globals", "(", ")", ":", "# The module isn't available to be patched", "return", "if", "unpatched", ":", "# Already patched", "return", "unpatched", ".", "update", "(", "vars", "(", "distutils", ".", "msvc9compiler", ")", ")", "distutils", ".", "msvc9compiler", ".", "find_vcvarsall", "=", "find_vcvarsall", "distutils", ".", "msvc9compiler", ".", "query_vcvarsall", "=", "query_vcvarsall" ]
Patch functions in distutils.msvc9compiler to use the standalone compiler build for Python (Windows only). Fall back to original behavior when the standalone compiler is not available.
[ "Patch", "functions", "in", "distutils", ".", "msvc9compiler", "to", "use", "the", "standalone", "compiler", "build", "for", "Python", "(", "Windows", "only", ")", ".", "Fall", "back", "to", "original", "behavior", "when", "the", "standalone", "compiler", "is", "not", "available", "." ]
python
test
31.777778
prompt-toolkit/pyvim
pyvim/window_arrangement.py
https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/window_arrangement.py#L272-L281
def close_tab(self): """ Close active tab. """ if len(self.tab_pages) > 1: # Cannot close last tab. del self.tab_pages[self.active_tab_index] self.active_tab_index = max(0, self.active_tab_index - 1) # Clean up buffers. self._auto_close_new_empty_buffers()
[ "def", "close_tab", "(", "self", ")", ":", "if", "len", "(", "self", ".", "tab_pages", ")", ">", "1", ":", "# Cannot close last tab.", "del", "self", ".", "tab_pages", "[", "self", ".", "active_tab_index", "]", "self", ".", "active_tab_index", "=", "max", "(", "0", ",", "self", ".", "active_tab_index", "-", "1", ")", "# Clean up buffers.", "self", ".", "_auto_close_new_empty_buffers", "(", ")" ]
Close active tab.
[ "Close", "active", "tab", "." ]
python
train
32.1
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L9264-L9271
def s_l(l, alpha): """ get sigma as a function of degree l from Constable and Parker (1988) """ a2 = alpha**2 c_a = 0.547 s_l = np.sqrt(old_div(((c_a**(2. * l)) * a2), ((l + 1.) * (2. * l + 1.)))) return s_l
[ "def", "s_l", "(", "l", ",", "alpha", ")", ":", "a2", "=", "alpha", "**", "2", "c_a", "=", "0.547", "s_l", "=", "np", ".", "sqrt", "(", "old_div", "(", "(", "(", "c_a", "**", "(", "2.", "*", "l", ")", ")", "*", "a2", ")", ",", "(", "(", "l", "+", "1.", ")", "*", "(", "2.", "*", "l", "+", "1.", ")", ")", ")", ")", "return", "s_l" ]
get sigma as a function of degree l from Constable and Parker (1988)
[ "get", "sigma", "as", "a", "function", "of", "degree", "l", "from", "Constable", "and", "Parker", "(", "1988", ")" ]
python
train
28.5
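For readability, the quantity computed in the record above can be written in LaTeX as

s_l = \sqrt{\frac{c_a^{2l} \, \alpha^{2}}{(l + 1)(2l + 1)}}, \qquad c_a = 0.547

which matches the code's numerator c_a**(2.*l) * a2 and denominator (l+1.)*(2.*l+1.); since both arguments are floats, old_div reduces to plain division here.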
awkman/pywifi
pywifi/wifi.py
https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/wifi.py#L36-L50
def interfaces(self): """Collect the available wlan interfaces.""" self._ifaces = [] wifi_ctrl = wifiutil.WifiUtil() for interface in wifi_ctrl.interfaces(): iface = Interface(interface) self._ifaces.append(iface) self._logger.info("Get interface: %s", iface.name()) if not self._ifaces: self._logger.error("Can't get wifi interface") return self._ifaces
[ "def", "interfaces", "(", "self", ")", ":", "self", ".", "_ifaces", "=", "[", "]", "wifi_ctrl", "=", "wifiutil", ".", "WifiUtil", "(", ")", "for", "interface", "in", "wifi_ctrl", ".", "interfaces", "(", ")", ":", "iface", "=", "Interface", "(", "interface", ")", "self", ".", "_ifaces", ".", "append", "(", "iface", ")", "self", ".", "_logger", ".", "info", "(", "\"Get interface: %s\"", ",", "iface", ".", "name", "(", ")", ")", "if", "not", "self", ".", "_ifaces", ":", "self", ".", "_logger", ".", "error", "(", "\"Can't get wifi interface\"", ")", "return", "self", ".", "_ifaces" ]
Collect the available wlan interfaces.
[ "Collect", "the", "available", "wlan", "interfaces", "." ]
python
train
29.333333
Alignak-monitoring/alignak
alignak/objects/hostgroup.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/hostgroup.py#L134-L167
def get_hosts_by_explosion(self, hostgroups): # pylint: disable=access-member-before-definition """ Get hosts of this group :param hostgroups: Hostgroup object :type hostgroups: alignak.objects.hostgroup.Hostgroups :return: list of hosts of this group :rtype: list """ # First we tag the hg so it will not be explode # if a son of it already call it self.already_exploded = True # Now the recursive part # rec_tag is set to False every HG we explode # so if True here, it must be a loop in HG # calls... not GOOD! if self.rec_tag: logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name()) return self.get_hosts() # Ok, not a loop, we tag it and continue self.rec_tag = True hg_mbrs = self.get_hostgroup_members() for hg_mbr in hg_mbrs: hostgroup = hostgroups.find_by_name(hg_mbr.strip()) if hostgroup is not None: value = hostgroup.get_hosts_by_explosion(hostgroups) if value is not None: self.add_members(value) return self.get_hosts()
[ "def", "get_hosts_by_explosion", "(", "self", ",", "hostgroups", ")", ":", "# pylint: disable=access-member-before-definition", "# First we tag the hg so it will not be explode", "# if a son of it already call it", "self", ".", "already_exploded", "=", "True", "# Now the recursive part", "# rec_tag is set to False every HG we explode", "# so if True here, it must be a loop in HG", "# calls... not GOOD!", "if", "self", ".", "rec_tag", ":", "logger", ".", "error", "(", "\"[hostgroup::%s] got a loop in hostgroup definition\"", ",", "self", ".", "get_name", "(", ")", ")", "return", "self", ".", "get_hosts", "(", ")", "# Ok, not a loop, we tag it and continue", "self", ".", "rec_tag", "=", "True", "hg_mbrs", "=", "self", ".", "get_hostgroup_members", "(", ")", "for", "hg_mbr", "in", "hg_mbrs", ":", "hostgroup", "=", "hostgroups", ".", "find_by_name", "(", "hg_mbr", ".", "strip", "(", ")", ")", "if", "hostgroup", "is", "not", "None", ":", "value", "=", "hostgroup", ".", "get_hosts_by_explosion", "(", "hostgroups", ")", "if", "value", "is", "not", "None", ":", "self", ".", "add_members", "(", "value", ")", "return", "self", ".", "get_hosts", "(", ")" ]
Get hosts of this group :param hostgroups: Hostgroup object :type hostgroups: alignak.objects.hostgroup.Hostgroups :return: list of hosts of this group :rtype: list
[ "Get", "hosts", "of", "this", "group" ]
python
train
35.352941
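The rec_tag bookkeeping in the record above is a guard against cycles in recursive group expansion. A minimal standalone sketch of the same idea, using an explicit visiting set instead of instance flags (all names hypothetical):

def expand_group(name, groups, visiting=None):
    # groups maps a group name to a pair (hosts, subgroup_names)
    visiting = visiting if visiting is not None else set()
    if name in visiting:  # a subgroup eventually named its own ancestor
        raise ValueError("loop in group definition at %r" % name)
    visiting.add(name)
    hosts, subgroups = groups[name]
    result = list(hosts)
    for sub in subgroups:
        result.extend(expand_group(sub, groups, visiting))
    return result

print(expand_group("web", {"web": (["w1"], ["app"]), "app": (["a1"], [])}))
# ['w1', 'a1']; like rec_tag, a shared set also trips on diamond-shaped reuse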
theduke/django-baseline
django_baseline/__init__.py
https://github.com/theduke/django-baseline/blob/7be8b956e53c70b35f34e1783a8fe8f716955afb/django_baseline/__init__.py#L43-L52
def user_has_group(user, group, superuser_skip=True): """ Check if a user is in a certain group. By default, the check is skipped for superusers. """ if user.is_superuser and superuser_skip: return True return user.groups.filter(name=group).exists()
[ "def", "user_has_group", "(", "user", ",", "group", ",", "superuser_skip", "=", "True", ")", ":", "if", "user", ".", "is_superuser", "and", "superuser_skip", ":", "return", "True", "return", "user", ".", "groups", ".", "filter", "(", "name", "=", "group", ")", ".", "exists", "(", ")" ]
Check if a user is in a certain group. By default, the check is skipped for superusers.
[ "Check", "if", "a", "user", "is", "in", "a", "certaing", "group", ".", "By", "default", "the", "check", "is", "skipped", "for", "superusers", "." ]
python
test
27.5
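A hedged usage sketch built on the helper above, wrapping it as a view decorator; the group name and the Django wiring are illustrative, not part of django-baseline:

from django.core.exceptions import PermissionDenied

def require_group(group):
    def decorator(view):
        def wrapped(request, *args, **kwargs):
            if not user_has_group(request.user, group):
                raise PermissionDenied
            return view(request, *args, **kwargs)
        return wrapped
    return decorator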
selectel/pyte
pyte/streams.py
https://github.com/selectel/pyte/blob/8adad489f86da1788a7995720c344a2fa44f244e/pyte/streams.py#L146-L164
def attach(self, screen): """Adds a given screen to the listener queue. :param pyte.screens.Screen screen: a screen to attach to. """ if self.listener is not None: warnings.warn("As of version 0.6.0 the listener queue is " "restricted to a single element. Existing " "listener {0} will be replaced." .format(self.listener), DeprecationWarning) if self.strict: for event in self.events: if not hasattr(screen, event): raise TypeError("{0} is missing {1}".format(screen, event)) self.listener = screen self._parser = None self._initialize_parser()
[ "def", "attach", "(", "self", ",", "screen", ")", ":", "if", "self", ".", "listener", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"As of version 0.6.0 the listener queue is \"", "\"restricted to a single element. Existing \"", "\"listener {0} will be replaced.\"", ".", "format", "(", "self", ".", "listener", ")", ",", "DeprecationWarning", ")", "if", "self", ".", "strict", ":", "for", "event", "in", "self", ".", "events", ":", "if", "not", "hasattr", "(", "screen", ",", "event", ")", ":", "raise", "TypeError", "(", "\"{0} is missing {1}\"", ".", "format", "(", "screen", ",", "event", ")", ")", "self", ".", "listener", "=", "screen", "self", ".", "_parser", "=", "None", "self", ".", "_initialize_parser", "(", ")" ]
Adds a given screen to the listener queue. :param pyte.screens.Screen screen: a screen to attach to.
[ "Adds", "a", "given", "screen", "to", "the", "listener", "queue", "." ]
python
train
38.526316
coinkite/connectrum
connectrum/svr_info.py
https://github.com/coinkite/connectrum/blob/99948f92cc5c3ecb1a8a70146294014e608e50fc/connectrum/svr_info.py#L171-L178
def from_json(self, fname): ''' Read contents of a JSON file containing a list of servers. ''' with open(fname, 'rt') as fp: for row in json.load(fp): nn = ServerInfo.from_dict(row) self[str(nn)] = nn
[ "def", "from_json", "(", "self", ",", "fname", ")", ":", "with", "open", "(", "fname", ",", "'rt'", ")", "as", "fp", ":", "for", "row", "in", "json", ".", "load", "(", "fp", ")", ":", "nn", "=", "ServerInfo", ".", "from_dict", "(", "row", ")", "self", "[", "str", "(", "nn", ")", "]", "=", "nn" ]
Read contents of a JSON file containing a list of servers.
[ "Read", "contents", "of", "a", "CSV", "containing", "a", "list", "of", "servers", "." ]
python
train
33.375
oscarlazoarjona/fast
fast/graphic.py
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/graphic.py#L476-L499
def bar_chart_mf(data, path_name): """Make a bar chart for data on MF quantities.""" N = len(data) ind = np.arange(N) # the x locations for the groups width = 0.8 # the width of the bars fig, ax = pyplot.subplots() rects1 = ax.bar(ind, data, width, color='g') # add some text for labels, title and axes ticks ax.set_ylabel('Population') ax.set_xticks(ind+width/2) labs = ['m='+str(i) for i in range(-N/2+1, N/2+1)] ax.set_xticklabels(labs) def autolabel(rects): # attach some text labels for rect in rects: rect.get_height() autolabel(rects1) pyplot.savefig(path_name) pyplot.close()
[ "def", "bar_chart_mf", "(", "data", ",", "path_name", ")", ":", "N", "=", "len", "(", "data", ")", "ind", "=", "np", ".", "arange", "(", "N", ")", "# the x locations for the groups\r", "width", "=", "0.8", "# the width of the bars\r", "fig", ",", "ax", "=", "pyplot", ".", "subplots", "(", ")", "rects1", "=", "ax", ".", "bar", "(", "ind", ",", "data", ",", "width", ",", "color", "=", "'g'", ")", "# add some text for labels, title and axes ticks\r", "ax", ".", "set_ylabel", "(", "'Population'", ")", "ax", ".", "set_xticks", "(", "ind", "+", "width", "/", "2", ")", "labs", "=", "[", "'m='", "+", "str", "(", "i", ")", "for", "i", "in", "range", "(", "-", "N", "/", "2", "+", "1", ",", "N", "/", "2", "+", "1", ")", "]", "ax", ".", "set_xticklabels", "(", "labs", ")", "def", "autolabel", "(", "rects", ")", ":", "# attach some text labels\r", "for", "rect", "in", "rects", ":", "rect", ".", "get_height", "(", ")", "autolabel", "(", "rects1", ")", "pyplot", ".", "savefig", "(", "path_name", ")", "pyplot", ".", "close", "(", ")" ]
Make a bar chart for data on MF quantities.
[ "Make", "a", "bar", "chart", "for", "data", "on", "MF", "quantities", "." ]
python
train
28.458333
DLR-RM/RAFCON
source/rafcon/gui/controllers/states_editor.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/states_editor.py#L285-L333
def add_state_editor(self, state_m): """Triggered whenever a state is selected. :param state_m: The selected state model. """ state_identifier = self.get_state_identifier(state_m) if state_identifier in self.closed_tabs: state_editor_ctrl = self.closed_tabs[state_identifier]['controller'] state_editor_view = state_editor_ctrl.view handler_id = self.closed_tabs[state_identifier]['source_code_changed_handler_id'] source_code_view_is_dirty = self.closed_tabs[state_identifier]['source_code_view_is_dirty'] del self.closed_tabs[state_identifier] # pages not in self.closed_tabs and self.tabs at the same time else: state_editor_view = StateEditorView() if isinstance(state_m, LibraryStateModel): state_editor_view['main_notebook_1'].set_current_page( state_editor_view['main_notebook_1'].page_num(state_editor_view.page_dict["Data Linkage"])) state_editor_ctrl = StateEditorController(state_m, state_editor_view) self.add_controller(state_identifier, state_editor_ctrl) if state_editor_ctrl.get_controller('source_ctrl') and state_m.state.get_next_upper_library_root_state() is None: # observe changed to set the mark dirty flag handler_id = state_editor_view.source_view.get_buffer().connect('changed', self.script_text_changed, state_m) self.view.get_top_widget().connect('draw', state_editor_view.source_view.on_draw) else: handler_id = None source_code_view_is_dirty = False (tab, inner_label, sticky_button) = create_tab_header('', self.on_tab_close_clicked, self.on_toggle_sticky_clicked, state_m) set_tab_label_texts(inner_label, state_m, source_code_view_is_dirty) state_editor_view.get_top_widget().title_label = inner_label state_editor_view.get_top_widget().sticky_button = sticky_button page_content = state_editor_view.get_top_widget() page_id = self.view.notebook.prepend_page(page_content, tab) page = self.view.notebook.get_nth_page(page_id) self.view.notebook.set_tab_reorderable(page, True) page.show_all() self.view.notebook.show() self.tabs[state_identifier] = {'page': page, 'state_m': state_m, 'controller': state_editor_ctrl, 'sm_id': self.model.selected_state_machine_id, 'is_sticky': False, 'source_code_view_is_dirty': source_code_view_is_dirty, 'source_code_changed_handler_id': handler_id} return page_id
[ "def", "add_state_editor", "(", "self", ",", "state_m", ")", ":", "state_identifier", "=", "self", ".", "get_state_identifier", "(", "state_m", ")", "if", "state_identifier", "in", "self", ".", "closed_tabs", ":", "state_editor_ctrl", "=", "self", ".", "closed_tabs", "[", "state_identifier", "]", "[", "'controller'", "]", "state_editor_view", "=", "state_editor_ctrl", ".", "view", "handler_id", "=", "self", ".", "closed_tabs", "[", "state_identifier", "]", "[", "'source_code_changed_handler_id'", "]", "source_code_view_is_dirty", "=", "self", ".", "closed_tabs", "[", "state_identifier", "]", "[", "'source_code_view_is_dirty'", "]", "del", "self", ".", "closed_tabs", "[", "state_identifier", "]", "# pages not in self.closed_tabs and self.tabs at the same time", "else", ":", "state_editor_view", "=", "StateEditorView", "(", ")", "if", "isinstance", "(", "state_m", ",", "LibraryStateModel", ")", ":", "state_editor_view", "[", "'main_notebook_1'", "]", ".", "set_current_page", "(", "state_editor_view", "[", "'main_notebook_1'", "]", ".", "page_num", "(", "state_editor_view", ".", "page_dict", "[", "\"Data Linkage\"", "]", ")", ")", "state_editor_ctrl", "=", "StateEditorController", "(", "state_m", ",", "state_editor_view", ")", "self", ".", "add_controller", "(", "state_identifier", ",", "state_editor_ctrl", ")", "if", "state_editor_ctrl", ".", "get_controller", "(", "'source_ctrl'", ")", "and", "state_m", ".", "state", ".", "get_next_upper_library_root_state", "(", ")", "is", "None", ":", "# observe changed to set the mark dirty flag", "handler_id", "=", "state_editor_view", ".", "source_view", ".", "get_buffer", "(", ")", ".", "connect", "(", "'changed'", ",", "self", ".", "script_text_changed", ",", "state_m", ")", "self", ".", "view", ".", "get_top_widget", "(", ")", ".", "connect", "(", "'draw'", ",", "state_editor_view", ".", "source_view", ".", "on_draw", ")", "else", ":", "handler_id", "=", "None", "source_code_view_is_dirty", "=", "False", "(", "tab", ",", "inner_label", ",", "sticky_button", ")", "=", "create_tab_header", "(", "''", ",", "self", ".", "on_tab_close_clicked", ",", "self", ".", "on_toggle_sticky_clicked", ",", "state_m", ")", "set_tab_label_texts", "(", "inner_label", ",", "state_m", ",", "source_code_view_is_dirty", ")", "state_editor_view", ".", "get_top_widget", "(", ")", ".", "title_label", "=", "inner_label", "state_editor_view", ".", "get_top_widget", "(", ")", ".", "sticky_button", "=", "sticky_button", "page_content", "=", "state_editor_view", ".", "get_top_widget", "(", ")", "page_id", "=", "self", ".", "view", ".", "notebook", ".", "prepend_page", "(", "page_content", ",", "tab", ")", "page", "=", "self", ".", "view", ".", "notebook", ".", "get_nth_page", "(", "page_id", ")", "self", ".", "view", ".", "notebook", ".", "set_tab_reorderable", "(", "page", ",", "True", ")", "page", ".", "show_all", "(", ")", "self", ".", "view", ".", "notebook", ".", "show", "(", ")", "self", ".", "tabs", "[", "state_identifier", "]", "=", "{", "'page'", ":", "page", ",", "'state_m'", ":", "state_m", ",", "'controller'", ":", "state_editor_ctrl", ",", "'sm_id'", ":", "self", ".", "model", ".", "selected_state_machine_id", ",", "'is_sticky'", ":", "False", ",", "'source_code_view_is_dirty'", ":", "source_code_view_is_dirty", ",", "'source_code_changed_handler_id'", ":", "handler_id", "}", "return", "page_id" ]
Triggered whenever a state is selected. :param state_m: The selected state model.
[ "Triggered", "whenever", "a", "state", "is", "selected", "." ]
python
train
58.714286
google/grr
grr/core/grr_response_core/lib/rdfvalues/crypto.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/crypto.py#L132-L160
def Verify(self, public_key): """Verifies the certificate using the given key. Args: public_key: The public key to use. Returns: True: Everything went well. Raises: VerificationError: The certificate did not verify. """ # TODO(amoser): We have to do this manually for now since cryptography does # not yet support cert verification. There is PR 2460: # https://github.com/pyca/cryptography/pull/2460/files # that will add it, once it's in we should switch to using this. # Note that all times here are in UTC. now = rdfvalue.RDFDatetime.Now().AsDatetime() if now > self._value.not_valid_after: raise VerificationError("Certificate expired!") if now < self._value.not_valid_before: raise VerificationError("Certificate not yet valid!") public_key.Verify( self._value.tbs_certificate_bytes, self._value.signature, hash_algorithm=self._value.signature_hash_algorithm) return True
[ "def", "Verify", "(", "self", ",", "public_key", ")", ":", "# TODO(amoser): We have to do this manually for now since cryptography does", "# not yet support cert verification. There is PR 2460:", "# https://github.com/pyca/cryptography/pull/2460/files", "# that will add it, once it's in we should switch to using this.", "# Note that all times here are in UTC.", "now", "=", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", ".", "AsDatetime", "(", ")", "if", "now", ">", "self", ".", "_value", ".", "not_valid_after", ":", "raise", "VerificationError", "(", "\"Certificate expired!\"", ")", "if", "now", "<", "self", ".", "_value", ".", "not_valid_before", ":", "raise", "VerificationError", "(", "\"Certificate not yet valid!\"", ")", "public_key", ".", "Verify", "(", "self", ".", "_value", ".", "tbs_certificate_bytes", ",", "self", ".", "_value", ".", "signature", ",", "hash_algorithm", "=", "self", ".", "_value", ".", "signature_hash_algorithm", ")", "return", "True" ]
Verifies the certificate using the given key. Args: public_key: The public key to use. Returns: True: Everything went well. Raises: VerificationError: The certificate did not verify.
[ "Verifies", "the", "certificate", "using", "the", "given", "key", "." ]
python
train
33.344828
Bachmann1234/diff-cover
diff_cover/report_generator.py
https://github.com/Bachmann1234/diff-cover/blob/901cb3fc986982961785e841658085ead453c6c9/diff_cover/report_generator.py#L237-L278
def _context(self): """ Return the context to pass to the template. The context is a dict of the form: { 'css_url': CSS_URL, 'report_name': REPORT_NAME, 'diff_name': DIFF_NAME, 'src_stats': {SRC_PATH: { 'percent_covered': PERCENT_COVERED, 'violation_lines': [LINE_NUM, ...] }, ... } 'total_num_lines': TOTAL_NUM_LINES, 'total_num_violations': TOTAL_NUM_VIOLATIONS, 'total_percent_covered': TOTAL_PERCENT_COVERED } """ # Calculate the information to pass to the template src_stats = { src: self._src_path_stats(src) for src in self.src_paths() } # Include snippet style info if we're displaying # source code snippets if self.INCLUDE_SNIPPETS: snippet_style = Snippet.style_defs() else: snippet_style = None return { 'css_url': self.css_url, 'report_name': self.coverage_report_name(), 'diff_name': self.diff_report_name(), 'src_stats': src_stats, 'total_num_lines': self.total_num_lines(), 'total_num_violations': self.total_num_violations(), 'total_percent_covered': self.total_percent_covered(), 'snippet_style': snippet_style }
[ "def", "_context", "(", "self", ")", ":", "# Calculate the information to pass to the template", "src_stats", "=", "{", "src", ":", "self", ".", "_src_path_stats", "(", "src", ")", "for", "src", "in", "self", ".", "src_paths", "(", ")", "}", "# Include snippet style info if we're displaying", "# source code snippets", "if", "self", ".", "INCLUDE_SNIPPETS", ":", "snippet_style", "=", "Snippet", ".", "style_defs", "(", ")", "else", ":", "snippet_style", "=", "None", "return", "{", "'css_url'", ":", "self", ".", "css_url", ",", "'report_name'", ":", "self", ".", "coverage_report_name", "(", ")", ",", "'diff_name'", ":", "self", ".", "diff_report_name", "(", ")", ",", "'src_stats'", ":", "src_stats", ",", "'total_num_lines'", ":", "self", ".", "total_num_lines", "(", ")", ",", "'total_num_violations'", ":", "self", ".", "total_num_violations", "(", ")", ",", "'total_percent_covered'", ":", "self", ".", "total_percent_covered", "(", ")", ",", "'snippet_style'", ":", "snippet_style", "}" ]
Return the context to pass to the template. The context is a dict of the form: { 'css_url': CSS_URL, 'report_name': REPORT_NAME, 'diff_name': DIFF_NAME, 'src_stats': {SRC_PATH: { 'percent_covered': PERCENT_COVERED, 'violation_lines': [LINE_NUM, ...] }, ... } 'total_num_lines': TOTAL_NUM_LINES, 'total_num_violations': TOTAL_NUM_VIOLATIONS, 'total_percent_covered': TOTAL_PERCENT_COVERED }
[ "Return", "the", "context", "to", "pass", "to", "the", "template", "." ]
python
train
33.666667
pybel/pybel
src/pybel/struct/graph.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/graph.py#L573-L575
def get_edge_annotations(self, u, v, key: str) -> Optional[AnnotationsDict]: """Get the annotations for a given edge.""" return self._get_edge_attr(u, v, key, ANNOTATIONS)
[ "def", "get_edge_annotations", "(", "self", ",", "u", ",", "v", ",", "key", ":", "str", ")", "->", "Optional", "[", "AnnotationsDict", "]", ":", "return", "self", ".", "_get_edge_attr", "(", "u", ",", "v", ",", "key", ",", "ANNOTATIONS", ")" ]
Get the annotations for a given edge.
[ "Get", "the", "annotations", "for", "a", "given", "edge", "." ]
python
train
61.666667
bxlab/bx-python
lib/bx/misc/binary_file.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/misc/binary_file.py#L145-L150
def write_c_string( self, value ): """ Write a zero terminated (C style) string """ self.file.write( value ) self.file.write( b'\0' )
[ "def", "write_c_string", "(", "self", ",", "value", ")", ":", "self", ".", "file", ".", "write", "(", "value", ")", "self", ".", "file", ".", "write", "(", "b'\\0'", ")" ]
Write a zero terminated (C style) string
[ "Read", "a", "zero", "terminated", "(", "C", "style", ")", "string" ]
python
train
27.833333
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L17815-L17836
def read_namespace_status(self, name, **kwargs): # noqa: E501 """read_namespace_status # noqa: E501 read status of the specified Namespace # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespace_status(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Namespace (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Namespace If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespace_status_with_http_info(name, **kwargs) # noqa: E501 else: (data) = self.read_namespace_status_with_http_info(name, **kwargs) # noqa: E501 return data
[ "def", "read_namespace_status", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "read_namespace_status_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "read_namespace_status_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
read_namespace_status # noqa: E501 read status of the specified Namespace # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespace_status(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Namespace (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Namespace If the method is called asynchronously, returns the request thread.
[ "read_namespace_status", "#", "noqa", ":", "E501" ]
python
train
45.545455
jim-easterbrook/Photini
src/photini/metadata.py
https://github.com/jim-easterbrook/Photini/blob/06f1b1988db23a5cad98bbc6c16406a64a6c556d/src/photini/metadata.py#L412-L436
def from_ISO_8601(cls, date_string, time_string, tz_string): """Sufficiently general ISO 8601 parser. Inputs must be in "basic" format, i.e. no '-' or ':' separators. See https://en.wikipedia.org/wiki/ISO_8601 """ # parse tz_string if tz_string: tz_offset = (int(tz_string[1:3]) * 60) + int(tz_string[3:]) if tz_string[0] == '-': tz_offset = -tz_offset else: tz_offset = None if time_string == '000000': # assume no time information time_string = '' tz_offset = None datetime_string = date_string + time_string[:13] precision = min((len(datetime_string) - 2) // 2, 7) if precision <= 0: return None fmt = ''.join(('%Y', '%m', '%d', '%H', '%M', '%S', '.%f')[:precision]) return cls( (datetime.strptime(datetime_string, fmt), precision, tz_offset))
[ "def", "from_ISO_8601", "(", "cls", ",", "date_string", ",", "time_string", ",", "tz_string", ")", ":", "# parse tz_string", "if", "tz_string", ":", "tz_offset", "=", "(", "int", "(", "tz_string", "[", "1", ":", "3", "]", ")", "*", "60", ")", "+", "int", "(", "tz_string", "[", "3", ":", "]", ")", "if", "tz_string", "[", "0", "]", "==", "'-'", ":", "tz_offset", "=", "-", "tz_offset", "else", ":", "tz_offset", "=", "None", "if", "time_string", "==", "'000000'", ":", "# assume no time information", "time_string", "=", "''", "tz_offset", "=", "None", "datetime_string", "=", "date_string", "+", "time_string", "[", ":", "13", "]", "precision", "=", "min", "(", "(", "len", "(", "datetime_string", ")", "-", "2", ")", "//", "2", ",", "7", ")", "if", "precision", "<=", "0", ":", "return", "None", "fmt", "=", "''", ".", "join", "(", "(", "'%Y'", ",", "'%m'", ",", "'%d'", ",", "'%H'", ",", "'%M'", ",", "'%S'", ",", "'.%f'", ")", "[", ":", "precision", "]", ")", "return", "cls", "(", "(", "datetime", ".", "strptime", "(", "datetime_string", ",", "fmt", ")", ",", "precision", ",", "tz_offset", ")", ")" ]
Sufficiently general ISO 8601 parser. Inputs must be in "basic" format, i.e. no '-' or ':' separators. See https://en.wikipedia.org/wiki/ISO_8601
[ "Sufficiently", "general", "ISO", "8601", "parser", "." ]
python
train
37.6
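Replaying the precision logic of the record above on a hypothetical basic-format input (date '20230415', time '1030', no timezone) shows how the strptime format is truncated to the digits actually present:

from datetime import datetime

date_string, time_string = "20230415", "1030"
datetime_string = date_string + time_string[:13]     # '202304151030'
precision = min((len(datetime_string) - 2) // 2, 7)  # 5, i.e. down to minutes
fmt = "".join(("%Y", "%m", "%d", "%H", "%M", "%S", ".%f")[:precision])
print(datetime.strptime(datetime_string, fmt))       # 2023-04-15 10:30:00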
fracpete/python-weka-wrapper3
python/weka/core/classes.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/classes.py#L2001-L2011
def parameters(self, params): """ Sets the list of search parameters to use. :param params: list of AbstractSearchParameter objects :type params: list """ array = JavaArray(jobject=JavaArray.new_instance("weka.core.setupgenerator.AbstractParameter", len(params))) for idx, obj in enumerate(params): array[idx] = obj.jobject javabridge.call(self.jobject, "setParameters", "([Lweka/core/setupgenerator/AbstractParameter;)V", array.jobject)
[ "def", "parameters", "(", "self", ",", "params", ")", ":", "array", "=", "JavaArray", "(", "jobject", "=", "JavaArray", ".", "new_instance", "(", "\"weka.core.setupgenerator.AbstractParameter\"", ",", "len", "(", "params", ")", ")", ")", "for", "idx", ",", "obj", "in", "enumerate", "(", "params", ")", ":", "array", "[", "idx", "]", "=", "obj", ".", "jobject", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"setParameters\"", ",", "\"([Lweka/core/setupgenerator/AbstractParameter;)V\"", ",", "array", ".", "jobject", ")" ]
Sets the list of search parameters to use. :param params: list of AbstractSearchParameter objects :type params: list
[ "Sets", "the", "list", "of", "search", "parameters", "to", "use", "." ]
python
train
45.818182
tanghaibao/jcvi
jcvi/assembly/hic.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L429-L444
def O(self): """ Pairwise strandedness matrix. Each cell contains whether i-th and j-th contig are the same orientation +1, or opposite orientation -1. """ N = self.N tig_to_idx = self.tig_to_idx O = np.zeros((N, N), dtype=int) for (at, bt), (strandedness, md, mh) in self.orientations.items(): if not (at in tig_to_idx and bt in tig_to_idx): continue ai = tig_to_idx[at] bi = tig_to_idx[bt] score = strandedness * md O[ai, bi] = O[bi, ai] = score return O
[ "def", "O", "(", "self", ")", ":", "N", "=", "self", ".", "N", "tig_to_idx", "=", "self", ".", "tig_to_idx", "O", "=", "np", ".", "zeros", "(", "(", "N", ",", "N", ")", ",", "dtype", "=", "int", ")", "for", "(", "at", ",", "bt", ")", ",", "(", "strandedness", ",", "md", ",", "mh", ")", "in", "self", ".", "orientations", ".", "items", "(", ")", ":", "if", "not", "(", "at", "in", "tig_to_idx", "and", "bt", "in", "tig_to_idx", ")", ":", "continue", "ai", "=", "tig_to_idx", "[", "at", "]", "bi", "=", "tig_to_idx", "[", "bt", "]", "score", "=", "strandedness", "*", "md", "O", "[", "ai", ",", "bi", "]", "=", "O", "[", "bi", ",", "ai", "]", "=", "score", "return", "O" ]
Pairwise strandedness matrix. Each cell contains whether i-th and j-th contig are the same orientation +1, or opposite orientation -1.
[ "Pairwise", "strandedness", "matrix", ".", "Each", "cell", "contains", "whether", "i", "-", "th", "and", "j", "-", "th", "contig", "are", "the", "same", "orientation", "+", "1", "or", "opposite", "orientation", "-", "1", "." ]
python
train
36.8125
pallets/werkzeug
src/werkzeug/routing.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/routing.py#L1555-L1565
def add(self, rulefactory): """Add a new rule or factory to the map and bind it. Requires that the rule is not bound to another map. :param rulefactory: a :class:`Rule` or :class:`RuleFactory` """ for rule in rulefactory.get_rules(self): rule.bind(self) self._rules.append(rule) self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule) self._remap = True
[ "def", "add", "(", "self", ",", "rulefactory", ")", ":", "for", "rule", "in", "rulefactory", ".", "get_rules", "(", "self", ")", ":", "rule", ".", "bind", "(", "self", ")", "self", ".", "_rules", ".", "append", "(", "rule", ")", "self", ".", "_rules_by_endpoint", ".", "setdefault", "(", "rule", ".", "endpoint", ",", "[", "]", ")", ".", "append", "(", "rule", ")", "self", ".", "_remap", "=", "True" ]
Add a new rule or factory to the map and bind it. Requires that the rule is not bound to another map. :param rulefactory: a :class:`Rule` or :class:`RuleFactory`
[ "Add", "a", "new", "rule", "or", "factory", "to", "the", "map", "and", "bind", "it", ".", "Requires", "that", "the", "rule", "is", "not", "bound", "to", "another", "map", "." ]
python
train
40
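A brief usage sketch for the method above (rule paths and endpoints are illustrative); Map's constructor calls add() for each initial rule, and later rules can be bound the same way:

from werkzeug.routing import Map, Rule

url_map = Map([Rule("/", endpoint="index")])
url_map.add(Rule("/about", endpoint="about"))
print(url_map.bind("example.com").match("/about"))  # ('about', {})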
matthieugouel/gibica
gibica/parser.py
https://github.com/matthieugouel/gibica/blob/65f937f7a6255078cc22eb7691a2897466032909/gibica/parser.py#L258-L290
def comparison(self): """ comparison: expr (('==' | '!=' | '<=' | '>=' | '<' | '>') expr)* """ node = self.expr() while self.token.nature in ( Nature.EQ, Nature.NE, Nature.LE, Nature.GE, Nature.LT, Nature.GT, ): token = self.token if token.nature == Nature.EQ: self._process(Nature.EQ) elif token.nature == Nature.NE: self._process(Nature.NE) elif token.nature == Nature.LE: self._process(Nature.LE) elif token.nature == Nature.GE: self._process(Nature.GE) elif token.nature == Nature.LT: self._process(Nature.LT) elif token.nature == Nature.GT: self._process(Nature.GT) else: self.error() node = BinaryOperation(left=node, op=token, right=self.expr()) return node
[ "def", "comparison", "(", "self", ")", ":", "node", "=", "self", ".", "expr", "(", ")", "while", "self", ".", "token", ".", "nature", "in", "(", "Nature", ".", "EQ", ",", "Nature", ".", "NE", ",", "Nature", ".", "LE", ",", "Nature", ".", "GE", ",", "Nature", ".", "LT", ",", "Nature", ".", "GT", ",", ")", ":", "token", "=", "self", ".", "token", "if", "token", ".", "nature", "==", "Nature", ".", "EQ", ":", "self", ".", "_process", "(", "Nature", ".", "EQ", ")", "elif", "token", ".", "nature", "==", "Nature", ".", "NE", ":", "self", ".", "_process", "(", "Nature", ".", "NE", ")", "elif", "token", ".", "nature", "==", "Nature", ".", "LE", ":", "self", ".", "_process", "(", "Nature", ".", "LE", ")", "elif", "token", ".", "nature", "==", "Nature", ".", "GE", ":", "self", ".", "_process", "(", "Nature", ".", "GE", ")", "elif", "token", ".", "nature", "==", "Nature", ".", "LT", ":", "self", ".", "_process", "(", "Nature", ".", "LT", ")", "elif", "token", ".", "nature", "==", "Nature", ".", "GT", ":", "self", ".", "_process", "(", "Nature", ".", "GT", ")", "else", ":", "self", ".", "error", "(", ")", "node", "=", "BinaryOperation", "(", "left", "=", "node", ",", "op", "=", "token", ",", "right", "=", "self", ".", "expr", "(", ")", ")", "return", "node" ]
comparison: expr (('==' | '!=' | '<=' | '>=' | '<' | '>') expr)*
[ "comparison", ":", "expr", "((", "==", "|", "!", "=", "|", "<", "=", "|", ">", "=", "|", "<", "|", ">", ")", "expr", ")", "*" ]
python
train
29.787879
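The while loop in the record above builds a left-associative chain: each matched operator wraps everything parsed so far as its left operand. A tiny standalone fold showing the resulting shape (names hypothetical, not gibica's AST):

from collections import namedtuple

BinOp = namedtuple("BinOp", "left op right")

def fold_comparisons(first, rest):
    node = first
    for op, operand in rest:
        node = BinOp(node, op, operand)  # the previous node becomes the left child
    return node

print(fold_comparisons("a", [("<", "b"), ("==", "c")]))
# BinOp(left=BinOp(left='a', op='<', right='b'), op='==', right='c')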
PGower/PyCanvas
pycanvas/apis/quiz_reports.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/quiz_reports.py#L46-L91
def create_quiz_report(self, quiz_id, course_id, quiz_report_report_type, include=None, quiz_report_includes_all_versions=None): """ Create a quiz report. Create and return a new report for this quiz. If a previously generated report matches the arguments and is still current (i.e. there have been no new submissions), it will be returned. *Responses* * <code>400 Bad Request</code> if the specified report type is invalid * <code>409 Conflict</code> if a quiz report of the specified type is already being generated """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - quiz_id """ID""" path["quiz_id"] = quiz_id # REQUIRED - quiz_report[report_type] """The type of report to be generated.""" self._validate_enum(quiz_report_report_type, ["student_analysis", "item_analysis"]) data["quiz_report[report_type]"] = quiz_report_report_type # OPTIONAL - quiz_report[includes_all_versions] """Whether the report should consider all submissions or only the most recent. Defaults to false, ignored for item_analysis.""" if quiz_report_includes_all_versions is not None: data["quiz_report[includes_all_versions]"] = quiz_report_includes_all_versions # OPTIONAL - include """Whether the output should include documents for the file and/or progress objects associated with this report. (Note: JSON-API only)""" if include is not None: self._validate_enum(include, ["file", "progress"]) data["include"] = include self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, single_item=True)
[ "def", "create_quiz_report", "(", "self", ",", "quiz_id", ",", "course_id", ",", "quiz_report_report_type", ",", "include", "=", "None", ",", "quiz_report_includes_all_versions", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", "course_id", "# REQUIRED - PATH - quiz_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"quiz_id\"", "]", "=", "quiz_id", "# REQUIRED - quiz_report[report_type]\r", "\"\"\"The type of report to be generated.\"\"\"", "self", ".", "_validate_enum", "(", "quiz_report_report_type", ",", "[", "\"student_analysis\"", ",", "\"item_analysis\"", "]", ")", "data", "[", "\"quiz_report[report_type]\"", "]", "=", "quiz_report_report_type", "# OPTIONAL - quiz_report[includes_all_versions]\r", "\"\"\"Whether the report should consider all submissions or only the most\r\n recent. Defaults to false, ignored for item_analysis.\"\"\"", "if", "quiz_report_includes_all_versions", "is", "not", "None", ":", "data", "[", "\"quiz_report[includes_all_versions]\"", "]", "=", "quiz_report_includes_all_versions", "# OPTIONAL - include\r", "\"\"\"Whether the output should include documents for the file and/or progress\r\n objects associated with this report. (Note: JSON-API only)\"\"\"", "if", "include", "is", "not", "None", ":", "self", ".", "_validate_enum", "(", "include", ",", "[", "\"file\"", ",", "\"progress\"", "]", ")", "data", "[", "\"include\"", "]", "=", "include", "self", ".", "logger", ".", "debug", "(", "\"POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"POST\"", ",", "\"/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "single_item", "=", "True", ")" ]
Create a quiz report. Create and return a new report for this quiz. If a previously generated report matches the arguments and is still current (i.e. there have been no new submissions), it will be returned. *Responses* * <code>400 Bad Request</code> if the specified report type is invalid * <code>409 Conflict</code> if a quiz report of the specified type is already being generated
[ "Create", "a", "quiz", "report", ".", "Create", "and", "return", "a", "new", "report", "for", "this", "quiz", ".", "If", "a", "previously", "generated", "report", "matches", "the", "arguments", "and", "is", "still", "current", "(", "i", ".", "e", ".", "there", "have", "been", "no", "new", "submissions", ")", "it", "will", "be", "returned", ".", "*", "Responses", "*", "*", "<code", ">", "400", "Bad", "Request<", "/", "code", ">", "if", "the", "specified", "report", "type", "is", "invalid", "*", "<code", ">", "409", "Conflict<", "/", "code", ">", "if", "a", "quiz", "report", "of", "the", "specified", "type", "is", "already", "being", "generated" ]
python
train
46.152174
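A hedged usage sketch for the endpoint above. The client class, its constructor, and the base URL are assumptions for illustration; only the method itself comes from the record.

# Hypothetical Canvas client exposing create_quiz_report; the constructor
# details are placeholders, not part of the record above.
client = QuizzesClient(base_url="https://canvas.example.edu", token="<api-token>")

# Request a student_analysis report that considers every submission version.
report = client.create_quiz_report(
    quiz_id=42,
    course_id=101,
    quiz_report_report_type="student_analysis",
    include="file",
    quiz_report_includes_all_versions=True,
)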
blockstack/virtualchain
virtualchain/lib/blockchain/bitcoin_blockchain/authproxy.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/authproxy.py#L203-L251
def getinfo(self): """ Backwards-compatibility for 0.14 and later """ try: old_getinfo = AuthServiceProxy(self.__service_url, 'getinfo', self.__timeout, self.__conn, True) res = old_getinfo() if 'error' not in res: # 0.13 and earlier return res except JSONRPCException: pass network_info = self.getnetworkinfo() blockchain_info = self.getblockchaininfo() try: wallet_info = self.getwalletinfo() except: wallet_info = { 'walletversion': None, 'balance': None, 'keypoololdest': None, 'keypoolsize': None, 'paytxfee': None, } res = { 'version': network_info['version'], 'protocolversion': network_info['protocolversion'], 'walletversion': wallet_info['walletversion'], 'balance': wallet_info['balance'], 'blocks': blockchain_info['blocks'], 'timeoffset': network_info['timeoffset'], 'connections': network_info['connections'], 'proxy': network_info['networks'], 'difficulty': blockchain_info['difficulty'], 'testnet': blockchain_info['chain'] == 'testnet', 'keypoololdest': wallet_info['keypoololdest'], 'keypoolsize': wallet_info['keypoolsize'], 'paytxfee': wallet_info['paytxfee'], 'errors': network_info['warnings'], } for k in ['unlocked_until', 'relayfee', 'paytxfee']: if wallet_info.has_key(k): res[k] = wallet_info[k] return res
[ "def", "getinfo", "(", "self", ")", ":", "try", ":", "old_getinfo", "=", "AuthServiceProxy", "(", "self", ".", "__service_url", ",", "'getinfo'", ",", "self", ".", "__timeout", ",", "self", ".", "__conn", ",", "True", ")", "res", "=", "old_getinfo", "(", ")", "if", "'error'", "not", "in", "res", ":", "# 0.13 and earlier", "return", "res", "except", "JSONRPCException", ":", "pass", "network_info", "=", "self", ".", "getnetworkinfo", "(", ")", "blockchain_info", "=", "self", ".", "getblockchaininfo", "(", ")", "try", ":", "wallet_info", "=", "self", ".", "getwalletinfo", "(", ")", "except", ":", "wallet_info", "=", "{", "'walletversion'", ":", "None", ",", "'balance'", ":", "None", ",", "'keypoololdest'", ":", "None", ",", "'keypoolsize'", ":", "None", ",", "'paytxfee'", ":", "None", ",", "}", "res", "=", "{", "'version'", ":", "network_info", "[", "'version'", "]", ",", "'protocolversion'", ":", "network_info", "[", "'protocolversion'", "]", ",", "'walletversion'", ":", "wallet_info", "[", "'walletversion'", "]", ",", "'balance'", ":", "wallet_info", "[", "'balance'", "]", ",", "'blocks'", ":", "blockchain_info", "[", "'blocks'", "]", ",", "'timeoffset'", ":", "network_info", "[", "'timeoffset'", "]", ",", "'connections'", ":", "network_info", "[", "'connections'", "]", ",", "'proxy'", ":", "network_info", "[", "'networks'", "]", ",", "'difficulty'", ":", "blockchain_info", "[", "'difficulty'", "]", ",", "'testnet'", ":", "blockchain_info", "[", "'chain'", "]", "==", "'testnet'", ",", "'keypoololdest'", ":", "wallet_info", "[", "'keypoololdest'", "]", ",", "'keypoolsize'", ":", "wallet_info", "[", "'keypoolsize'", "]", ",", "'paytxfee'", ":", "wallet_info", "[", "'paytxfee'", "]", ",", "'errors'", ":", "network_info", "[", "'warnings'", "]", ",", "}", "for", "k", "in", "[", "'unlocked_until'", ",", "'relayfee'", ",", "'paytxfee'", "]", ":", "if", "wallet_info", ".", "has_key", "(", "k", ")", ":", "res", "[", "k", "]", "=", "wallet_info", "[", "k", "]", "return", "res" ]
Backwards-compatibility for 0.14 and later
[ "Backwards", "-", "compatibility", "for", "0", ".", "14", "and", "later" ]
python
train
34.571429
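The method emulates the removed getinfo RPC by stitching together getnetworkinfo, getblockchaininfo, and getwalletinfo. One caveat worth noting: wallet_info.has_key(k) is a Python 2 idiom; a Python 3 port would spell it `k in wallet_info`. A hedged usage sketch follows; the node URL and credentials are placeholders.

# AuthServiceProxy is the class this method belongs to; only the URL
# argument is shown, relying on the proxy's defaults for the rest.
proxy = AuthServiceProxy("http://rpcuser:rpcpass@127.0.0.1:8332")
info = proxy.getinfo()
print(info["blocks"], info["connections"], info["difficulty"])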
StanfordVL/robosuite
robosuite/models/objects/generated_objects.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/models/objects/generated_objects.py#L309-L325
def _get_randomized_range(val, provided_range, default_range): """ Helper to initialize by either value or a range Returns a range to randomize from """ if val is None: if provided_range is None: return default_range else: return provided_range else: if provided_range is not None: raise ValueError('Value {} overrides range {}' .format(str(val), str(provided_range))) return [val]
[ "def", "_get_randomized_range", "(", "val", ",", "provided_range", ",", "default_range", ")", ":", "if", "val", "is", "None", ":", "if", "provided_range", "is", "None", ":", "return", "default_range", "else", ":", "return", "provided_range", "else", ":", "if", "provided_range", "is", "not", "None", ":", "raise", "ValueError", "(", "'Value {} overrides range {}'", ".", "format", "(", "str", "(", "val", ")", ",", "str", "(", "provided_range", ")", ")", ")", "return", "[", "val", "]" ]
Helper to initialize by either value or a range Returns a range to randomize from
[ "Helper", "to", "initialize", "by", "either", "value", "or", "a", "range", "Returns", "a", "range", "to", "randomize", "from" ]
python
train
32.117647
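The three branches are easiest to see side by side; this snippet assumes the function above is in scope and mirrors its documented behavior exactly.

print(_get_randomized_range(None, None, [0.1, 0.5]))        # [0.1, 0.5]: default range
print(_get_randomized_range(None, [0.2, 0.3], [0.1, 0.5]))  # [0.2, 0.3]: provided range wins
print(_get_randomized_range(0.25, None, [0.1, 0.5]))        # [0.25]: fixed value as a 1-element range
# Passing both a value and a range raises ValueError:
# _get_randomized_range(0.25, [0.2, 0.3], [0.1, 0.5])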
GoogleCloudPlatform/appengine-gcs-client
python/src/cloudstorage/storage_api.py
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/storage_api.py#L639-L650
def find_newline(self, size=-1):
    """Search for newline char in buffer starting from current offset.

    Args:
      size: number of bytes to search. -1 means all.

    Returns:
      offset of newline char in buffer. -1 if it doesn't exist.
    """
    if size < 0:
      return self._buffer.find('\n', self._offset)
    return self._buffer.find('\n', self._offset, self._offset + size)
[ "def", "find_newline", "(", "self", ",", "size", "=", "-", "1", ")", ":", "if", "size", "<", "0", ":", "return", "self", ".", "_buffer", ".", "find", "(", "'\\n'", ",", "self", ".", "_offset", ")", "return", "self", ".", "_buffer", ".", "find", "(", "'\\n'", ",", "self", ".", "_offset", ",", "self", ".", "_offset", "+", "size", ")" ]
Search for newline char in buffer starting from current offset.

    Args:
      size: number of bytes to search. -1 means all.

    Returns:
      offset of newline char in buffer. -1 if it doesn't exist.
[ "Search", "for", "newline", "char", "in", "buffer", "starting", "from", "current", "offset", "." ]
python
train
31.416667
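A minimal stand-in that reproduces the window arithmetic with a plain string instead of the real buffered reader; the offsets match what the method would return.

buffer, offset = "abc\ndef\n", 2
print(buffer.find("\n", offset))               # 3: size < 0 searches to the end
print(buffer.find("\n", offset, offset + 1))   # -1: newline lies outside the 1-byte window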
albertyw/pyziptax
pyziptax/ziptax.py
https://github.com/albertyw/pyziptax/blob/c56dd440e4cadff7f2dd4b72e5dcced06a44969d/pyziptax/ziptax.py#L24-L34
def get_rate(self, zipcode, city=None, state=None, multiple_rates=False): """ Finds sales tax for given info. Returns Decimal of the tax rate, e.g. 8.750. """ data = self.make_request_data(zipcode, city, state) r = requests.get(self.url, params=data) resp = r.json() return self.process_response(resp, multiple_rates)
[ "def", "get_rate", "(", "self", ",", "zipcode", ",", "city", "=", "None", ",", "state", "=", "None", ",", "multiple_rates", "=", "False", ")", ":", "data", "=", "self", ".", "make_request_data", "(", "zipcode", ",", "city", ",", "state", ")", "r", "=", "requests", ".", "get", "(", "self", ".", "url", ",", "params", "=", "data", ")", "resp", "=", "r", ".", "json", "(", ")", "return", "self", ".", "process_response", "(", "resp", ",", "multiple_rates", ")" ]
Finds sales tax for given info. Returns Decimal of the tax rate, e.g. 8.750.
[ "Finds", "sales", "tax", "for", "given", "info", ".", "Returns", "Decimal", "of", "the", "tax", "rate", "e", ".", "g", ".", "8", ".", "750", "." ]
python
valid
33.909091
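A hedged usage sketch; the wrapping class name and its API-key constructor are assumptions, since only the method body appears in the record (the Zip-Tax service itself requires a key).

api = ZipTaxClient("<your-api-key>")   # hypothetical constructor
rate = api.get_rate("94105", city="San Francisco", state="CA")
print(rate)   # e.g. Decimal('8.750')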
djgagne/hagelslag
hagelslag/data/ModelGrid.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/ModelGrid.py#L55-L94
def load_data_old(self): """ Loads time series of 2D data grids from each opened file. The code handles loading a full time series from one file or individual time steps from multiple files. Missing files are supported. """ units = "" if len(self.file_objects) == 1 and self.file_objects[0] is not None: data = self.file_objects[0].variables[self.variable][self.forecast_hours] if hasattr(self.file_objects[0].variables[self.variable], "units"): units = self.file_objects[0].variables[self.variable].units elif len(self.file_objects) > 1: grid_shape = [len(self.file_objects), 1, 1] for file_object in self.file_objects: if file_object is not None: if self.variable in file_object.variables.keys(): grid_shape = file_object.variables[self.variable].shape elif self.variable.ljust(6, "_") in file_object.variables.keys(): grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape else: print("{0} not found".format(self.variable)) raise KeyError break data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2])) for f, file_object in enumerate(self.file_objects): if file_object is not None: if self.variable in file_object.variables.keys(): var_name = self.variable elif self.variable.ljust(6, "_") in file_object.variables.keys(): var_name = self.variable.ljust(6, "_") else: print("{0} not found".format(self.variable)) raise KeyError data[f] = file_object.variables[var_name][0] if units == "" and hasattr(file_object.variables[var_name], "units"): units = file_object.variables[var_name].units else: data = None return data, units
[ "def", "load_data_old", "(", "self", ")", ":", "units", "=", "\"\"", "if", "len", "(", "self", ".", "file_objects", ")", "==", "1", "and", "self", ".", "file_objects", "[", "0", "]", "is", "not", "None", ":", "data", "=", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "self", ".", "variable", "]", "[", "self", ".", "forecast_hours", "]", "if", "hasattr", "(", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "self", ".", "variable", "]", ",", "\"units\"", ")", ":", "units", "=", "self", ".", "file_objects", "[", "0", "]", ".", "variables", "[", "self", ".", "variable", "]", ".", "units", "elif", "len", "(", "self", ".", "file_objects", ")", ">", "1", ":", "grid_shape", "=", "[", "len", "(", "self", ".", "file_objects", ")", ",", "1", ",", "1", "]", "for", "file_object", "in", "self", ".", "file_objects", ":", "if", "file_object", "is", "not", "None", ":", "if", "self", ".", "variable", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "grid_shape", "=", "file_object", ".", "variables", "[", "self", ".", "variable", "]", ".", "shape", "elif", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "grid_shape", "=", "file_object", ".", "variables", "[", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "]", ".", "shape", "else", ":", "print", "(", "\"{0} not found\"", ".", "format", "(", "self", ".", "variable", ")", ")", "raise", "KeyError", "break", "data", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "file_objects", ")", ",", "grid_shape", "[", "1", "]", ",", "grid_shape", "[", "2", "]", ")", ")", "for", "f", ",", "file_object", "in", "enumerate", "(", "self", ".", "file_objects", ")", ":", "if", "file_object", "is", "not", "None", ":", "if", "self", ".", "variable", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "var_name", "=", "self", ".", "variable", "elif", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "in", "file_object", ".", "variables", ".", "keys", "(", ")", ":", "var_name", "=", "self", ".", "variable", ".", "ljust", "(", "6", ",", "\"_\"", ")", "else", ":", "print", "(", "\"{0} not found\"", ".", "format", "(", "self", ".", "variable", ")", ")", "raise", "KeyError", "data", "[", "f", "]", "=", "file_object", ".", "variables", "[", "var_name", "]", "[", "0", "]", "if", "units", "==", "\"\"", "and", "hasattr", "(", "file_object", ".", "variables", "[", "var_name", "]", ",", "\"units\"", ")", ":", "units", "=", "file_object", ".", "variables", "[", "var_name", "]", ".", "units", "else", ":", "data", "=", "None", "return", "data", ",", "units" ]
Loads time series of 2D data grids from each opened file. The code handles loading a full time series from one file or individual time steps from multiple files. Missing files are supported.
[ "Loads", "time", "series", "of", "2D", "data", "grids", "from", "each", "opened", "file", ".", "The", "code", "handles", "loading", "a", "full", "time", "series", "from", "one", "file", "or", "individual", "time", "steps", "from", "multiple", "files", ".", "Missing", "files", "are", "supported", "." ]
python
train
53.225
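The multi-file branch falls back to padding short variable names with underscores to six characters before the lookup; the idiom in isolation:

for name in ("uh", "refl"):
    print(name.ljust(6, "_"))   # 'uh____', 'refl__'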
tanghaibao/jcvi
jcvi/formats/gff.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L376-L390
def to_range(obj, score=None, id=None, strand=None): """ Given a gffutils object, convert it to a range object """ from jcvi.utils.range import Range if score or id: _score = score if score else obj.score _id = id if id else obj.id return Range(seqid=obj.seqid, start=obj.start, end=obj.end, \ score=_score, id=_id) elif strand: return (obj.seqid, obj.start, obj.end, obj.strand) return (obj.seqid, obj.start, obj.end)
[ "def", "to_range", "(", "obj", ",", "score", "=", "None", ",", "id", "=", "None", ",", "strand", "=", "None", ")", ":", "from", "jcvi", ".", "utils", ".", "range", "import", "Range", "if", "score", "or", "id", ":", "_score", "=", "score", "if", "score", "else", "obj", ".", "score", "_id", "=", "id", "if", "id", "else", "obj", ".", "id", "return", "Range", "(", "seqid", "=", "obj", ".", "seqid", ",", "start", "=", "obj", ".", "start", ",", "end", "=", "obj", ".", "end", ",", "score", "=", "_score", ",", "id", "=", "_id", ")", "elif", "strand", ":", "return", "(", "obj", ".", "seqid", ",", "obj", ".", "start", ",", "obj", ".", "end", ",", "obj", ".", "strand", ")", "return", "(", "obj", ".", "seqid", ",", "obj", ".", "start", ",", "obj", ".", "end", ")" ]
Given a gffutils object, convert it to a range object
[ "Given", "a", "gffutils", "object", "convert", "it", "to", "a", "range", "object" ]
python
train
31.866667
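A quick demonstration of the three return shapes, using a namedtuple as a stand-in for a gffutils feature (only the attributes the function reads are provided; the first call's Range branch assumes jcvi is importable):

from collections import namedtuple

Feature = namedtuple("Feature", "seqid start end score id strand")
f = Feature("chr1", 100, 200, 0.9, "gene1", "+")

print(to_range(f))                       # ('chr1', 100, 200)
print(to_range(f, strand=True))          # ('chr1', 100, 200, '+')
print(to_range(f, score=1.0, id="g2"))   # Range(seqid='chr1', start=100, end=200, score=1.0, id='g2')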
bkabrda/anymarkup-core
anymarkup_core/__init__.py
https://github.com/bkabrda/anymarkup-core/blob/299935092fc2650cca4e32ec92441786918f9bab/anymarkup_core/__init__.py#L76-L117
def parse(inp, format=None, encoding='utf-8', force_types=True):
    """Parse input from file-like object, unicode string or byte string.

    Args:
        inp: file-like object, unicode string or byte string with the markup
        format: explicitly override the guessed `inp` markup format
        encoding: `inp` encoding, defaults to utf-8
        force_types: if `True`, integers, floats, booleans and none/null
            are recognized and returned as proper types instead of strings;
            if `False`, everything is converted to strings;
            if `None`, backend return value is used
    Returns:
        parsed input (dict or list) containing unicode values

    Raises:
        AnyMarkupError if a problem occurs while parsing `inp`
    """
    proper_inp = inp
    if hasattr(inp, 'read'):
        proper_inp = inp.read()
    # if proper_inp is unicode, encode it
    if isinstance(proper_inp, six.text_type):
        proper_inp = proper_inp.encode(encoding)

    # try to guess markup type
    fname = None
    if hasattr(inp, 'name'):
        fname = inp.name
    fmt = _get_format(format, fname, proper_inp)

    # make it look like file-like bytes-yielding object
    proper_inp = six.BytesIO(proper_inp)

    try:
        res = _do_parse(proper_inp, fmt, encoding, force_types)
    except Exception as e:
        # I wish there was only Python 3 and I could just use "raise ... from e"
        raise AnyMarkupError(e, traceback.format_exc())
    if res is None:
        res = {}

    return res
[ "def", "parse", "(", "inp", ",", "format", "=", "None", ",", "encoding", "=", "'utf-8'", ",", "force_types", "=", "True", ")", ":", "proper_inp", "=", "inp", "if", "hasattr", "(", "inp", ",", "'read'", ")", ":", "proper_inp", "=", "inp", ".", "read", "(", ")", "# if proper_inp is unicode, encode it", "if", "isinstance", "(", "proper_inp", ",", "six", ".", "text_type", ")", ":", "proper_inp", "=", "proper_inp", ".", "encode", "(", "encoding", ")", "# try to guess markup type", "fname", "=", "None", "if", "hasattr", "(", "inp", ",", "'name'", ")", ":", "fname", "=", "inp", ".", "name", "fmt", "=", "_get_format", "(", "format", ",", "fname", ",", "proper_inp", ")", "# make it look like file-like bytes-yielding object", "proper_inp", "=", "six", ".", "BytesIO", "(", "proper_inp", ")", "try", ":", "res", "=", "_do_parse", "(", "proper_inp", ",", "fmt", ",", "encoding", ",", "force_types", ")", "except", "Exception", "as", "e", ":", "# I wish there was only Python 3 and I could just use \"raise ... from e\"", "raise", "AnyMarkupError", "(", "e", ",", "traceback", ".", "format_exc", "(", ")", ")", "if", "res", "is", "None", ":", "res", "=", "{", "}", "return", "res" ]
Parse input from file-like object, unicode string or byte string.

    Args:
        inp: file-like object, unicode string or byte string with the markup
        format: explicitly override the guessed `inp` markup format
        encoding: `inp` encoding, defaults to utf-8
        force_types: if `True`, integers, floats, booleans and none/null
            are recognized and returned as proper types instead of strings;
            if `False`, everything is converted to strings;
            if `None`, backend return value is used
    Returns:
        parsed input (dict or list) containing unicode values

    Raises:
        AnyMarkupError if a problem occurs while parsing `inp`
[ "Parse", "input", "from", "file", "-", "like", "object", "unicode", "string", "or", "byte", "string", "." ]
python
train
35.690476
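Since this lives in the package's __init__, it is callable as anymarkup_core.parse (or via the anymarkup wrapper); format guessing and the force_types switch look like this:

import anymarkup_core

print(anymarkup_core.parse("a: 1"))                      # {'a': 1} — YAML guessed, int coerced
print(anymarkup_core.parse("a: 1", force_types=False))   # {'a': '1'} — everything stays a string
with open("config.json") as f:                           # hypothetical file on disk
    cfg = anymarkup_core.parse(f)                        # format guessed from name/content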
jaredLunde/redis_structures
redis_structures/debug/__init__.py
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/debug/__init__.py#L950-L961
def list(self, size=1000, tree_depth=1): """ Creates a random #list @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|[value1, value2]| 2=|[[value1, value2], [value1, value2]]| -> random #list """ if not tree_depth: return self._map_type() return list(self.deque(size, tree_depth-1) for x in range(size))
[ "def", "list", "(", "self", ",", "size", "=", "1000", ",", "tree_depth", "=", "1", ")", ":", "if", "not", "tree_depth", ":", "return", "self", ".", "_map_type", "(", ")", "return", "list", "(", "self", ".", "deque", "(", "size", ",", "tree_depth", "-", "1", ")", "for", "x", "in", "range", "(", "size", ")", ")" ]
Creates a random #list @size: #int number of random values to include in each @tree_depth @tree_depth: #int dict tree dimensions size, i.e. 1=|[value1, value2]| 2=|[[value1, value2], [value1, value2]]| -> random #list
[ "Creates", "a", "random", "#list" ]
python
train
38.75
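A hedged sketch of how this generator method is meant to be called; the class name and constructor are assumptions, and per the code each extra tree_depth wraps the values in another layer of sequences built by deque():

rand = RandData(int)                       # hypothetical random-data generator
flat = rand.list(size=3, tree_depth=1)     # e.g. [52, 7, 901]
nested = rand.list(size=2, tree_depth=2)   # two inner sequences, two values each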
ml4ai/delphi
delphi/translators/for2py/pyTranslate.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/translators/for2py/pyTranslate.py#L1151-L1199
def printArray(self, node, printState: PrintState): """ Prints out the array declaration in a format of Array class object declaration. 'arrayName = Array(Type, [bounds])' """ if ( self.nameMapper[node["name"]] not in printState.definedVars and self.nameMapper[node["name"]] not in printState.globalVars ): printState.definedVars += [self.nameMapper[node["name"]]] assert int(node["count"]) > 0 printState.definedVars += [node["name"]] varType = "" if node["type"].upper() == "INTEGER": varType = "int" elif node["type"].upper() in ("DOUBLE", "REAL"): varType = "float" elif node["type"].upper() == "CHARACTER": varType = "str" elif node["isDevTypeVar"]: varType = node["type"].lower() + "()" assert varType != "" self.pyStrings.append(f"{node['name']} = Array({varType}, [") for i in range(0, int(node["count"])): loBound = node["low" + str(i + 1)] upBound = node["up" + str(i + 1)] dimensions = f"({loBound}, {upBound})" if i < int(node["count"]) - 1: self.pyStrings.append(f"{dimensions}, ") else: self.pyStrings.append(f"{dimensions}") self.pyStrings.append("])") if node["isDevTypeVar"]: self.pyStrings.append(printState.sep) # This may require updating later when we have to deal with the # multi-dimensional derived type arrays upBound = node["up1"] self.pyStrings.append( f"for z in range(1, {upBound}+1):" + printState.sep ) self.pyStrings.append( f" obj = {node['type']}()" + printState.sep ) self.pyStrings.append( f" {node['name']}.set_(z, obj)" + printState.sep )
[ "def", "printArray", "(", "self", ",", "node", ",", "printState", ":", "PrintState", ")", ":", "if", "(", "self", ".", "nameMapper", "[", "node", "[", "\"name\"", "]", "]", "not", "in", "printState", ".", "definedVars", "and", "self", ".", "nameMapper", "[", "node", "[", "\"name\"", "]", "]", "not", "in", "printState", ".", "globalVars", ")", ":", "printState", ".", "definedVars", "+=", "[", "self", ".", "nameMapper", "[", "node", "[", "\"name\"", "]", "]", "]", "assert", "int", "(", "node", "[", "\"count\"", "]", ")", ">", "0", "printState", ".", "definedVars", "+=", "[", "node", "[", "\"name\"", "]", "]", "varType", "=", "\"\"", "if", "node", "[", "\"type\"", "]", ".", "upper", "(", ")", "==", "\"INTEGER\"", ":", "varType", "=", "\"int\"", "elif", "node", "[", "\"type\"", "]", ".", "upper", "(", ")", "in", "(", "\"DOUBLE\"", ",", "\"REAL\"", ")", ":", "varType", "=", "\"float\"", "elif", "node", "[", "\"type\"", "]", ".", "upper", "(", ")", "==", "\"CHARACTER\"", ":", "varType", "=", "\"str\"", "elif", "node", "[", "\"isDevTypeVar\"", "]", ":", "varType", "=", "node", "[", "\"type\"", "]", ".", "lower", "(", ")", "+", "\"()\"", "assert", "varType", "!=", "\"\"", "self", ".", "pyStrings", ".", "append", "(", "f\"{node['name']} = Array({varType}, [\"", ")", "for", "i", "in", "range", "(", "0", ",", "int", "(", "node", "[", "\"count\"", "]", ")", ")", ":", "loBound", "=", "node", "[", "\"low\"", "+", "str", "(", "i", "+", "1", ")", "]", "upBound", "=", "node", "[", "\"up\"", "+", "str", "(", "i", "+", "1", ")", "]", "dimensions", "=", "f\"({loBound}, {upBound})\"", "if", "i", "<", "int", "(", "node", "[", "\"count\"", "]", ")", "-", "1", ":", "self", ".", "pyStrings", ".", "append", "(", "f\"{dimensions}, \"", ")", "else", ":", "self", ".", "pyStrings", ".", "append", "(", "f\"{dimensions}\"", ")", "self", ".", "pyStrings", ".", "append", "(", "\"])\"", ")", "if", "node", "[", "\"isDevTypeVar\"", "]", ":", "self", ".", "pyStrings", ".", "append", "(", "printState", ".", "sep", ")", "# This may require updating later when we have to deal with the", "# multi-dimensional derived type arrays", "upBound", "=", "node", "[", "\"up1\"", "]", "self", ".", "pyStrings", ".", "append", "(", "f\"for z in range(1, {upBound}+1):\"", "+", "printState", ".", "sep", ")", "self", ".", "pyStrings", ".", "append", "(", "f\" obj = {node['type']}()\"", "+", "printState", ".", "sep", ")", "self", ".", "pyStrings", ".", "append", "(", "f\" {node['name']}.set_(z, obj)\"", "+", "printState", ".", "sep", ")" ]
Prints out the array declaration in a format of Array class object declaration. 'arrayName = Array(Type, [bounds])'
[ "Prints", "out", "the", "array", "declaration", "in", "a", "format", "of", "Array", "class", "object", "declaration", ".", "arrayName", "=", "Array", "(", "Type", "[", "bounds", "]", ")" ]
python
train
42.163265
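For orientation, this is the Python text the emitter produces (names here are illustrative, and the lines are the generated output rather than runnable on their own): a one-dimensional INTEGER array with bounds 1..10 becomes the first declaration, and an array of a derived type mytype gets a fill loop.

arr = Array(int, [(1, 10)])

dt = Array(mytype(), [(1, 10)])
for z in range(1, 10+1):
    obj = mytype()
    dt.set_(z, obj)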
deshima-dev/decode
decode/core/array/functions.py
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/core/array/functions.py#L124-L141
def zeros_like(array, dtype=None, keepmeta=True): """Create an array of zeros with the same shape and type as the input array. Args: array (xarray.DataArray): The shape and data-type of it define these same attributes of the output array. dtype (data-type, optional): If specified, this function overrides the data-type of the output array. keepmeta (bool, optional): Whether *coords, attrs, and name of the input array are kept in the output one. Default is True. Returns: array (decode.array): Decode array filled with zeros. """ if keepmeta: return xr.zeros_like(array, dtype) else: return dc.zeros(array.shape, dtype)
[ "def", "zeros_like", "(", "array", ",", "dtype", "=", "None", ",", "keepmeta", "=", "True", ")", ":", "if", "keepmeta", ":", "return", "xr", ".", "zeros_like", "(", "array", ",", "dtype", ")", "else", ":", "return", "dc", ".", "zeros", "(", "array", ".", "shape", ",", "dtype", ")" ]
Create an array of zeros with the same shape and type as the input array. Args: array (xarray.DataArray): The shape and data-type of it define these same attributes of the output array. dtype (data-type, optional): If specified, this function overrides the data-type of the output array. keepmeta (bool, optional): Whether *coords, attrs, and name of the input array are kept in the output one. Default is True. Returns: array (decode.array): Decode array filled with zeros.
[ "Create", "an", "array", "of", "zeros", "with", "the", "same", "shape", "and", "type", "as", "the", "input", "array", "." ]
python
train
39.666667
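Typical calls; with keepmeta=True the result is a like-shaped xarray.DataArray of zeros, while the keepmeta=False branch assumes the decode package is available for dc.zeros:

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(6.0).reshape(2, 3), dims=("t", "ch"))
z = zeros_like(da)                                       # same dims/coords, all zeros
z32 = zeros_like(da, dtype="float32", keepmeta=False)    # bare decode array, metadata dropped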
edx/edx-enterprise
enterprise/api/v1/serializers.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api/v1/serializers.py#L382-L401
def to_representation(self, instance): """ Return the updated course data dictionary. Arguments: instance (dict): The course data. Returns: dict: The updated course data. """ updated_course = copy.deepcopy(instance) enterprise_customer_catalog = self.context['enterprise_customer_catalog'] updated_course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url( updated_course['key'] ) for course_run in updated_course['course_runs']: course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url( course_run['key'] ) return updated_course
[ "def", "to_representation", "(", "self", ",", "instance", ")", ":", "updated_course", "=", "copy", ".", "deepcopy", "(", "instance", ")", "enterprise_customer_catalog", "=", "self", ".", "context", "[", "'enterprise_customer_catalog'", "]", "updated_course", "[", "'enrollment_url'", "]", "=", "enterprise_customer_catalog", ".", "get_course_enrollment_url", "(", "updated_course", "[", "'key'", "]", ")", "for", "course_run", "in", "updated_course", "[", "'course_runs'", "]", ":", "course_run", "[", "'enrollment_url'", "]", "=", "enterprise_customer_catalog", ".", "get_course_run_enrollment_url", "(", "course_run", "[", "'key'", "]", ")", "return", "updated_course" ]
Return the updated course data dictionary. Arguments: instance (dict): The course data. Returns: dict: The updated course data.
[ "Return", "the", "updated", "course", "data", "dictionary", "." ]
python
valid
36.55
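The deepcopy at the top is what keeps the serializer side-effect free: the enrollment URLs are written onto a copy, never onto the shared input. The same guarantee in isolation (the URLs are placeholders):

import copy

course = {"key": "edX+DemoX", "course_runs": [{"key": "run-1"}]}
updated = copy.deepcopy(course)
updated["enrollment_url"] = "https://example.com/enroll"
updated["course_runs"][0]["enrollment_url"] = "https://example.com/enroll/run-1"
assert "enrollment_url" not in course                    # input left untouched
assert "enrollment_url" not in course["course_runs"][0]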
pyrogram/pyrogram
pyrogram/client/methods/password/change_cloud_password.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/methods/password/change_cloud_password.py#L27-L72
def change_cloud_password( self, current_password: str, new_password: str, new_hint: str = "" ) -> bool: """Use this method to change your Two-Step Verification password (Cloud Password) with a new one. Args: current_password (``str``): Your current password. new_password (``str``): Your new password. new_hint (``str``, *optional*): A new password hint. Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``ValueError`` in case there is no cloud password to change. """ r = self.send(functions.account.GetPassword()) if not r.has_password: raise ValueError("There is no cloud password to change") r.new_algo.salt1 += os.urandom(32) new_hash = btoi(compute_hash(r.new_algo, new_password)) new_hash = itob(pow(r.new_algo.g, new_hash, btoi(r.new_algo.p))) self.send( functions.account.UpdatePasswordSettings( password=compute_check(r, current_password), new_settings=types.account.PasswordInputSettings( new_algo=r.new_algo, new_password_hash=new_hash, hint=new_hint ) ) ) return True
[ "def", "change_cloud_password", "(", "self", ",", "current_password", ":", "str", ",", "new_password", ":", "str", ",", "new_hint", ":", "str", "=", "\"\"", ")", "->", "bool", ":", "r", "=", "self", ".", "send", "(", "functions", ".", "account", ".", "GetPassword", "(", ")", ")", "if", "not", "r", ".", "has_password", ":", "raise", "ValueError", "(", "\"There is no cloud password to change\"", ")", "r", ".", "new_algo", ".", "salt1", "+=", "os", ".", "urandom", "(", "32", ")", "new_hash", "=", "btoi", "(", "compute_hash", "(", "r", ".", "new_algo", ",", "new_password", ")", ")", "new_hash", "=", "itob", "(", "pow", "(", "r", ".", "new_algo", ".", "g", ",", "new_hash", ",", "btoi", "(", "r", ".", "new_algo", ".", "p", ")", ")", ")", "self", ".", "send", "(", "functions", ".", "account", ".", "UpdatePasswordSettings", "(", "password", "=", "compute_check", "(", "r", ",", "current_password", ")", ",", "new_settings", "=", "types", ".", "account", ".", "PasswordInputSettings", "(", "new_algo", "=", "r", ".", "new_algo", ",", "new_password_hash", "=", "new_hash", ",", "hint", "=", "new_hint", ")", ")", ")", "return", "True" ]
Use this method to change your Two-Step Verification password (Cloud Password) with a new one. Args: current_password (``str``): Your current password. new_password (``str``): Your new password. new_hint (``str``, *optional*): A new password hint. Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``ValueError`` in case there is no cloud password to change.
[ "Use", "this", "method", "to", "change", "your", "Two", "-", "Step", "Verification", "password", "(", "Cloud", "Password", ")", "with", "a", "new", "one", "." ]
python
train
30.543478
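A hedged usage sketch for the synchronous client API this record comes from; the session name, passwords, and hint are placeholders.

from pyrogram import Client

app = Client("my_account")
app.start()
app.change_cloud_password(
    current_password="old password",
    new_password="new password",
    new_hint="favourite book",
)
app.stop()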
codeforamerica/epa_python
epa/pcs/pcs.py
https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L173-L182
def sludge(self, column=None, value=None, **kwargs):
    """
    Sludge information describes the volume of sludge produced at
    a facility, identification information on a sludge handler, and
    classification/permitting information on a facility that handles
    sludge, such as a pretreatment POTW.

    >>> PCS().sludge('county_name', 'San Francisco')
    """
    return self._resolve_call('PCS_SLUDGE', column, value, **kwargs)
[ "def", "sludge", "(", "self", ",", "column", "=", "None", ",", "value", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_resolve_call", "(", "'PCS_SLUDGE'", ",", "column", ",", "value", ",", "*", "*", "kwargs", ")" ]
Sludge information describes the volume of sludge produced at
        a facility, identification information on a sludge handler, and
        classification/permitting information on a facility that handles
        sludge, such as a pretreatment POTW.

        >>> PCS().sludge('county_name', 'San Francisco')
[ "Sludge", "information", "describes", "the", "volumn", "of", "sludge", "produced", "at", "a", "facility", "identification", "information", "on", "a", "sludge", "handler", "and", "classification", "/", "permitting", "information", "on", "a", "facility", "that", "handles", "sludge", "such", "as", "a", "pretreatment", "POTW", "." ]
python
train
45.8
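Usage follows the doctest in the docstring; the import path is inferred from the file layout, and extra keyword filters pass straight through to _resolve_call:

from epa.pcs.pcs import PCS   # path assumed from epa/pcs/pcs.py

results = PCS().sludge("county_name", "San Francisco")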
casastorta/python-sar
sar/multiparser.py
https://github.com/casastorta/python-sar/blob/e6d8bb86524102d677f37e985302fad34e3297c1/sar/multiparser.py#L132-L180
def __split_file(self): ''' Splits combined SAR output file (in ASCII format) in order to extract info we need for it, in the format we want. :return: ``List``-style of SAR file sections separated by the type of info they contain (SAR file sections) without parsing what is exactly what at this point ''' # Filename passed checks through __init__ if (self.__filename and os.access(self.__filename, os.R_OK)): fhandle = None try: fhandle = os.open(self.__filename, os.O_RDONLY) except OSError: print(("Couldn't open file %s" % (self.__filename))) fhandle = None if (fhandle): try: sarmap = mmap.mmap(fhandle, length=0, prot=mmap.PROT_READ) except (TypeError, IndexError): os.close(fhandle) traceback.print_exc() #sys.exit(-1) return False sfpos = sarmap.find(PATTERN_MULTISPLIT, 0) while (sfpos > -1): '''Split by day found''' self.__splitpointers.append(sfpos) # Iterate for new position try: sfpos = sarmap.find(PATTERN_MULTISPLIT, (sfpos + 1)) except ValueError: print("ValueError on mmap.find()") return True if (self.__splitpointers): # Not sure if this will work - if empty set # goes back as True here return True return False
[ "def", "__split_file", "(", "self", ")", ":", "# Filename passed checks through __init__", "if", "(", "self", ".", "__filename", "and", "os", ".", "access", "(", "self", ".", "__filename", ",", "os", ".", "R_OK", ")", ")", ":", "fhandle", "=", "None", "try", ":", "fhandle", "=", "os", ".", "open", "(", "self", ".", "__filename", ",", "os", ".", "O_RDONLY", ")", "except", "OSError", ":", "print", "(", "(", "\"Couldn't open file %s\"", "%", "(", "self", ".", "__filename", ")", ")", ")", "fhandle", "=", "None", "if", "(", "fhandle", ")", ":", "try", ":", "sarmap", "=", "mmap", ".", "mmap", "(", "fhandle", ",", "length", "=", "0", ",", "prot", "=", "mmap", ".", "PROT_READ", ")", "except", "(", "TypeError", ",", "IndexError", ")", ":", "os", ".", "close", "(", "fhandle", ")", "traceback", ".", "print_exc", "(", ")", "#sys.exit(-1)", "return", "False", "sfpos", "=", "sarmap", ".", "find", "(", "PATTERN_MULTISPLIT", ",", "0", ")", "while", "(", "sfpos", ">", "-", "1", ")", ":", "'''Split by day found'''", "self", ".", "__splitpointers", ".", "append", "(", "sfpos", ")", "# Iterate for new position", "try", ":", "sfpos", "=", "sarmap", ".", "find", "(", "PATTERN_MULTISPLIT", ",", "(", "sfpos", "+", "1", ")", ")", "except", "ValueError", ":", "print", "(", "\"ValueError on mmap.find()\"", ")", "return", "True", "if", "(", "self", ".", "__splitpointers", ")", ":", "# Not sure if this will work - if empty set", "# goes back as True here", "return", "True", "return", "False" ]
Splits combined SAR output file (in ASCII format) in order to extract info we need for it, in the format we want. :return: ``List``-style of SAR file sections separated by the type of info they contain (SAR file sections) without parsing what is exactly what at this point
[ "Splits", "combined", "SAR", "output", "file", "(", "in", "ASCII", "format", ")", "in", "order", "to", "extract", "info", "we", "need", "for", "it", "in", "the", "format", "we", "want", ".", ":", "return", ":", "List", "-", "style", "of", "SAR", "file", "sections", "separated", "by", "the", "type", "of", "info", "they", "contain", "(", "SAR", "file", "sections", ")", "without", "parsing", "what", "is", "exactly", "what", "at", "this", "point" ]
python
train
33.591837
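The core of the method is a straightforward mmap scan: collect every offset where the day-separator pattern occurs. The same loop, standalone (file name and pattern are placeholders; mmap.PROT_READ is POSIX-only):

import mmap

PATTERN = b"Linux"                    # stand-in for PATTERN_MULTISPLIT
offsets = []
with open("sarfile", "rb") as f:      # hypothetical combined SAR output
    mm = mmap.mmap(f.fileno(), length=0, prot=mmap.PROT_READ)
    pos = mm.find(PATTERN, 0)
    while pos > -1:
        offsets.append(pos)
        pos = mm.find(PATTERN, pos + 1)
print(offsets)                        # split points, one per day section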
thombashi/typepy
typepy/converter/_datetime.py
https://github.com/thombashi/typepy/blob/8209d1df4f2a7f196a9fa4bfb0708c5ff648461f/typepy/converter/_datetime.py#L131-L147
def __validate_datetime_string(self): """ This will require validating version string (such as "3.3.5"). A version string could be converted to a datetime value if this validation is not executed. """ try: try: StrictVersion(self._value) raise TypeConversionError( "invalid datetime string: version string found {}".format(self._value) ) except ValueError: pass except TypeError: raise TypeConversionError("invalid datetime string: type={}".format(type(self._value)))
[ "def", "__validate_datetime_string", "(", "self", ")", ":", "try", ":", "try", ":", "StrictVersion", "(", "self", ".", "_value", ")", "raise", "TypeConversionError", "(", "\"invalid datetime string: version string found {}\"", ".", "format", "(", "self", ".", "_value", ")", ")", "except", "ValueError", ":", "pass", "except", "TypeError", ":", "raise", "TypeConversionError", "(", "\"invalid datetime string: type={}\"", ".", "format", "(", "type", "(", "self", ".", "_value", ")", ")", ")" ]
This will require validating version string (such as "3.3.5"). A version string could be converted to a datetime value if this validation is not executed.
[ "This", "will", "require", "validating", "version", "string", "(", "such", "as", "3", ".", "3", ".", "5", ")", ".", "A", "version", "string", "could", "be", "converted", "to", "a", "datetime", "value", "if", "this", "validation", "is", "not", "executed", "." ]
python
train
36.941176
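The guard works because StrictVersion only accepts dotted version strings and raises ValueError for everything else; distutils is deprecated since Python 3.10, so this demonstrates the legacy behavior being relied on:

from distutils.version import StrictVersion

for s in ("3.3.5", "2017-01-23"):
    try:
        StrictVersion(s)
        print(repr(s), "parses as a version -> rejected as a datetime string")
    except ValueError:
        print(repr(s), "is not a version -> conversion may proceed")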