Dataset columns:

    repo              string  (length 7 to 55)
    path              string  (length 4 to 223)
    url               string  (length 87 to 315)
    code              string  (length 75 to 104k)
    code_tokens       list
    docstring         string  (length 1 to 46.9k)
    docstring_tokens  list
    language          string  (1 distinct value)
    partition         string  (3 distinct values)
    avg_line_len      float64 (7.91 to 980)
edeposit/edeposit.amqp.harvester
src/edeposit/amqp/harvester/scrappers/cpress_cz.py
https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/cpress_cz.py#L205-L227
def _parse_description(html_chunk):
    """
    Parse description of the book.

    Args:
        html_chunk (obj): HTMLElement containing slice of the page with
                          details.

    Returns:
        str/None: Description as string or None if not found.
    """
    description_tag = html_chunk.match(
        ["div", {"class": "kniha_detail_text"}],
        "p"
    )

    if not description_tag:
        return None

    description = get_first_content(description_tag)
    description = description.replace("<br />", "\n")
    description = description.replace("<br/>", "\n")

    return dhtmlparser.removeTags(description).strip()
python
train
26.304348
f3at/feat
src/feat/models/effect.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/effect.py#L127-L136
def context_value(name):
    """
    Returns an effect that drops the current value,
    and replaces it with the value from the context
    with the given name.
    """

    def context_value(_value, context, **_params):
        return defer.succeed(context[name])

    return context_value
python
train
27.8
kmmbvnr/django-any
django_any/models.py
https://github.com/kmmbvnr/django-any/blob/6f64ebd05476e2149e2e71deeefbb10f8edfc412/django_any/models.py#L311-L322
def any_slug_field(field, **kwargs):
    """
    Return random value for SlugField

    >>> result = any_field(models.SlugField())
    >>> type(result)
    <type 'str'>
    >>> from django.core.validators import slug_re
    >>> re.match(slug_re, result) is not None
    True
    """
    letters = ascii_letters + digits + '_-'
    return xunit.any_string(letters=letters, max_length=field.max_length)
python
test
33.583333
sprockets/sprockets.mixins.amqp
sprockets/mixins/amqp/__init__.py
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L390-L401
def close(self):
    """Cleanly shutdown the connection to RabbitMQ

    :raises: sprockets.mixins.amqp.ConnectionStateError

    """
    if not self.closable:
        LOGGER.warning('Closed called while %s', self.state_description)
        raise ConnectionStateError(self.state_description)
    self.state = self.STATE_CLOSING
    LOGGER.info('Closing RabbitMQ connection')
    self.connection.close()
python
train
35.583333
mrstephenneal/dirutility
dirutility/backup.py
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/backup.py#L89-L93
def _backup_pb_tqdm(self, dirs):
    """Create a backup with a tqdm progress bar."""
    with ZipFile(self.zip_filename, 'w') as backup_zip:
        for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):
            backup_zip.write(path, path[len(self.source):len(path)])
python
train
59.2
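The arcname expression path[len(self.source):len(path)] stores each entry relative to the backup root rather than with its absolute path. A minimal standalone sketch of the same idea, without the class or the tqdm progress bar (backup_dir, source, and zip_filename are illustrative names, not part of dirutility):

import os
from zipfile import ZipFile

def backup_dir(source, zip_filename):
    # Collect every file under `source`, then store each one under its
    # path relative to `source`, mirroring path[len(source):] above.
    # (ZipFile.write strips any leading path separator from the arcname.)
    paths = []
    for root, _dirs, files in os.walk(source):
        for name in files:
            paths.append(os.path.join(root, name))
    with ZipFile(zip_filename, 'w') as backup_zip:
        for path in paths:
            backup_zip.write(path, path[len(source):])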
python-diamond/Diamond
src/collectors/netstat/netstat.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/netstat/netstat.py#L34-L42
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(NetstatCollector, self).get_default_config()
    config.update({
        'path': 'netstat',
    })
    return config
python
train
28.444444
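The pattern here is the usual collector idiom: fetch the parent's defaults, then overlay subclass-specific keys. A minimal self-contained sketch of that merge pattern (BaseCollector and NetstatLikeCollector are hypothetical stand-ins, not Diamond's real classes):

class BaseCollector(object):
    def get_default_config(self):
        return {'enabled': True, 'interval': 60}

class NetstatLikeCollector(BaseCollector):
    def get_default_config(self):
        # Start from the parent's defaults, then overlay our own keys,
        # mirroring the super(...).get_default_config() + update() idiom.
        config = super(NetstatLikeCollector, self).get_default_config()
        config.update({'path': 'netstat'})
        return config

print(NetstatLikeCollector().get_default_config())
# {'enabled': True, 'interval': 60, 'path': 'netstat'}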
andreikop/python-ws-discovery
wsdiscovery/cmdline.py
https://github.com/andreikop/python-ws-discovery/blob/a7b852cf43115c6f986e509b1870d6963e76687f/wsdiscovery/cmdline.py#L64-L74
def discover(scope, loglevel, capture):
    "Discover systems using WS-Discovery"
    if loglevel:
        level = getattr(logging, loglevel, None)
        if not level:
            print("Invalid log level '%s'" % loglevel)
            return
        logger.setLevel(level)

    run(scope=scope, capture=capture)
python
test
27.454545
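getattr(logging, loglevel, None) maps a level name such as "DEBUG" to its numeric constant on the logging module, falling back to None for unknown names. A standalone sketch of that lookup with a slightly stricter guard (resolve_level is an illustrative helper, not part of wsdiscovery):

import logging

def resolve_level(name):
    # getattr turns "DEBUG" into logging.DEBUG (10); the isinstance check
    # rejects non-level module attributes such as logging.getLogger.
    level = getattr(logging, name.upper(), None)
    if not isinstance(level, int):
        raise ValueError("Invalid log level '%s'" % name)
    return level

logging.getLogger("wsdiscovery").setLevel(resolve_level("debug"))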
libyal/dtfabric
dtfabric/reader.py
https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/reader.py#L914-L943
def _ReadStreamDataTypeDefinition(
    self, definitions_registry, definition_values, definition_name,
    is_member=False):
  """Reads a stream data type definition.

  Args:
    definitions_registry (DataTypeDefinitionsRegistry): data type definitions
        registry.
    definition_values (dict[str, object]): definition values.
    definition_name (str): name of the definition.
    is_member (Optional[bool]): True if the data type definition is a member
        data type definition.

  Returns:
    StreamDefinition: stream data type definition.

  Raises:
    DefinitionReaderError: if the definitions values are missing or if
        the format is incorrect.
  """
  if is_member:
    supported_definition_values = (
        self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE)
  else:
    supported_definition_values = (
        self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE)

  return self._ReadElementSequenceDataTypeDefinition(
      definitions_registry, definition_values, data_types.StreamDefinition,
      definition_name, supported_definition_values)
python
train
36.866667
not-na/peng3d
peng3d/model.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/model.py#L316-L325
def setParent(self, parent):
    """
    Sets the parent of this bone for all entities.

    Note that this method must be called before many other methods to
    ensure internal state has been initialized.

    This method also registers this bone as a child of its parent.
    """
    self.parent = parent
    self.parent.child_bones[self.name] = self
python
test
38.1
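setParent records the link in both directions: the child stores its parent, and the parent indexes the child by name in child_bones, so a skeleton can be walked top-down. A minimal standalone sketch of that registration pattern (this Bone class is a stand-in, not peng3d's):

class Bone(object):
    def __init__(self, name):
        self.name = name
        self.parent = None
        self.child_bones = {}

    def setParent(self, parent):
        # Child -> parent link, plus parent -> child keyed by name.
        self.parent = parent
        self.parent.child_bones[self.name] = self

root = Bone("root")
arm = Bone("arm")
arm.setParent(root)
assert root.child_bones["arm"] is arm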
Julius2342/pyvlx
pyvlx/opening_device.py
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/opening_device.py#L43-L53
async def open(self, wait_for_completion=True):
    """Open window.

    Parameters:
        * wait_for_completion: If set, function will return
            after device has reached target position.

    """
    await self.set_position(
        position=Position(position_percent=0),
        wait_for_completion=wait_for_completion)
python
train
32.181818
heitzmann/gdspy
gdspy/__init__.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L3322-L3352
def add(self, cell, overwrite_duplicate=False):
    """
    Add one or more cells to the library.

    Parameters
    ----------
    cell : ``Cell`` or list of ``Cell``
        Cells to be included in the library.
    overwrite_duplicate : bool
        If True, an existing cell with the same name in the library
        will be overwritten.

    Returns
    -------
    out : ``GdsLibrary``
        This object.
    """
    if isinstance(cell, Cell):
        if (not overwrite_duplicate and cell.name in self.cell_dict
                and self.cell_dict[cell.name] is not cell):
            raise ValueError("[GDSPY] cell named {0} already present in "
                             "library.".format(cell.name))
        self.cell_dict[cell.name] = cell
    else:
        for c in cell:
            if (not overwrite_duplicate and c.name in self.cell_dict
                    and self.cell_dict[c.name] is not c):
                raise ValueError("[GDSPY] cell named {0} already present "
                                 "in library.".format(c.name))
            self.cell_dict[c.name] = c
    return self
python
train
38.290323
log2timeline/plaso
plaso/parsers/plist_plugins/airport.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/plist_plugins/airport.py#L25-L57
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
  """Extracts relevant Airport entries.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
  """
  if 'RememberedNetworks' not in match:
    return

  for wifi in match['RememberedNetworks']:
    ssid = wifi.get('SSIDString', 'UNKNOWN_SSID')
    security_type = wifi.get('SecurityType', 'UNKNOWN_SECURITY_TYPE')

    event_data = plist_event.PlistTimeEventData()
    event_data.desc = (
        '[WiFi] Connected to network: <{0:s}> using security {1:s}').format(
            ssid, security_type)
    event_data.key = 'item'
    event_data.root = '/RememberedNetworks'

    datetime_value = wifi.get('LastConnected', None)
    if datetime_value:
      event = time_events.PythonDatetimeEvent(
          datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
    else:
      date_time = dfdatetime_semantic_time.SemanticTime('Not set')
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)

    parser_mediator.ProduceEventWithEventData(event, event_data)
python
train
38.212121
mikedh/trimesh
trimesh/collision.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/collision.py#L373-L439
def in_collision_other(self,
                       other_manager,
                       return_names=False,
                       return_data=False):
    """
    Check if any object from this manager collides with any object
    from another manager.

    Parameters
    -------------------
    other_manager : CollisionManager
      Another collision manager object
    return_names : bool
      If true, a set is returned containing the names
      of all pairs of objects in collision.
    return_data : bool
      If true, a list of ContactData is returned as well

    Returns
    -------------
    is_collision : bool
      True if a collision occurred between any pair of objects
      and False otherwise
    names : set of 2-tup
      The set of pairwise collisions. Each tuple contains two names
      (first from this manager, second from the other_manager)
      indicating that the two corresponding objects are in collision.
    contacts : list of ContactData
      All contacts detected
    """
    cdata = fcl.CollisionData()
    if return_names or return_data:
        cdata = fcl.CollisionData(
            request=fcl.CollisionRequest(
                num_max_contacts=100000,
                enable_contact=True))

    self._manager.collide(other_manager._manager,
                          cdata,
                          fcl.defaultCollisionCallback)

    result = cdata.result.is_collision

    objs_in_collision = set()
    contact_data = []
    if return_names or return_data:
        for contact in cdata.result.contacts:
            reverse = False
            names = (self._extract_name(contact.o1),
                     other_manager._extract_name(contact.o2))
            if names[0] is None:
                names = (self._extract_name(contact.o2),
                         other_manager._extract_name(contact.o1))
                reverse = True

            if return_names:
                objs_in_collision.add(names)
            if return_data:
                if reverse:
                    names = reversed(names)
                contact_data.append(ContactData(names, contact))

    if return_names and return_data:
        return result, objs_in_collision, contact_data
    elif return_names:
        return result, objs_in_collision
    elif return_data:
        return result, contact_data
    else:
        return result
python
train
37.373134
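A hedged usage sketch: two managers holding one box each, checked pairwise. It assumes trimesh is installed together with its optional python-fcl dependency; the object names are illustrative:

import trimesh
from trimesh.collision import CollisionManager

a = CollisionManager()
b = CollisionManager()
# Two unit boxes, the second shifted by 0.5 so the pair overlaps.
a.add_object('box_a', trimesh.creation.box())
b.add_object('box_b', trimesh.creation.box(),
             transform=trimesh.transformations.translation_matrix([0.5, 0, 0]))

hit, names = a.in_collision_other(b, return_names=True)
print(hit, names)  # True {('box_a', 'box_b')}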
miguelgrinberg/python-socketio
socketio/asyncio_namespace.py
https://github.com/miguelgrinberg/python-socketio/blob/c0c1bf8d21e3597389b18938550a0724dd9676b7/socketio/asyncio_namespace.py#L73-L83
async def close_room(self, room, namespace=None):
    """Close a room.

    The only difference with the :func:`socketio.Server.close_room` method
    is that when the ``namespace`` argument is not given the namespace
    associated with the class is used.

    Note: this method is a coroutine.
    """
    return await self.server.close_room(
        room, namespace=namespace or self.namespace)
python
train
38.090909
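A hedged sketch of calling close_room from inside an asyncio namespace; because no namespace argument is passed, the namespace bound to the class is used. Event, room, and namespace names here are illustrative:

import socketio

class GameNamespace(socketio.AsyncNamespace):
    async def on_game_over(self, sid, data):
        # Falls back to this class's namespace ('/game').
        await self.close_room('lobby')

sio = socketio.AsyncServer()
sio.register_namespace(GameNamespace('/game'))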
ralphje/imagemounter
imagemounter/volume_system.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L411-L482
def detect(self, volume_system, vstype='detect'):
    """Finds and mounts all volumes based on mmls."""
    try:
        cmd = ['mmls']
        if volume_system.parent.offset:
            cmd.extend(['-o', str(volume_system.parent.offset // volume_system.disk.block_size)])
        if vstype in ('dos', 'mac', 'bsd', 'sun', 'gpt'):
            cmd.extend(['-t', vstype])
        cmd.append(volume_system.parent.get_raw_path())
        output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
        volume_system.volume_source = 'multi'
    except Exception as e:
        # some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
        if hasattr(e, 'output') and "(GPT or DOS at 0)" in e.output.decode() and vstype != 'gpt':
            volume_system.vstype = 'gpt'
            # noinspection PyBroadException
            try:
                logger.warning("Error in retrieving volume info: mmls couldn't decide between GPT and DOS, "
                               "choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
                cmd = ['mmls', '-t', 'gpt', self.parent.get_raw_path()]
                output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
                volume_system.volume_source = 'multi'
            except Exception as e:
                logger.exception("Failed executing mmls command")
                raise SubsystemError(e)
        else:
            logger.exception("Failed executing mmls command")
            raise SubsystemError(e)

    output = output.split("Description", 1)[-1]
    for line in output.splitlines():
        if not line:
            continue
        # noinspection PyBroadException
        try:
            values = line.split(None, 5)

            # sometimes there are only 5 elements available
            description = ''
            index, slot, start, end, length = values[0:5]
            if len(values) > 5:
                description = values[5]

            volume = volume_system._make_subvolume(
                index=self._format_index(volume_system, int(index[:-1])),
                offset=int(start) * volume_system.disk.block_size,
                size=int(length) * volume_system.disk.block_size
            )
            volume.info['fsdescription'] = description
        except Exception:
            logger.exception("Error while parsing mmls output")
            continue

        if slot.lower() == 'meta':
            volume.flag = 'meta'
            logger.info("Found meta volume: block offset: {0}, length: {1}".format(start, length))
        elif slot.lower().startswith('-----'):
            volume.flag = 'unalloc'
            logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start, length))
        else:
            volume.flag = 'alloc'
            if ":" in slot:
                volume.slot = _util.determine_slot(*slot.split(':'))
            else:
                volume.slot = _util.determine_slot(-1, slot)
            volume_system._assign_disktype_data(volume)
            logger.info("Found allocated {2}: block offset: {0}, length: {1} ".format(start, length,
                                                                                      volume.info['fsdescription']))
        yield volume
python
train
48.166667
cltk/cltk
cltk/inflection/old_norse/verbs.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/inflection/old_norse/verbs.py#L855-L892
def past_active(self):
    """
    Weak verbs

    I
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["kalla", "kallaði", "kallaðinn"])
    >>> verb.past_active()
    ['kallaða', 'kallaðir', 'kallaði', 'kölluðum', 'kölluðuð', 'kölluðu']

    II
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["mæla", "mælti", "mæltr"])
    >>> verb.past_active()
    ['mælta', 'mæltir', 'mælti', 'mæltum', 'mæltuð', 'mæltu']

    III
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["telja", "taldi", "talinn"])
    >>> verb.past_active()
    ['talda', 'taldir', 'taldi', 'töldum', 'tölduð', 'töldu']

    IV
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["vaka", "vakti", "vakat"])
    >>> verb.past_active()
    ['vakta', 'vaktir', 'vakti', 'vöktum', 'vöktuð', 'vöktu']

    :return:
    """
    forms = []
    stem = self.sfg3et[:-1]
    forms.append(stem + "a")
    forms.append(self.sfg3et + "r")
    forms.append(self.sfg3et)
    forms.append(apply_u_umlaut(stem) + "um")
    forms.append(apply_u_umlaut(stem) + "uð")
    forms.append(apply_u_umlaut(stem) + "u")
    return forms
python
train
32.210526
cloudant/python-cloudant
src/cloudant/client.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/client.py#L145-L194
def connect(self):
    """
    Starts up an authentication session for the client using cookie
    authentication if necessary.
    """
    if self.r_session:
        self.session_logout()

    if self.admin_party:
        self._use_iam = False
        self.r_session = ClientSession(
            timeout=self._timeout
        )
    elif self._use_basic_auth:
        self._use_iam = False
        self.r_session = BasicSession(
            self._user,
            self._auth_token,
            self.server_url,
            timeout=self._timeout
        )
    elif self._use_iam:
        self.r_session = IAMSession(
            self._auth_token,
            self.server_url,
            auto_renew=self._auto_renew,
            client_id=self._iam_client_id,
            client_secret=self._iam_client_secret,
            timeout=self._timeout
        )
    else:
        self.r_session = CookieSession(
            self._user,
            self._auth_token,
            self.server_url,
            auto_renew=self._auto_renew,
            timeout=self._timeout
        )

    # If a Transport Adapter was supplied add it to the session
    if self.adapter is not None:
        self.r_session.mount(self.server_url, self.adapter)

    if self._client_user_header is not None:
        self.r_session.headers.update(self._client_user_header)

    self.session_login()

    # Utilize an event hook to append to the response message
    # using :func:`~cloudant.common_util.append_response_error_content`
    self.r_session.hooks['response'].append(append_response_error_content)
python
train
34.12
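connect() is normally triggered for you by constructing the client with connect=True. A hedged sketch (account name, credentials, and URL are placeholders):

from cloudant.client import Cloudant

client = Cloudant('myuser', 'mypassword',
                  url='https://myaccount.cloudant.com',
                  connect=True)  # runs connect() under the hood
try:
    print(client.all_dbs())
finally:
    client.disconnect()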
watson-developer-cloud/python-sdk
ibm_watson/natural_language_understanding_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/natural_language_understanding_v1.py#L689-L696
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'limit') and self.limit is not None:
        _dict['limit'] = self.limit
    if hasattr(self, 'model') and self.model is not None:
        _dict['model'] = self.model
    return _dict
python
train
40
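The hasattr/None double guard keeps unset optional fields out of the serialized dictionary. A minimal standalone sketch of the same pattern (EntitiesOptionsLike is a hypothetical stand-in for the SDK model):

class EntitiesOptionsLike(object):
    def __init__(self, limit=None, model=None):
        self.limit = limit
        self.model = model

    def _to_dict(self):
        # Only fields that are both present and non-None are emitted.
        _dict = {}
        if hasattr(self, 'limit') and self.limit is not None:
            _dict['limit'] = self.limit
        if hasattr(self, 'model') and self.model is not None:
            _dict['model'] = self.model
        return _dict

print(EntitiesOptionsLike(limit=5)._to_dict())  # {'limit': 5}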
KE-works/pykechain
pykechain/models/scope.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/scope.py#L199-L211
def add_member(self, member):
    """
    Add a single member to the scope.

    You may only edit the list of members if the pykechain credentials
    allow this.

    :param member: single username to be added to the scope list of members
    :type member: basestring
    :raises APIError: when unable to update the scope member
    """
    select_action = 'add_member'

    self._update_scope_project_team(select_action=select_action,
                                    user=member,
                                    user_type='member')
python
train
37.692308
DinoTools/python-overpy
overpy/__init__.py
https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L586-L594
def get_ways(self, way_id=None, **kwargs):
    """
    Alias for get_elements() but filter the result by Way

    :param way_id: The Id of the way
    :type way_id: Integer
    :return: List of elements
    """
    return self.get_elements(Way, elem_id=way_id, **kwargs)
python
train
32.222222
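A hedged sketch of the method in use: run an Overpass query, then narrow the result to ways. It needs network access to a public Overpass API endpoint, and the bounding-box values are illustrative:

import overpy

api = overpy.Overpass()
result = api.query("way(50.745,7.17,50.75,7.18);out body;")
# get_ways() delegates to get_elements(Way, ...); pass way_id to
# narrow the list to a single way.
for way in result.get_ways():
    print(way.id, way.tags.get("highway"))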
wandb/client
wandb/vendor/prompt_toolkit/layout/mouse_handlers.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/mouse_handlers.py#L24-L29
def set_mouse_handler_for_range(self, x_min, x_max, y_min, y_max, handler=None):
    """
    Set mouse handler for a region.
    """
    for x, y in product(range(x_min, x_max), range(y_min, y_max)):
        self.mouse_handlers[x, y] = handler
python
train
42.833333
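The handler grid is simply a dict keyed by (x, y) cell coordinates, and the range setter fills every cell in the rectangle. A standalone sketch (this MouseHandlers holder mirrors the structure but is not prompt_toolkit's class):

from itertools import product

class MouseHandlers(object):
    def __init__(self):
        # One handler per screen cell, keyed by (x, y).
        self.mouse_handlers = {}

    def set_mouse_handler_for_range(self, x_min, x_max, y_min, y_max,
                                    handler=None):
        for x, y in product(range(x_min, x_max), range(y_min, y_max)):
            self.mouse_handlers[x, y] = handler

handlers = MouseHandlers()
handlers.set_mouse_handler_for_range(0, 3, 0, 2, handler=lambda event: None)
print(len(handlers.mouse_handlers))  # 6 cells covered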
pantsbuild/pants
src/python/pants/engine/build_files.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/engine/build_files.py#L80-L164
def hydrate_struct(address_mapper, address):
  """Given an AddressMapper and an Address, resolve a Struct from a BUILD file.

  Recursively collects any embedded addressables within the Struct, but will not walk into a
  dependencies field, since those should be requested explicitly by rules.
  """
  address_family = yield Get(AddressFamily, Dir(address.spec_path))

  struct = address_family.addressables.get(address)
  addresses = address_family.addressables
  if not struct or address not in addresses:
    _raise_did_you_mean(address_family, address.target_name)
  # TODO: This is effectively: "get the BuildFileAddress for this Address".
  # see https://github.com/pantsbuild/pants/issues/6657
  address = next(build_address for build_address in addresses if build_address == address)

  inline_dependencies = []

  def maybe_append(outer_key, value):
    if isinstance(value, six.string_types):
      if outer_key != 'dependencies':
        inline_dependencies.append(Address.parse(value,
                                                 relative_to=address.spec_path,
                                                 subproject_roots=address_mapper.subproject_roots))
    elif isinstance(value, Struct):
      collect_inline_dependencies(value)

  def collect_inline_dependencies(item):
    for key, value in sorted(item._asdict().items(), key=_key_func):
      if not AddressableDescriptor.is_addressable(item, key):
        continue
      if isinstance(value, MutableMapping):
        for _, v in sorted(value.items(), key=_key_func):
          maybe_append(key, v)
      elif isinstance(value, MutableSequence):
        for v in value:
          maybe_append(key, v)
      else:
        maybe_append(key, value)

  # Recursively collect inline dependencies from the fields of the struct into `inline_dependencies`.
  collect_inline_dependencies(struct)

  # And then hydrate the inline dependencies.
  hydrated_inline_dependencies = yield [Get(HydratedStruct, Address, a)
                                        for a in inline_dependencies]
  dependencies = [d.value for d in hydrated_inline_dependencies]

  def maybe_consume(outer_key, value):
    if isinstance(value, six.string_types):
      if outer_key == 'dependencies':
        # Don't recurse into the dependencies field of a Struct, since those will be explicitly
        # requested by tasks. But do ensure that their addresses are absolute, since we're
        # about to lose the context in which they were declared.
        value = Address.parse(value,
                              relative_to=address.spec_path,
                              subproject_roots=address_mapper.subproject_roots)
      else:
        value = dependencies[maybe_consume.idx]
        maybe_consume.idx += 1
    elif isinstance(value, Struct):
      value = consume_dependencies(value)
    return value
  # NB: Some pythons throw an UnboundLocalError for `idx` if it is a simple local variable.
  maybe_consume.idx = 0

  # 'zip' the previously-requested dependencies back together as struct fields.
  def consume_dependencies(item, args=None):
    hydrated_args = args or {}
    for key, value in sorted(item._asdict().items(), key=_key_func):
      if not AddressableDescriptor.is_addressable(item, key):
        hydrated_args[key] = value
        continue
      if isinstance(value, MutableMapping):
        container_type = type(value)
        hydrated_args[key] = container_type((k, maybe_consume(key, v))
                                            for k, v in sorted(value.items(), key=_key_func))
      elif isinstance(value, MutableSequence):
        container_type = type(value)
        hydrated_args[key] = container_type(maybe_consume(key, v) for v in value)
      else:
        hydrated_args[key] = maybe_consume(key, value)
    return _hydrate(type(item), address.spec_path, **hydrated_args)

  yield HydratedStruct(consume_dependencies(struct, args={'address': address}))
python
train
44.752941
graphql-python/graphql-core-next
graphql/utilities/schema_printer.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/utilities/schema_printer.py#L306-L308
def print_value(value: Any, type_: GraphQLInputType) -> str:
    """Convenience function for printing a Python value"""
    return print_ast(ast_from_value(value, type_))
python
train
56
guaix-ucm/numina
numina/array/wavecalib/resample.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/resample.py#L102-L132
def shiftx_image2d_flux(image2d_orig, xoffset):
    """Resample 2D image using a shift in the x direction (flux is preserved).

    Parameters
    ----------
    image2d_orig : numpy array
        2D image to be resampled.
    xoffset : float
        Offset to be applied.

    Returns
    -------
    image2d_resampled : numpy array
        Resampled 2D image.

    """
    if image2d_orig.ndim == 1:
        naxis1 = image2d_orig.size
    elif image2d_orig.ndim == 2:
        naxis2, naxis1 = image2d_orig.shape
    else:
        print('>>> image2d_orig.shape:', image2d_orig.shape)
        raise ValueError('Unexpected number of dimensions')

    return resample_image2d_flux(image2d_orig,
                                 naxis1=naxis1,
                                 cdelt1=1,
                                 crval1=1,
                                 crpix1=1,
                                 coeff=[xoffset, 1])
python
train
28.806452
alexras/pylsdj
pylsdj/instrument.py
https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/instrument.py#L79-L86
def import_lsdinst(self, struct_data):
    """import from an lsdinst struct"""
    self.name = struct_data['name']
    self.automate = struct_data['data']['automate']
    self.pan = struct_data['data']['pan']

    if self.table is not None:
        self.table.import_lsdinst(struct_data)
python
train
38
edeposit/edeposit.amqp.harvester
src/edeposit/amqp/harvester/scrappers/zonerpress_cz/zonerpress_api.py
https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/zonerpress_cz/zonerpress_api.py#L115-L158
def has_neigh(tag_name, params=None, content=None, left=True):
    """
    This function generates functions, which matches all tags with
    neighbours defined by parameters.

    Args:
        tag_name (str): Tag has to have neighbour with this tagname.
        params (dict): Tag has to have neighbour with this parameters.
        content (str): Tag has to have neighbour with this content.
        left (bool, default True): Tag has to have neighbour on the left,
             or right (set to ``False``).

    Returns:
        bool: True for every matching tag.

    Note:
        This function can be used as parameter for ``.find()`` method in
        HTMLElement.
    """
    def has_neigh_closure(element):
        if not element.parent \
           or not (element.isTag() and not element.isEndTag()):
            return False

        # filter only visible tags/neighbours
        childs = element.parent.childs
        childs = filter(
            lambda x: (x.isTag() and not x.isEndTag())
                      or x.getContent().strip() or x is element,
            childs
        )
        if len(childs) <= 1:
            return False

        ioe = childs.index(element)
        if left and ioe > 0:
            return is_equal_tag(childs[ioe - 1], tag_name, params, content)

        if not left and ioe + 1 < len(childs):
            return is_equal_tag(childs[ioe + 1], tag_name, params, content)

        return False

    return has_neigh_closure
python
train
32.909091
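Per the docstring's note, the generated closure is meant to be passed as the fn filter of HTMLElement's find(). A hedged sketch, assuming dhtmlparser's find(tag_name, params=None, fn=None) signature and the markup shown here:

import dhtmlparser

dom = dhtmlparser.parseString(
    '<div class="price">100</div><p>In stock</p>'
)
# Match only <p> tags whose left neighbour is <div class="price">.
paragraphs = dom.find("p", fn=has_neigh("div", {"class": "price"}))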
apache/spark
python/pyspark/streaming/dstream.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L239-L245
def groupByKey(self, numPartitions=None): """ Return a new DStream by applying groupByKey on each RDD. """ if numPartitions is None: numPartitions = self._sc.defaultParallelism return self.transform(lambda rdd: rdd.groupByKey(numPartitions))
[ "def", "groupByKey", "(", "self", ",", "numPartitions", "=", "None", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_sc", ".", "defaultParallelism", "return", "self", ".", "transform", "(", "lambda", "rdd", ":", "rdd", ".", "groupByKey", "(", "numPartitions", ")", ")" ]
Return a new DStream by applying groupByKey on each RDD.
[ "Return", "a", "new", "DStream", "by", "applying", "groupByKey", "on", "each", "RDD", "." ]
python
train
41
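A hedged driver sketch for the method above; the socket source, host and port are placeholders:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext('local[2]', 'GroupByKeyDemo')
ssc = StreamingContext(sc, 1)                        # 1-second batches
pairs = ssc.socketTextStream('localhost', 9999).map(lambda w: (w, 1))
pairs.groupByKey().mapValues(list).pprint()          # e.g. ('spark', [1, 1, 1])
ssc.start()
ssc.awaitTermination()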
KxSystems/pyq
src/pyq/__init__.py
https://github.com/KxSystems/pyq/blob/ad7b807abde94615a7344aaa930bb01fb1552cc5/src/pyq/__init__.py#L485-L497
def update(self, columns=(), by=(), where=(), **kwds): """update from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.update('a*2', ... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE a b ---- 1 10 2 20 6 30 """ return self._seu('update', columns, by, where, kwds)
[ "def", "update", "(", "self", ",", "columns", "=", "(", ")", ",", "by", "=", "(", ")", ",", "where", "=", "(", ")", ",", "*", "*", "kwds", ")", ":", "return", "self", ".", "_seu", "(", "'update'", ",", "columns", ",", "by", ",", "where", ",", "kwds", ")" ]
update from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.update('a*2', ... where='b > 20').show() # doctest: +NORMALIZE_WHITESPACE a b ---- 1 10 2 20 6 30
[ "update", "from", "self" ]
python
train
27.615385
skyfielders/python-skyfield
skyfield/timelib.py
https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/timelib.py#L540-L557
def _utc_year(self): """Return a fractional UTC year, for convenience when plotting. An experiment, probably superseded by the ``J`` attribute below. """ d = self._utc_float() - 1721059.5 #d += offset C = 365 * 100 + 24 d -= 365 d += d // C - d // (4 * C) d += 365 # Y = d / C * 100 # print(Y) K = 365 * 3 + 366 d -= (d + K*7//8) // K # d -= d // 1461.0 return d / 365.0
[ "def", "_utc_year", "(", "self", ")", ":", "d", "=", "self", ".", "_utc_float", "(", ")", "-", "1721059.5", "#d += offset", "C", "=", "365", "*", "100", "+", "24", "d", "-=", "365", "d", "+=", "d", "//", "C", "-", "d", "//", "(", "4", "*", "C", ")", "d", "+=", "365", "# Y = d / C * 100", "# print(Y)", "K", "=", "365", "*", "3", "+", "366", "d", "-=", "(", "d", "+", "K", "*", "7", "//", "8", ")", "//", "K", "# d -= d // 1461.0", "return", "d", "/", "365.0" ]
Return a fractional UTC year, for convenience when plotting. An experiment, probably superseded by the ``J`` attribute below.
[ "Return", "a", "fractional", "UTC", "year", "for", "convenience", "when", "plotting", "." ]
python
train
26.388889
budacom/trading-bots
trading_bots/contrib/clients.py
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L424-L426
def fetch_open_orders(self, limit: int) -> List[Order]: """Fetch latest open orders, must provide a limit.""" return self._fetch_orders_limit(self._open_orders, limit)
[ "def", "fetch_open_orders", "(", "self", ",", "limit", ":", "int", ")", "->", "List", "[", "Order", "]", ":", "return", "self", ".", "_fetch_orders_limit", "(", "self", ".", "_open_orders", ",", "limit", ")" ]
Fetch latest open orders, must provide a limit.
[ "Fetch", "latest", "open", "orders", "must", "provide", "a", "limit", "." ]
python
train
60.333333
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/internal/formatting/deserialize.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/internal/formatting/deserialize.py#L76-L88
def _verified_version_from_id(version_id): # type: (int) -> SerializationVersion """Load a message :class:`SerializationVersion` for the specified version ID. :param int version_id: Message format version ID :return: Message format version :rtype: SerializationVersion :raises NotSupportedError: if unsupported version ID is received """ try: return SerializationVersion(version_id) except ValueError as error: raise NotSupportedError("Unsupported version {}".format(version_id), error)
[ "def", "_verified_version_from_id", "(", "version_id", ")", ":", "# type: (int) -> SerializationVersion", "try", ":", "return", "SerializationVersion", "(", "version_id", ")", "except", "ValueError", "as", "error", ":", "raise", "NotSupportedError", "(", "\"Unsupported version {}\"", ".", "format", "(", "version_id", ")", ",", "error", ")" ]
Load a message :class:`SerializationVersion` for the specified version ID. :param int version_id: Message format version ID :return: Message format version :rtype: SerializationVersion :raises NotSupportedError: if unsupported version ID is received
[ "Load", "a", "message", ":", "class", ":", "SerializationVersion", "for", "the", "specified", "version", "ID", "." ]
python
train
40.538462
robinandeer/puzzle
puzzle/log.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/log.py#L7-L31
def configure_stream(level='WARNING'): """Configure root logger using a standard stream handler. Args: level (string, optional): lowest level to log to the console Returns: logging.RootLogger: root logger instance with attached handler """ # get the root logger root_logger = logging.getLogger() # set the logger level to the same as will be used by the handler root_logger.setLevel(level) # customize formatter, align each column template = "[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s" formatter = logging.Formatter(template) # add a basic STDERR handler to the logger console = logging.StreamHandler() console.setLevel(level) console.setFormatter(formatter) root_logger.addHandler(console) return root_logger
[ "def", "configure_stream", "(", "level", "=", "'WARNING'", ")", ":", "# get the root logger", "root_logger", "=", "logging", ".", "getLogger", "(", ")", "# set the logger level to the same as will be used by the handler", "root_logger", ".", "setLevel", "(", "level", ")", "# customize formatter, align each column", "template", "=", "\"[%(asctime)s] %(name)-25s %(levelname)-8s %(message)s\"", "formatter", "=", "logging", ".", "Formatter", "(", "template", ")", "# add a basic STDERR handler to the logger", "console", "=", "logging", ".", "StreamHandler", "(", ")", "console", ".", "setLevel", "(", "level", ")", "console", ".", "setFormatter", "(", "formatter", ")", "root_logger", ".", "addHandler", "(", "console", ")", "return", "root_logger" ]
Configure root logger using a standard stream handler. Args: level (string, optional): lowest level to log to the console Returns: logging.RootLogger: root logger instance with attached handler
[ "Configure", "root", "logger", "using", "a", "standard", "stream", "handler", "." ]
python
train
31.44
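Minimal usage, assuming the function is imported from the module at the path above:

import logging
from puzzle.log import configure_stream

configure_stream(level='DEBUG')
logging.getLogger('puzzle.demo').debug('stream handler and formatter attached')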
coinbase/coinbase-python
coinbase/wallet/client.py
https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L534-L539
def commit_withdrawal(self, account_id, withdrawal_id, **params): """https://developers.coinbase.com/api/v2#commit-a-withdrawal""" response = self._post( 'v2', 'accounts', account_id, 'withdrawals', withdrawal_id, 'commit', data=params) return self._make_api_object(response, Withdrawal)
[ "def", "commit_withdrawal", "(", "self", ",", "account_id", ",", "withdrawal_id", ",", "*", "*", "params", ")", ":", "response", "=", "self", ".", "_post", "(", "'v2'", ",", "'accounts'", ",", "account_id", ",", "'withdrawals'", ",", "withdrawal_id", ",", "'commit'", ",", "data", "=", "params", ")", "return", "self", ".", "_make_api_object", "(", "response", ",", "Withdrawal", ")" ]
https://developers.coinbase.com/api/v2#commit-a-withdrawal
[ "https", ":", "//", "developers", ".", "coinbase", ".", "com", "/", "api", "/", "v2#commit", "-", "a", "-", "withdrawal" ]
python
train
55
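A usage sketch with placeholder credentials and ids; the Client import matches the package path above, while the status attribute on the returned Withdrawal is an assumption:

from coinbase.wallet.client import Client

client = Client('API_KEY', 'API_SECRET')                       # placeholder credentials
withdrawal = client.commit_withdrawal('ACCOUNT_ID', 'WITHDRAWAL_ID')
print(withdrawal.status)                                       # assumed attribute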
skggm/skggm
inverse_covariance/profiling/graphs.py
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/graphs.py#L5-L61
def lattice(prng, n_features, alpha, random_sign=False, low=0.3, high=0.7): """Returns the adjacency matrix for a lattice network. The resulting network is a Toeplitz matrix with random values summing between -1 and 1 and zeros along the diagonal. The range of the values can be controlled via the parameters low and high. If random_sign is false, all entries will be negative, otherwise their sign will be modulated at random with probability 1/2. Each row has maximum edges of np.ceil(alpha * n_features). Parameters ----------- n_features : int alpha : float (0, 1) The complexity / sparsity factor. random sign : bool (default=False) Randomly modulate each entry by 1 or -1 with probability of 1/2. low : float (0, 1) (default=0.3) Lower bound for np.random.RandomState.uniform before normalization. high : float (0, 1) > low (default=0.7) Upper bound for np.random.RandomState.uniform before normalization. """ degree = int(1 + np.round(alpha * n_features / 2.)) if random_sign: sign_row = -1.0 * np.ones(degree) + 2 * ( prng.uniform(low=0, high=1, size=degree) > .5 ) else: sign_row = -1.0 * np.ones(degree) # in the *very unlikely* event that we draw a bad row that sums to zero # (which is only possible when random_sign=True), we try again up to # MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of # values something is probably wrong and we raise. MAX_ATTEMPTS = 5 attempt = 0 row = np.zeros((n_features,)) while np.sum(row) == 0 and attempt < MAX_ATTEMPTS: row = np.zeros((n_features,)) row[1 : 1 + degree] = sign_row * prng.uniform(low=low, high=high, size=degree) attempt += 1 if np.sum(row) == 0: raise Exception("InvalidLattice", "Rows sum to 0.") return # sum-normalize and keep signs row /= np.abs(np.sum(row)) return sp.linalg.toeplitz(c=row, r=row)
[ "def", "lattice", "(", "prng", ",", "n_features", ",", "alpha", ",", "random_sign", "=", "False", ",", "low", "=", "0.3", ",", "high", "=", "0.7", ")", ":", "degree", "=", "int", "(", "1", "+", "np", ".", "round", "(", "alpha", "*", "n_features", "/", "2.", ")", ")", "if", "random_sign", ":", "sign_row", "=", "-", "1.0", "*", "np", ".", "ones", "(", "degree", ")", "+", "2", "*", "(", "prng", ".", "uniform", "(", "low", "=", "0", ",", "high", "=", "1", ",", "size", "=", "degree", ")", ">", ".5", ")", "else", ":", "sign_row", "=", "-", "1.0", "*", "np", ".", "ones", "(", "degree", ")", "# in the *very unlikely* event that we draw a bad row that sums to zero", "# (which is only possible when random_sign=True), we try again up to", "# MAX_ATTEMPTS=5 times. If we are still unable to draw a good set of", "# values something is probably wrong and we raise.", "MAX_ATTEMPTS", "=", "5", "attempt", "=", "0", "row", "=", "np", ".", "zeros", "(", "(", "n_features", ",", ")", ")", "while", "np", ".", "sum", "(", "row", ")", "==", "0", "and", "attempt", "<", "MAX_ATTEMPTS", ":", "row", "=", "np", ".", "zeros", "(", "(", "n_features", ",", ")", ")", "row", "[", "1", ":", "1", "+", "degree", "]", "=", "sign_row", "*", "prng", ".", "uniform", "(", "low", "=", "low", ",", "high", "=", "high", ",", "size", "=", "degree", ")", "attempt", "+=", "1", "if", "np", ".", "sum", "(", "row", ")", "==", "0", ":", "raise", "Exception", "(", "\"InvalidLattice\"", ",", "\"Rows sum to 0.\"", ")", "return", "# sum-normalize and keep signs", "row", "/=", "np", ".", "abs", "(", "np", ".", "sum", "(", "row", ")", ")", "return", "sp", ".", "linalg", ".", "toeplitz", "(", "c", "=", "row", ",", "r", "=", "row", ")" ]
Returns the adjacency matrix for a lattice network. The resulting network is a Toeplitz matrix with random values summing between -1 and 1 and zeros along the diagonal. The range of the values can be controlled via the parameters low and high. If random_sign is false, all entries will be negative, otherwise their sign will be modulated at random with probability 1/2. Each row has maximum edges of np.ceil(alpha * n_features). Parameters ----------- n_features : int alpha : float (0, 1) The complexity / sparsity factor. random_sign : bool (default=False) Randomly modulate each entry by 1 or -1 with probability of 1/2. low : float (0, 1) (default=0.3) Lower bound for np.random.RandomState.uniform before normalization. high : float (0, 1) > low (default=0.7) Upper bound for np.random.RandomState.uniform before normalization.
[ "Returns", "the", "adjacency", "matrix", "for", "a", "lattice", "network", "." ]
python
train
34.561404
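A quick check of the function above; numpy's RandomState stands in for prng, and the asserted properties follow directly from the code (toeplitz with c == r, and row[0] == 0):

import numpy as np

prng = np.random.RandomState(1)
adj = lattice(prng, n_features=8, alpha=0.3, random_sign=True)
assert adj.shape == (8, 8)
assert np.allclose(adj, adj.T)         # c == r makes the Toeplitz matrix symmetric
assert np.allclose(np.diag(adj), 0)    # row[0] == 0 puts zeros on the diagonal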
miso-belica/sumy
sumy/evaluation/content_based.py
https://github.com/miso-belica/sumy/blob/099ab4938e2c1b6a011297375586bac2953641b9/sumy/evaluation/content_based.py#L36-L57
def unit_overlap(evaluated_model, reference_model): """ Computes unit overlap of two text documents. Documents have to be represented as TF models of non-empty documents. :returns float: 0 <= overlap <= 1, where 0 means no match and 1 means exactly the same. """ if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)): raise ValueError( "Arguments has to be instances of 'sumy.models.TfDocumentModel'") terms1 = frozenset(evaluated_model.terms) terms2 = frozenset(reference_model.terms) if not terms1 and not terms2: raise ValueError( "Documents can't be empty. Please pass the valid documents.") common_terms_count = len(terms1 & terms2) return common_terms_count / (len(terms1) + len(terms2) - common_terms_count)
[ "def", "unit_overlap", "(", "evaluated_model", ",", "reference_model", ")", ":", "if", "not", "(", "isinstance", "(", "evaluated_model", ",", "TfModel", ")", "and", "isinstance", "(", "reference_model", ",", "TfModel", ")", ")", ":", "raise", "ValueError", "(", "\"Arguments has to be instances of 'sumy.models.TfDocumentModel'\"", ")", "terms1", "=", "frozenset", "(", "evaluated_model", ".", "terms", ")", "terms2", "=", "frozenset", "(", "reference_model", ".", "terms", ")", "if", "not", "terms1", "and", "not", "terms2", ":", "raise", "ValueError", "(", "\"Documents can't be empty. Please pass the valid documents.\"", ")", "common_terms_count", "=", "len", "(", "terms1", "&", "terms2", ")", "return", "common_terms_count", "/", "(", "len", "(", "terms1", ")", "+", "len", "(", "terms2", ")", "-", "common_terms_count", ")" ]
Computes unit overlap of two text documents. Documents have to be represented as TF models of non-empty documents. :returns float: 0 <= overlap <= 1, where 0 means no match and 1 means exactly the same.
[ "Computes", "unit", "overlap", "of", "two", "text", "documents", ".", "Documents", "have", "to", "be", "represented", "as", "TF", "models", "of", "non", "-", "empty", "documents", "." ]
python
train
37.636364
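The returned ratio is the Jaccard index over the two term sets; a worked instance of the final formula, kept standalone so it does not need TfModel:

terms1 = frozenset(['cat', 'dog', 'bird'])
terms2 = frozenset(['dog', 'bird', 'fish'])
common = len(terms1 & terms2)                                   # 2
overlap = common / float(len(terms1) + len(terms2) - common)    # 2 / (3 + 3 - 2) = 0.5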
textbook/atmdb
atmdb/client.py
https://github.com/textbook/atmdb/blob/cab14547d2e777a1e26c2560266365c484855789/atmdb/client.py#L59-L94
async def get_data(self, url): """Get data from the TMDb API via :py:func:`aiohttp.get`. Notes: Updates configuration (if required) on successful requests. Arguments: url (:py:class:`str`): The endpoint URL and params. Returns: :py:class:`dict`: The parsed JSON result. """ logger.debug('making request to %r', url) with aiohttp.ClientSession() as session: async with session.get(url, headers=self.headers) as response: body = json.loads((await response.read()).decode('utf-8')) if response.status == HTTPStatus.OK: if url != self.url_builder('configuration'): await self._update_config() return body elif response.status == HTTPStatus.TOO_MANY_REQUESTS: timeout = self.calculate_timeout( response.headers['Retry-After'], ) logger.warning( 'Request limit exceeded, waiting %s seconds', timeout, ) await asyncio.sleep(timeout) return await self.get_data(url) logger.warning( 'request failed %s: %r', response.status, body.get('status_message', '<no message>') )
[ "async", "def", "get_data", "(", "self", ",", "url", ")", ":", "logger", ".", "debug", "(", "'making request to %r'", ",", "url", ")", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "async", "with", "session", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "headers", ")", "as", "response", ":", "body", "=", "json", ".", "loads", "(", "(", "await", "response", ".", "read", "(", ")", ")", ".", "decode", "(", "'utf-8'", ")", ")", "if", "response", ".", "status", "==", "HTTPStatus", ".", "OK", ":", "if", "url", "!=", "self", ".", "url_builder", "(", "'configuration'", ")", ":", "await", "self", ".", "_update_config", "(", ")", "return", "body", "elif", "response", ".", "status", "==", "HTTPStatus", ".", "TOO_MANY_REQUESTS", ":", "timeout", "=", "self", ".", "calculate_timeout", "(", "response", ".", "headers", "[", "'Retry-After'", "]", ",", ")", "logger", ".", "warning", "(", "'Request limit exceeded, waiting %s seconds'", ",", "timeout", ",", ")", "await", "asyncio", ".", "sleep", "(", "timeout", ")", "return", "await", "self", ".", "get_data", "(", "url", ")", "logger", ".", "warning", "(", "'request failed %s: %r'", ",", "response", ".", "status", ",", "body", ".", "get", "(", "'status_message'", ",", "'<no message>'", ")", ")" ]
Get data from the TMDb API via :py:func:`aiohttp.get`. Notes: Updates configuration (if required) on successful requests. Arguments: url (:py:class:`str`): The endpoint URL and params. Returns: :py:class:`dict`: The parsed JSON result.
[ "Get", "data", "from", "the", "TMDb", "API", "via", ":", "py", ":", "func", ":", "aiohttp", ".", "get", "." ]
python
train
39.527778
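A sketch of driving the coroutine above; the TMDbClient import and constructor signature are assumptions about the package's public entry point, while url_builder('configuration') is the endpoint referenced in the method body:

import asyncio
from atmdb import TMDbClient                       # assumed public entry point

async def show_config(client):
    url = client.url_builder('configuration')
    return await client.get_data(url)

client = TMDbClient(api_token='YOUR_TOKEN')        # assumed constructor signature
data = asyncio.get_event_loop().run_until_complete(show_config(client))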
libyal/libbde
setup.py
https://github.com/libyal/libbde/blob/5f59d11dbb52690b4155f2cc3fcb1ac512d076a8/setup.py#L60-L75
def _RunCommand(self, command): """Runs the command.""" arguments = shlex.split(command) process = subprocess.Popen( arguments, stderr=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) if not process: raise RuntimeError("Running: {0:s} failed.".format(command)) output, error = process.communicate() if process.returncode != 0: error = "\n".join(error.split("\n")[-5:]) raise RuntimeError("Running: {0:s} failed with error:\n{1:s}.".format( command, error)) return output
[ "def", "_RunCommand", "(", "self", ",", "command", ")", ":", "arguments", "=", "shlex", ".", "split", "(", "command", ")", "process", "=", "subprocess", ".", "Popen", "(", "arguments", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "universal_newlines", "=", "True", ")", "if", "not", "process", ":", "raise", "RuntimeError", "(", "\"Running: {0:s} failed.\"", ".", "format", "(", "command", ")", ")", "output", ",", "error", "=", "process", ".", "communicate", "(", ")", "if", "process", ".", "returncode", "!=", "0", ":", "error", "=", "\"\\n\"", ".", "join", "(", "error", ".", "split", "(", "\"\\n\"", ")", "[", "-", "5", ":", "]", ")", "raise", "RuntimeError", "(", "\"Running: {0:s} failed with error:\\n{1:s}.\"", ".", "format", "(", "command", ",", "error", ")", ")", "return", "output" ]
Runs the command.
[ "Runs", "the", "command", "." ]
python
train
34.125
assemblerflow/flowcraft
flowcraft/templates/process_viral_assembly.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_viral_assembly.py#L451-L562
def main(sample_id, assembly_file, minsize): """Main executor of the process_mapping template. Parameters ---------- sample_id : str Sample Identification string. assembly: str Path to the fatsa file generated by the assembler. minsize: str Min contig size to be considered a complete ORF """ logger.info("Starting assembly file processing") warnings = [] fails = "" # Parse the spades assembly file and perform the first filtering. logger.info("Starting assembly parsing") assembly_obj = Assembly(assembly_file, 0, 0, sample_id, minsize) if 'spades' in assembly_file: assembler = "SPAdes" else: assembler = "MEGAHIT" with open(".warnings", "w") as warn_fh: t_80 = int(minsize) * 0.8 t_150 = int(minsize) * 1.5 # Check if assembly size of the first assembly is lower than 80% of the # estimated genome size - DENV ORF has min 10k nt. If True, redo the filtering without the # k-mer coverage filter assembly_len = assembly_obj.get_assembly_length() logger.debug("Checking assembly length: {}".format(assembly_len)) if assembly_obj.nORFs < 1: warn_msg = "No complete ORFs found." warn_fh.write(warn_msg) fails = warn_msg if assembly_len < t_80: logger.warning("Assembly size ({}) smaller than the minimum " "threshold of 80% of expected genome size. " "Applying contig filters without the k-mer " "coverage filter".format(assembly_len)) assembly_len = assembly_obj.get_assembly_length() logger.debug("Checking updated assembly length: " "{}".format(assembly_len)) if assembly_len < t_80: warn_msg = "Assembly size smaller than the minimum" \ " threshold of 80% of expected genome size: {}".format( assembly_len) logger.warning(warn_msg) warn_fh.write(warn_msg) fails = warn_msg if assembly_len > t_150: warn_msg = "Assembly size ({}) larger than the maximum" \ " threshold of 150% of expected genome size.".format( assembly_len) logger.warning(warn_msg) warn_fh.write(warn_msg) fails = warn_msg # Write json report with open(".report.json", "w") as json_report: json_dic = { "tableRow": [{ "sample": sample_id, "data": [ {"header": "Contigs ({})".format(assembler), "value": len(assembly_obj.contigs), "table": "assembly", "columnBar": True}, {"header": "Assembled BP ({})".format(assembler), "value": assembly_len, "table": "assembly", "columnBar": True}, {"header": "ORFs", "value": assembly_obj.nORFs, "table": "assembly", "columnBar":False} ] }], } if warnings: json_dic["warnings"] = [{ "sample": sample_id, "table": "assembly", "value": warnings }] if fails: json_dic["fail"] = [{ "sample": sample_id, "table": "assembly", "value": [fails] }] json_report.write(json.dumps(json_dic, separators=(",", ":"))) with open(".status", "w") as status_fh: status_fh.write("pass")
[ "def", "main", "(", "sample_id", ",", "assembly_file", ",", "minsize", ")", ":", "logger", ".", "info", "(", "\"Starting assembly file processing\"", ")", "warnings", "=", "[", "]", "fails", "=", "\"\"", "# Parse the spades assembly file and perform the first filtering.", "logger", ".", "info", "(", "\"Starting assembly parsing\"", ")", "assembly_obj", "=", "Assembly", "(", "assembly_file", ",", "0", ",", "0", ",", "sample_id", ",", "minsize", ")", "if", "'spades'", "in", "assembly_file", ":", "assembler", "=", "\"SPAdes\"", "else", ":", "assembler", "=", "\"MEGAHIT\"", "with", "open", "(", "\".warnings\"", ",", "\"w\"", ")", "as", "warn_fh", ":", "t_80", "=", "int", "(", "minsize", ")", "*", "0.8", "t_150", "=", "int", "(", "minsize", ")", "*", "1.5", "# Check if assembly size of the first assembly is lower than 80% of the", "# estimated genome size - DENV ORF has min 10k nt. If True, redo the filtering without the", "# k-mer coverage filter", "assembly_len", "=", "assembly_obj", ".", "get_assembly_length", "(", ")", "logger", ".", "debug", "(", "\"Checking assembly length: {}\"", ".", "format", "(", "assembly_len", ")", ")", "if", "assembly_obj", ".", "nORFs", "<", "1", ":", "warn_msg", "=", "\"No complete ORFs found.\"", "warn_fh", ".", "write", "(", "warn_msg", ")", "fails", "=", "warn_msg", "if", "assembly_len", "<", "t_80", ":", "logger", ".", "warning", "(", "\"Assembly size ({}) smaller than the minimum \"", "\"threshold of 80% of expected genome size. \"", "\"Applying contig filters without the k-mer \"", "\"coverage filter\"", ".", "format", "(", "assembly_len", ")", ")", "assembly_len", "=", "assembly_obj", ".", "get_assembly_length", "(", ")", "logger", ".", "debug", "(", "\"Checking updated assembly length: \"", "\"{}\"", ".", "format", "(", "assembly_len", ")", ")", "if", "assembly_len", "<", "t_80", ":", "warn_msg", "=", "\"Assembly size smaller than the minimum\"", "\" threshold of 80% of expected genome size: {}\"", ".", "format", "(", "assembly_len", ")", "logger", ".", "warning", "(", "warn_msg", ")", "warn_fh", ".", "write", "(", "warn_msg", ")", "fails", "=", "warn_msg", "if", "assembly_len", ">", "t_150", ":", "warn_msg", "=", "\"Assembly size ({}) larger than the maximum\"", "\" threshold of 150% of expected genome size.\"", ".", "format", "(", "assembly_len", ")", "logger", ".", "warning", "(", "warn_msg", ")", "warn_fh", ".", "write", "(", "warn_msg", ")", "fails", "=", "warn_msg", "# Write json report", "with", "open", "(", "\".report.json\"", ",", "\"w\"", ")", "as", "json_report", ":", "json_dic", "=", "{", "\"tableRow\"", ":", "[", "{", "\"sample\"", ":", "sample_id", ",", "\"data\"", ":", "[", "{", "\"header\"", ":", "\"Contigs ({})\"", ".", "format", "(", "assembler", ")", ",", "\"value\"", ":", "len", "(", "assembly_obj", ".", "contigs", ")", ",", "\"table\"", ":", "\"assembly\"", ",", "\"columnBar\"", ":", "True", "}", ",", "{", "\"header\"", ":", "\"Assembled BP ({})\"", ".", "format", "(", "assembler", ")", ",", "\"value\"", ":", "assembly_len", ",", "\"table\"", ":", "\"assembly\"", ",", "\"columnBar\"", ":", "True", "}", ",", "{", "\"header\"", ":", "\"ORFs\"", ",", "\"value\"", ":", "assembly_obj", ".", "nORFs", ",", "\"table\"", ":", "\"assembly\"", ",", "\"columnBar\"", ":", "False", "}", "]", "}", "]", ",", "}", "if", "warnings", ":", "json_dic", "[", "\"warnings\"", "]", "=", "[", "{", "\"sample\"", ":", "sample_id", ",", "\"table\"", ":", "\"assembly\"", ",", "\"value\"", ":", "warnings", "}", "]", "if", "fails", ":", "json_dic", "[", 
"\"fail\"", "]", "=", "[", "{", "\"sample\"", ":", "sample_id", ",", "\"table\"", ":", "\"assembly\"", ",", "\"value\"", ":", "[", "fails", "]", "}", "]", "json_report", ".", "write", "(", "json", ".", "dumps", "(", "json_dic", ",", "separators", "=", "(", "\",\"", ",", "\":\"", ")", ")", ")", "with", "open", "(", "\".status\"", ",", "\"w\"", ")", "as", "status_fh", ":", "status_fh", ".", "write", "(", "\"pass\"", ")" ]
Main executor of the process_viral_assembly template. Parameters ---------- sample_id : str Sample Identification string. assembly_file : str Path to the fasta file generated by the assembler. minsize : str Min contig size to be considered a complete ORF
[ "Main", "executor", "of", "the", "process_viral_assembly", "template", "." ]
python
test
33.3125
mseclab/PyJFuzz
pyjfuzz/core/pjf_mutators.py
https://github.com/mseclab/PyJFuzz/blob/f777067076f62c9ab74ffea6e90fd54402b7a1b4/pyjfuzz/core/pjf_mutators.py#L133-L140
def get_mutator(self, obj, obj_type): """ Get a random mutator for the given type """ if obj_type == unicode: obj_type = str obj = str(obj) return self._get_random(obj_type)(obj)
[ "def", "get_mutator", "(", "self", ",", "obj", ",", "obj_type", ")", ":", "if", "obj_type", "==", "unicode", ":", "obj_type", "=", "str", "obj", "=", "str", "(", "obj", ")", "return", "self", ".", "_get_random", "(", "obj_type", ")", "(", "obj", ")" ]
Get a random mutator for the given type
[ "Get", "a", "random", "mutator", "for", "the", "given", "type" ]
python
test
29.375
fr33jc/bang
bang/providers/hpcloud/load_balancer.py
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/hpcloud/load_balancer.py#L60-L75
def find_lb_by_name(self, name): """ Look up a LBaaS instance by name (rather than id) :attr string name: The LBaaS name assigned at creation time :rtype :class:`dict` """ log.debug("Finding load balancers matching name '%s'" % name) matching = filter(lambda l: l['name'] == name, self.list_lbs()) if len(matching) > 1: raise ValueError("Ambiguous; more than one load balancer matched '%s'" % name) if matching: log.info("Found existing load balancer, %s" % matching[0]['id']) return matching[0] return None
[ "def", "find_lb_by_name", "(", "self", ",", "name", ")", ":", "log", ".", "debug", "(", "\"Finding load balancers matching name '%s'\"", "%", "name", ")", "matching", "=", "filter", "(", "lambda", "l", ":", "l", "[", "'name'", "]", "==", "name", ",", "self", ".", "list_lbs", "(", ")", ")", "if", "len", "(", "matching", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Ambiguous; more than one load balancer matched '%s'\"", "%", "name", ")", "if", "matching", ":", "log", ".", "info", "(", "\"Found existing load balancer, %s\"", "%", "matching", "[", "0", "]", "[", "'id'", "]", ")", "return", "matching", "[", "0", "]", "return", "None" ]
Look up a LBaaS instance by name (rather than id) :attr string name: The LBaaS name assigned at creation time :rtype :class:`dict`
[ "Look", "up", "a", "LBaaS", "instance", "by", "name", "(", "rather", "than", "id", ")" ]
python
train
38.1875
tanghaibao/goatools
goatools/associations.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/associations.py#L40-L47
def dnld_annotation(assc_file, prt=sys.stdout): """Download gaf, gpad, or gpi from http://current.geneontology.org/annotations/""" if not os.path.isfile(assc_file): # assc_http = "http://geneontology.org/gene-associations/" assc_http = "http://current.geneontology.org/annotations/" _, assc_base = os.path.split(assc_file) src = os.path.join(assc_http, "{ASSC}.gz".format(ASSC=assc_base)) dnld_file(src, assc_file, prt, loading_bar=None)
[ "def", "dnld_annotation", "(", "assc_file", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "assc_file", ")", ":", "# assc_http = \"http://geneontology.org/gene-associations/\"", "assc_http", "=", "\"http://current.geneontology.org/annotations/\"", "_", ",", "assc_base", "=", "os", ".", "path", ".", "split", "(", "assc_file", ")", "src", "=", "os", ".", "path", ".", "join", "(", "assc_http", ",", "\"{ASSC}.gz\"", ".", "format", "(", "ASSC", "=", "assc_base", ")", ")", "dnld_file", "(", "src", ",", "assc_file", ",", "prt", ",", "loading_bar", "=", "None", ")" ]
Download gaf, gpad, or gpi from http://current.geneontology.org/annotations/
[ "Download", "gaf", "gpad", "or", "gpi", "from", "http", ":", "//", "current", ".", "geneontology", ".", "org", "/", "annotations", "/" ]
python
train
59.75
abe-winter/pg13-py
pg13/pg.py
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/pg.py#L286-L291
def delete(self,pool_or_cursor): ".. warning:: pgmock doesn't support delete yet, so this isn't tested" vals=self.pkey_vals() whereclause=' and '.join('%s=%%s'%k for k in self.PKEY.split(',')) q='delete from %s where %s'%(self.TABLE,whereclause) commit_or_execute(pool_or_cursor,q,vals)
[ "def", "delete", "(", "self", ",", "pool_or_cursor", ")", ":", "vals", "=", "self", ".", "pkey_vals", "(", ")", "whereclause", "=", "' and '", ".", "join", "(", "'%s=%%s'", "%", "k", "for", "k", "in", "self", ".", "PKEY", ".", "split", "(", "','", ")", ")", "q", "=", "'delete from %s where %s'", "%", "(", "self", ".", "TABLE", ",", "whereclause", ")", "commit_or_execute", "(", "pool_or_cursor", ",", "q", ",", "vals", ")" ]
.. warning:: pgmock doesn't support delete yet, so this isn't tested
[ "..", "warning", "::", "pgmock", "doesn", "t", "support", "delete", "yet", "so", "this", "isn", "t", "tested" ]
python
train
51
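For concreteness, the statement the method above renders for a hypothetical model (the TABLE and PKEY values are made up):

PKEY, TABLE = 'id,org_id', 'users'                  # hypothetical model attributes
whereclause = ' and '.join('%s=%%s' % k for k in PKEY.split(','))
print('delete from %s where %s' % (TABLE, whereclause))
# -> delete from users where id=%s and org_id=%s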
waleedka/hiddenlayer
demos/tf_cifar10.py
https://github.com/waleedka/hiddenlayer/blob/294f8732b271cbdd6310c55bdf5ce855cbf61c75/demos/tf_cifar10.py#L121-L169
def model(self, inputs, mode='train'): """Build a simple convnet (BN before ReLU). Args: inputs: a tensor of size [batch_size, height, width, channels] mode: string in ['train', 'test'] Returns: the last op containing the predictions Note: Best score Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656 Worst score Step: 7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874 """ # Extract features training = (mode == 'train') with tf.variable_scope('conv1') as scope: conv = tf.layers.conv2d(inputs=inputs, filters=16, kernel_size=[3, 3], padding='SAME') bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) conv = tf.layers.conv2d(inputs=bn, filters=16, kernel_size=[3, 3], padding='SAME') bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name) with tf.variable_scope('conv2') as scope: conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME') bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME') bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name) with tf.variable_scope('conv3') as scope: conv = tf.layers.conv2d(inputs=pool, filters=32, kernel_size=[3, 3], padding='SAME') bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) conv = tf.layers.conv2d(inputs=bn, filters=32, kernel_size=[3, 3], padding='SAME') bn = tf.layers.batch_normalization(inputs=conv, training=training) bn = tf.nn.relu(bn) pool = tf.layers.max_pooling2d(bn, pool_size=[2, 2], strides=2, padding='SAME', name=scope.name) # Classify with tf.variable_scope('fc') as scope: flat = tf.layers.flatten(pool) fc = tf.layers.dense(inputs=flat, units=32, activation=tf.nn.relu) softmax = tf.layers.dense(inputs=fc, units=self.num_classes, activation=tf.nn.softmax) return softmax
[ "def", "model", "(", "self", ",", "inputs", ",", "mode", "=", "'train'", ")", ":", "# Extract features", "training", "=", "(", "mode", "==", "'train'", ")", "with", "tf", ".", "variable_scope", "(", "'conv1'", ")", "as", "scope", ":", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "inputs", ",", "filters", "=", "16", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "bn", ",", "filters", "=", "16", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "pool", "=", "tf", ".", "layers", ".", "max_pooling2d", "(", "bn", ",", "pool_size", "=", "[", "2", ",", "2", "]", ",", "strides", "=", "2", ",", "padding", "=", "'SAME'", ",", "name", "=", "scope", ".", "name", ")", "with", "tf", ".", "variable_scope", "(", "'conv2'", ")", "as", "scope", ":", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "pool", ",", "filters", "=", "32", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "bn", ",", "filters", "=", "32", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "pool", "=", "tf", ".", "layers", ".", "max_pooling2d", "(", "bn", ",", "pool_size", "=", "[", "2", ",", "2", "]", ",", "strides", "=", "2", ",", "padding", "=", "'SAME'", ",", "name", "=", "scope", ".", "name", ")", "with", "tf", ".", "variable_scope", "(", "'conv3'", ")", "as", "scope", ":", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "pool", ",", "filters", "=", "32", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "conv", "=", "tf", ".", "layers", ".", "conv2d", "(", "inputs", "=", "bn", ",", "filters", "=", "32", ",", "kernel_size", "=", "[", "3", ",", "3", "]", ",", "padding", "=", "'SAME'", ")", "bn", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "inputs", "=", "conv", ",", "training", "=", "training", ")", "bn", "=", "tf", ".", "nn", ".", "relu", "(", "bn", ")", "pool", "=", "tf", ".", "layers", ".", "max_pooling2d", "(", "bn", ",", "pool_size", "=", "[", "2", ",", "2", "]", ",", "strides", "=", "2", ",", "padding", "=", "'SAME'", ",", "name", "=", "scope", ".", "name", ")", "# Classify", "with", "tf", ".", "variable_scope", "(", "'fc'", ")", "as", "scope", ":", "flat", "=", "tf", ".", "layers", ".", "flatten", "(", "pool", ")", "fc", "=", "tf", ".", "layers", ".", "dense", "(", "inputs", "=", "flat", ",", "units", "=", "32", ",", 
"activation", "=", "tf", ".", "nn", ".", "relu", ")", "softmax", "=", "tf", ".", "layers", ".", "dense", "(", "inputs", "=", "fc", ",", "units", "=", "self", ".", "num_classes", ",", "activation", "=", "tf", ".", "nn", ".", "softmax", ")", "return", "softmax" ]
Build a simple convnet (BN before ReLU). Args: inputs: a tensor of size [batch_size, height, width, channels] mode: string in ['train', 'test'] Returns: the last op containing the predictions Note: Best score Step: 7015 - Epoch: 18/20 - best batch acc: 0.8984 - loss: 1.5656 Worst score Step: 7523 - Epoch: 20/20 - best batch acc: 0.7734 - loss: 1.6874
[ "Build", "a", "simple", "convnet", "(", "BN", "before", "ReLU", ")", ".", "Args", ":", "inputs", ":", "a", "tensor", "of", "size", "[", "batch_size", "height", "width", "channels", "]", "mode", ":", "string", "in", "[", "train", "test", "]", "Returns", ":", "the", "last", "op", "containing", "the", "predictions", "Note", ":", "Best", "score", "Step", ":", "7015", "-", "Epoch", ":", "18", "/", "20", "-", "best", "batch", "acc", ":", "0", ".", "8984", "-", "loss", ":", "1", ".", "5656", "Worst", "score", "Step", ":", "7523", "-", "Epoch", ":", "20", "/", "20", "-", "best", "batch", "acc", ":", "0", ".", "7734", "-", "loss", ":", "1", ".", "6874" ]
python
train
52.55102
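A hedged TF1-style driver for the method above; the owning class name is a guess from the file name, and the input shape is standard CIFAR-10:

import tensorflow as tf

net = TfCifar10()                                         # hypothetical owning class
images = tf.placeholder(tf.float32, [None, 32, 32, 3])    # CIFAR-10 batches
probs = net.model(images, mode='train')                   # [batch, num_classes] softmax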
Miserlou/Zappa
zappa/utilities.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L442-L455
def check_new_version_available(this_version): """ Checks if a newer version of Zappa is available. Returns True if an update is available, else False. """ import requests pypi_url = 'https://pypi.python.org/pypi/Zappa/json' resp = requests.get(pypi_url, timeout=1.5) top_version = resp.json()['info']['version'] return this_version != top_version
[ "def", "check_new_version_available", "(", "this_version", ")", ":", "import", "requests", "pypi_url", "=", "'https://pypi.python.org/pypi/Zappa/json'", "resp", "=", "requests", ".", "get", "(", "pypi_url", ",", "timeout", "=", "1.5", ")", "top_version", "=", "resp", ".", "json", "(", ")", "[", "'info'", "]", "[", "'version'", "]", "return", "this_version", "!=", "top_version" ]
Checks if a newer version of Zappa is available. Returns True if an update is available, else False.
[ "Checks", "if", "a", "newer", "version", "of", "Zappa", "is", "available", "." ]
python
train
25.857143
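Usage sketch with a placeholder for the running version; note the check is an inequality, so any difference from the PyPI release counts as updatable:

if check_new_version_available('0.45.1'):    # placeholder installed version
    print('A different (presumably newer) Zappa release is on PyPI')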
yychen/twd97
twd97/converter.py
https://github.com/yychen/twd97/blob/2fe05dbca335be425a1f451e0ef8f210ec864de1/twd97/converter.py#L69-L78
def tomindec(origin): """ Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes) """ origin = float(origin) degrees = int(origin) minutes = (origin % 1) * 60 return degrees, minutes
[ "def", "tomindec", "(", "origin", ")", ":", "origin", "=", "float", "(", "origin", ")", "degrees", "=", "int", "(", "origin", ")", "minutes", "=", "(", "origin", "%", "1", ")", "*", "60", "return", "degrees", ",", "minutes" ]
Convert [+/-]DDD.DDDDD to a tuple (degrees, minutes)
[ "Convert", "[", "+", "/", "-", "]", "DDD", ".", "DDDDD", "to", "a", "tuple", "(", "degrees", "minutes", ")" ]
python
train
20
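A worked conversion: the fractional part 0.5654 degrees times 60 gives 33.924 minutes:

degrees, minutes = tomindec('121.5654')
assert degrees == 121
assert abs(minutes - 33.924) < 1e-6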
caktus/django-timepiece
timepiece/utils/__init__.py
https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/utils/__init__.py#L31-L42
def get_active_entry(user, select_for_update=False): """Returns the user's currently-active entry, or None.""" entries = apps.get_model('entries', 'Entry').no_join if select_for_update: entries = entries.select_for_update() entries = entries.filter(user=user, end_time__isnull=True) if not entries.exists(): return None if entries.count() > 1: raise ActiveEntryError('Only one active entry is allowed.') return entries[0]
[ "def", "get_active_entry", "(", "user", ",", "select_for_update", "=", "False", ")", ":", "entries", "=", "apps", ".", "get_model", "(", "'entries'", ",", "'Entry'", ")", ".", "no_join", "if", "select_for_update", ":", "entries", "=", "entries", ".", "select_for_update", "(", ")", "entries", "=", "entries", ".", "filter", "(", "user", "=", "user", ",", "end_time__isnull", "=", "True", ")", "if", "not", "entries", ".", "exists", "(", ")", ":", "return", "None", "if", "entries", ".", "count", "(", ")", ">", "1", ":", "raise", "ActiveEntryError", "(", "'Only one active entry is allowed.'", ")", "return", "entries", "[", "0", "]" ]
Returns the user's currently-active entry, or None.
[ "Returns", "the", "user", "s", "currently", "-", "active", "entry", "or", "None", "." ]
python
train
38.583333
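A sketch of the intended call site; request.user comes from a hypothetical Django view, and wrapping the call in transaction.atomic() is what makes select_for_update=True hold the row lock:

from django.db import transaction
from django.utils import timezone

with transaction.atomic():
    entry = get_active_entry(request.user, select_for_update=True)
    if entry is not None:
        entry.end_time = timezone.now()    # hypothetical clock-out step
        entry.save()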
zsimic/runez
src/runez/config.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/config.py#L137-L152
def get_str(self, key, default=None): """ Args: key (str | unicode | None): Key to lookup default (str | unicode | None): Default to use if key is not configured Returns: (str | None): Value of key, if defined """ if key: for provider in self.providers: value = provider.get_str(key) if value is not None: return value return default
[ "def", "get_str", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "if", "key", ":", "for", "provider", "in", "self", ".", "providers", ":", "value", "=", "provider", ".", "get_str", "(", "key", ")", "if", "value", "is", "not", "None", ":", "return", "value", "return", "default" ]
Args: key (str | unicode | None): Key to lookup default (str | unicode | None): Default to use if key is not configured Returns: (str | None): Value of key, if defined
[ "Args", ":", "key", "(", "str", "|", "unicode", "|", "None", ")", ":", "Key", "to", "lookup", "default", "(", "str", "|", "unicode", "|", "None", ")", ":", "Default", "to", "use", "if", "key", "is", "not", "configured" ]
python
train
29.25
tamasgal/km3pipe
km3pipe/hardware.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L351-L356
def pmt_angles(self): """A list of PMT directions sorted by PMT channel, on DU-1, floor-1""" if self._pmt_angles == []: mask = (self.pmts.du == 1) & (self.pmts.floor == 1) self._pmt_angles = self.pmts.dir[mask] return self._pmt_angles
[ "def", "pmt_angles", "(", "self", ")", ":", "if", "self", ".", "_pmt_angles", "==", "[", "]", ":", "mask", "=", "(", "self", ".", "pmts", ".", "du", "==", "1", ")", "&", "(", "self", ".", "pmts", ".", "floor", "==", "1", ")", "self", ".", "_pmt_angles", "=", "self", ".", "pmts", ".", "dir", "[", "mask", "]", "return", "self", ".", "_pmt_angles" ]
A list of PMT directions sorted by PMT channel, on DU-1, floor-1
[ "A", "list", "of", "PMT", "directions", "sorted", "by", "PMT", "channel", "on", "DU", "-", "1", "floor", "-", "1" ]
python
train
46.166667
bearyinnovative/bearychat.py
examples/rtm_loop.py
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/examples/rtm_loop.py#L152-L167
def get_error(self, block=False, timeout=None): """Removes and returns an error from self._errors Args: block(bool): if True, block until an error is available, else return None when self._errors is empty timeout(int): it blocks at most timeout seconds Returns: error if the queue is not empty, else None """ try: error = self._errors.get(block=block, timeout=timeout) return error except Exception: return None
[ "def", "get_error", "(", "self", ",", "block", "=", "False", ",", "timeout", "=", "None", ")", ":", "try", ":", "error", "=", "self", ".", "_errors", ".", "get", "(", "block", "=", "block", ",", "timeout", "=", "timeout", ")", "return", "error", "except", "Exception", ":", "return", "None" ]
Removes and returns an error from self._errors Args: block(bool): if True, block until an error is available, else return None when self._errors is empty timeout(int): it blocks at most timeout seconds Returns: error if the queue is not empty, else None
[ "Removes", "and", "returns", "an", "error", "from", "self", ".", "_errors" ]
python
train
34.3125
inspirehep/harvesting-kit
harvestingkit/inspire_cds_package/from_cds.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_cds.py#L159-L171
def is_published(self): """Check fields 980 and 773 to see if the record has already been published. :return: True if published, else False """ field980 = record_get_field_instances(self.record, '980') field773 = record_get_field_instances(self.record, '773') for f980 in field980: if 'a' in field_get_subfields(f980): for f773 in field773: if 'p' in field_get_subfields(f773): return True return False
[ "def", "is_published", "(", "self", ")", ":", "field980", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'980'", ")", "field773", "=", "record_get_field_instances", "(", "self", ".", "record", ",", "'773'", ")", "for", "f980", "in", "field980", ":", "if", "'a'", "in", "field_get_subfields", "(", "f980", ")", ":", "for", "f773", "in", "field773", ":", "if", "'p'", "in", "field_get_subfields", "(", "f773", ")", ":", "return", "True", "return", "False" ]
Check fields 980 and 773 to see if the record has already been published. :return: True if published, else False
[ "Check", "fields", "980", "and", "773", "to", "see", "if", "the", "record", "has", "already", "been", "published", "." ]
python
valid
39.923077
pydata/numexpr
numexpr/utils.py
https://github.com/pydata/numexpr/blob/364bac13d84524e0e01db892301b2959d822dcff/numexpr/utils.py#L148-L168
def detect_number_of_cores(): """ Detects the number of cores on a system. Cribbed from pp. """ # Linux, Unix and MacOS: if hasattr(os, "sysconf"): if "SC_NPROCESSORS_ONLN" in os.sysconf_names: # Linux & Unix: ncpus = os.sysconf("SC_NPROCESSORS_ONLN") if isinstance(ncpus, int) and ncpus > 0: return ncpus else: # OSX: return int(subprocess.check_output(["sysctl", "-n", "hw.ncpu"])) # Windows: try: ncpus = int(os.environ.get("NUMBER_OF_PROCESSORS", "")) if ncpus > 0: return ncpus except ValueError: pass return 1
[ "def", "detect_number_of_cores", "(", ")", ":", "# Linux, Unix and MacOS:", "if", "hasattr", "(", "os", ",", "\"sysconf\"", ")", ":", "if", "\"SC_NPROCESSORS_ONLN\"", "in", "os", ".", "sysconf_names", ":", "# Linux & Unix:", "ncpus", "=", "os", ".", "sysconf", "(", "\"SC_NPROCESSORS_ONLN\"", ")", "if", "isinstance", "(", "ncpus", ",", "int", ")", "and", "ncpus", ">", "0", ":", "return", "ncpus", "else", ":", "# OSX:", "return", "int", "(", "subprocess", ".", "check_output", "(", "[", "\"sysctl\"", ",", "\"-n\"", ",", "\"hw.ncpu\"", "]", ")", ")", "# Windows:", "try", ":", "ncpus", "=", "int", "(", "os", ".", "environ", ".", "get", "(", "\"NUMBER_OF_PROCESSORS\"", ",", "\"\"", ")", ")", "if", "ncpus", ">", "0", ":", "return", "ncpus", "except", "ValueError", ":", "pass", "return", "1" ]
Detects the number of cores on a system. Cribbed from pp.
[ "Detects", "the", "number", "of", "cores", "on", "a", "system", ".", "Cribbed", "from", "pp", "." ]
python
train
30.857143
priestc/moneywagon
moneywagon/mcaf.py
https://github.com/priestc/moneywagon/blob/00518f1f557dcca8b3031f46d3564c2baa0227a3/moneywagon/mcaf.py#L38-L52
def gen_primes(): """ Generate an infinite sequence of prime numbers. """ D = {} q = 2 while True: if q not in D: yield q D[q * q] = [q] else: for p in D[q]: D.setdefault(p + q, []).append(p) del D[q] q += 1
[ "def", "gen_primes", "(", ")", ":", "D", "=", "{", "}", "q", "=", "2", "while", "True", ":", "if", "q", "not", "in", "D", ":", "yield", "q", "D", "[", "q", "*", "q", "]", "=", "[", "q", "]", "else", ":", "for", "p", "in", "D", "[", "q", "]", ":", "D", ".", "setdefault", "(", "p", "+", "q", ",", "[", "]", ")", ".", "append", "(", "p", ")", "del", "D", "[", "q", "]", "q", "+=", "1" ]
Generate an infinite sequence of prime numbers.
[ "Generate", "an", "infinite", "sequence", "of", "prime", "numbers", "." ]
python
train
20.133333
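D here is an incremental sieve: each entry maps an upcoming composite to the primes that divide it. Taking a prefix with itertools:

from itertools import islice

print(list(islice(gen_primes(), 10)))
# -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]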
mdiener/grace
grace/py27/slimit/parser.py
https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/parser.py#L1129-L1131
def p_labelled_statement(self, p): """labelled_statement : identifier COLON statement""" p[0] = ast.Label(identifier=p[1], statement=p[3])
[ "def", "p_labelled_statement", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "ast", ".", "Label", "(", "identifier", "=", "p", "[", "1", "]", ",", "statement", "=", "p", "[", "3", "]", ")" ]
labelled_statement : identifier COLON statement
[ "labelled_statement", ":", "identifier", "COLON", "statement" ]
python
train
50.666667
scidam/cachepy
crypter.py
https://github.com/scidam/cachepy/blob/680eeb7ff04ec9bb634b71cceb0841abaf2d530e/crypter.py#L30-L42
def padding(s, bs=AES.block_size): """Fills a bytes-like object with arbitrary symbols to make its length divisible by `bs`. """ s = to_bytes(s) if len(s) % bs == 0: res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - 1)])) + to_bytes(chr(96 - bs)) elif len(s) % bs > 0 and len(s) > bs: res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - len(s) % bs - 1)])) + to_bytes(chr(96 + len(s) % bs - bs)) else: res = s + b''.join(map(to_bytes, [random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(bs - len(s) - 1)])) + to_bytes(chr(96 + len(s) - bs)) return res
[ "def", "padding", "(", "s", ",", "bs", "=", "AES", ".", "block_size", ")", ":", "s", "=", "to_bytes", "(", "s", ")", "if", "len", "(", "s", ")", "%", "bs", "==", "0", ":", "res", "=", "s", "+", "b''", ".", "join", "(", "map", "(", "to_bytes", ",", "[", "random", ".", "SystemRandom", "(", ")", ".", "choice", "(", "string", ".", "ascii_lowercase", "+", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "bs", "-", "1", ")", "]", ")", ")", "+", "to_bytes", "(", "chr", "(", "96", "-", "bs", ")", ")", "elif", "len", "(", "s", ")", "%", "bs", ">", "0", "and", "len", "(", "s", ")", ">", "bs", ":", "res", "=", "s", "+", "b''", ".", "join", "(", "map", "(", "to_bytes", ",", "[", "random", ".", "SystemRandom", "(", ")", ".", "choice", "(", "string", ".", "ascii_lowercase", "+", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "bs", "-", "len", "(", "s", ")", "%", "bs", "-", "1", ")", "]", ")", ")", "+", "to_bytes", "(", "chr", "(", "96", "+", "len", "(", "s", ")", "%", "bs", "-", "bs", ")", ")", "else", ":", "res", "=", "s", "+", "b''", ".", "join", "(", "map", "(", "to_bytes", ",", "[", "random", ".", "SystemRandom", "(", ")", ".", "choice", "(", "string", ".", "ascii_lowercase", "+", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "bs", "-", "len", "(", "s", ")", "-", "1", ")", "]", ")", ")", "+", "to_bytes", "(", "chr", "(", "96", "+", "len", "(", "s", ")", "-", "bs", ")", ")", "return", "res" ]
Fills a bytes-like object with arbitrary symbols to make its length divisible by `bs`.
[ "Fills", "a", "bytes", "-", "like", "object", "with", "arbitrary", "symbols", "to", "make", "its", "length", "divisible", "by", "bs", "." ]
python
train
59.538462
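A quick check of the invariant; AES.block_size is 16, and the final byte (the chr(96 ± ...) marker) encodes how much to strip when unpadding:

from Crypto.Cipher import AES    # same dependency the module already uses

padded = padding(b'secret')                # 6 bytes -> filled to one 16-byte block
assert len(padded) % AES.block_size == 0
assert padded[-1:] == b'V'                 # chr(96 + 6 - 16) == 'V'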
saltstack/salt
salt/modules/boto_rds.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_rds.py#L780-L819
def describe_parameter_group(name, Filters=None, MaxRecords=None, Marker=None, region=None, key=None, keyid=None, profile=None): ''' Returns a list of `DBParameterGroup` descriptions. CLI example to description of parameter group:: salt myminion boto_rds.describe_parameter_group parametergroupname\ region=us-east-1 ''' res = __salt__['boto_rds.parameter_group_exists'](name, tags=None, region=region, key=key, keyid=keyid, profile=profile) if not res.get('exists'): return {'exists': bool(res)} try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return {'results': bool(conn)} kwargs = {} for key in ('Marker', 'Filters'): if locals()[key] is not None: kwargs[key] = str(locals()[key]) # future lint: disable=blacklisted-function if locals()['MaxRecords'] is not None: kwargs['MaxRecords'] = int(locals()['MaxRecords']) info = conn.describe_db_parameter_groups(DBParameterGroupName=name, **kwargs) if not info: return {'results': bool(info), 'message': 'Failed to get RDS description for group {0}.'.format(name)} return {'results': bool(info), 'message': 'Got RDS descrition for group {0}.'.format(name)} except ClientError as e: return {'error': __utils__['boto3.get_error'](e)}
[ "def", "describe_parameter_group", "(", "name", ",", "Filters", "=", "None", ",", "MaxRecords", "=", "None", ",", "Marker", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "res", "=", "__salt__", "[", "'boto_rds.parameter_group_exists'", "]", "(", "name", ",", "tags", "=", "None", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "res", ".", "get", "(", "'exists'", ")", ":", "return", "{", "'exists'", ":", "bool", "(", "res", ")", "}", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "conn", ":", "return", "{", "'results'", ":", "bool", "(", "conn", ")", "}", "kwargs", "=", "{", "}", "for", "key", "in", "(", "'Marker'", ",", "'Filters'", ")", ":", "if", "locals", "(", ")", "[", "key", "]", "is", "not", "None", ":", "kwargs", "[", "key", "]", "=", "str", "(", "locals", "(", ")", "[", "key", "]", ")", "# future lint: disable=blacklisted-function", "if", "locals", "(", ")", "[", "'MaxRecords'", "]", "is", "not", "None", ":", "kwargs", "[", "'MaxRecords'", "]", "=", "int", "(", "locals", "(", ")", "[", "'MaxRecords'", "]", ")", "info", "=", "conn", ".", "describe_db_parameter_groups", "(", "DBParameterGroupName", "=", "name", ",", "*", "*", "kwargs", ")", "if", "not", "info", ":", "return", "{", "'results'", ":", "bool", "(", "info", ")", ",", "'message'", ":", "'Failed to get RDS description for group {0}.'", ".", "format", "(", "name", ")", "}", "return", "{", "'results'", ":", "bool", "(", "info", ")", ",", "'message'", ":", "'Got RDS descrition for group {0}.'", ".", "format", "(", "name", ")", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
Returns a list of `DBParameterGroup` descriptions. CLI example to get the description of a parameter group:: salt myminion boto_rds.describe_parameter_group parametergroupname\ region=us-east-1
[ "Returns", "a", "list", "of", "DBParameterGroup", "descriptions", ".", "CLI", "example", "to", "get", "the", "description", "of", "a", "parameter", "group", "::" ]
python
train
41.525
kmike/django-generic-images
generic_images/models.py
https://github.com/kmike/django-generic-images/blob/4e45068ed219ac35396758eb6b6e1fe5306147df/generic_images/models.py#L164-L179
def get_upload_path(self, filename): ''' Override this in proxy subclass to customize upload path. Default upload path is :file:`/media/images/<user.id>/<filename>.<ext>` or :file:`/media/images/common/<filename>.<ext>` if user is not set. ``<filename>`` is returned by :meth:`~generic_images.models.AbstractAttachedImage.get_file_name` method. By default it is the probable id of the new image (predicted, since it is unknown at this stage). ''' user_folder = str(self.user.pk) if self.user else 'common' root, ext = os.path.splitext(filename) return os.path.join('media', 'images', user_folder, self.get_file_name(filename) + ext)
[ "def", "get_upload_path", "(", "self", ",", "filename", ")", ":", "user_folder", "=", "str", "(", "self", ".", "user", ".", "pk", ")", "if", "self", ".", "user", "else", "'common'", "root", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "return", "os", ".", "path", ".", "join", "(", "'media'", ",", "'images'", ",", "user_folder", ",", "self", ".", "get_file_name", "(", "filename", ")", "+", "ext", ")" ]
Override this in proxy subclass to customize upload path. Default upload path is :file:`/media/images/<user.id>/<filename>.<ext>` or :file:`/media/images/common/<filename>.<ext>` if user is not set. ``<filename>`` is returned by :meth:`~generic_images.models.AbstractAttachedImage.get_file_name` method. By default it is the probable id of the new image (predicted, since it is unknown at this stage).
[ "Override", "this", "in", "proxy", "subclass", "to", "customize", "upload", "path", ".", "Default", "upload", "path", "is", ":", "file", ":", "/", "media", "/", "images", "/", "<user", ".", "id", ">", "/", "<filename", ">", ".", "<ext", ">", "or", ":", "file", ":", "/", "media", "/", "images", "/", "common", "/", "<filename", ">", ".", "<ext", ">", "if", "user", "is", "not", "set", "." ]
python
train
47.8125
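A minimal sketch of the path the method above builds, assuming an image owned by a user with pk 42 whose get_file_name() returns '17' (both values are illustrative):
import os
filename = 'photo.jpg'
root, ext = os.path.splitext(filename)                    # ext == '.jpg'
print(os.path.join('media', 'images', '42', '17' + ext))  # media/images/42/17.jpg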
fermiPy/fermipy
fermipy/jobs/scatter_gather.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/scatter_gather.py#L147-L177
def _check_link_completion(self, link, fail_pending=False, fail_running=False): """Internal function to check the completion of all the dispatched jobs Returns ------- status_vect : `JobStatusVector` Vector that summarizes the number of jobs in various states. """ status_vect = JobStatusVector() for job_key, job_details in link.jobs.items(): # if job_details.status == JobStatus.failed: # failed = True # continue # elif job_details.status == JobStatus.done: # continue if job_key.find(JobDetails.topkey) >= 0: continue job_details.status = self._interface.check_job(job_details) if job_details.status == JobStatus.pending: if fail_pending: job_details.status = JobStatus.failed elif job_details.status == JobStatus.running: if fail_running: job_details.status = JobStatus.failed status_vect[job_details.status] += 1 link.jobs[job_key] = job_details link._set_status_self(job_details.jobkey, job_details.status) return status_vect
[ "def", "_check_link_completion", "(", "self", ",", "link", ",", "fail_pending", "=", "False", ",", "fail_running", "=", "False", ")", ":", "status_vect", "=", "JobStatusVector", "(", ")", "for", "job_key", ",", "job_details", "in", "link", ".", "jobs", ".", "items", "(", ")", ":", "# if job_details.status == JobStatus.failed:", "# failed = True", "# continue", "# elif job_details.status == JobStatus.done:", "# continue", "if", "job_key", ".", "find", "(", "JobDetails", ".", "topkey", ")", ">=", "0", ":", "continue", "job_details", ".", "status", "=", "self", ".", "_interface", ".", "check_job", "(", "job_details", ")", "if", "job_details", ".", "status", "==", "JobStatus", ".", "pending", ":", "if", "fail_pending", ":", "job_details", ".", "status", "=", "JobStatus", ".", "failed", "elif", "job_details", ".", "status", "==", "JobStatus", ".", "running", ":", "if", "fail_running", ":", "job_details", ".", "status", "=", "JobStatus", ".", "failed", "status_vect", "[", "job_details", ".", "status", "]", "+=", "1", "link", ".", "jobs", "[", "job_key", "]", "=", "job_details", "link", ".", "_set_status_self", "(", "job_details", ".", "jobkey", ",", "job_details", ".", "status", ")", "return", "status_vect" ]
Internal function to check the completion of all the dispatched jobs Returns ------- status_vect : `JobStatusVector` Vector that summarizes the number of jobs in various states.
[ "Internal", "function", "to", "check", "the", "completion", "of", "all", "the", "dispatched", "jobs" ]
python
train
39.419355
cpburnz/python-path-specification
pathspec/util.py
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L181-L207
def normalize_file(file, separators=None): """ Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). *file* (:class:`str`) is the file path. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. This does not need to include the POSIX path separator (``'/'``), but including it will not affect the results. Default is :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty container (e.g., an empty tuple ``()``). Returns the normalized file path (:class:`str`). """ # Normalize path separators. if separators is None: separators = NORMALIZE_PATH_SEPS norm_file = file for sep in separators: norm_file = norm_file.replace(sep, posixpath.sep) # Remove current directory prefix. if norm_file.startswith('./'): norm_file = norm_file[2:] return norm_file
[ "def", "normalize_file", "(", "file", ",", "separators", "=", "None", ")", ":", "# Normalize path separators.", "if", "separators", "is", "None", ":", "separators", "=", "NORMALIZE_PATH_SEPS", "norm_file", "=", "file", "for", "sep", "in", "separators", ":", "norm_file", "=", "norm_file", ".", "replace", "(", "sep", ",", "posixpath", ".", "sep", ")", "# Remove current directory prefix.", "if", "norm_file", ".", "startswith", "(", "'./'", ")", ":", "norm_file", "=", "norm_file", "[", "2", ":", "]", "return", "norm_file" ]
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). *file* (:class:`str`) is the file path. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. This does not need to include the POSIX path separator (``'/'``), but including it will not affect the results. Default is :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty container (e.g., an empty tuple ``()``). Returns the normalized file path (:class:`str`).
[ "Normalizes", "the", "file", "path", "to", "use", "the", "POSIX", "path", "separator", "(", "i", ".", "e", ".", "/", ")", "." ]
python
train
32.925926
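A minimal usage sketch of the documented behavior; passing separators explicitly keeps the output platform-independent:
from pathspec.util import normalize_file
print(normalize_file('.\\src\\main.py', separators=['\\']))  # src/main.py
print(normalize_file('./a/b'))                               # a/b ('./' prefix removed)
print(normalize_file('a\\b', separators=()))                 # a\b (empty container disables normalization)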
ipazc/mtcnn
mtcnn/network.py
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/network.py#L80-L97
def set_weights(self, weights_values: dict, ignore_missing=False): """ Sets the weights values of the network. :param weights_values: dictionary with weights for each layer """ network_name = self.__class__.__name__.lower() with tf.variable_scope(network_name): for layer_name in weights_values: with tf.variable_scope(layer_name, reuse=True): for param_name, data in weights_values[layer_name].items(): try: var = tf.get_variable(param_name) self._session.run(var.assign(data)) except ValueError: if not ignore_missing: raise
[ "def", "set_weights", "(", "self", ",", "weights_values", ":", "dict", ",", "ignore_missing", "=", "False", ")", ":", "network_name", "=", "self", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "with", "tf", ".", "variable_scope", "(", "network_name", ")", ":", "for", "layer_name", "in", "weights_values", ":", "with", "tf", ".", "variable_scope", "(", "layer_name", ",", "reuse", "=", "True", ")", ":", "for", "param_name", ",", "data", "in", "weights_values", "[", "layer_name", "]", ".", "items", "(", ")", ":", "try", ":", "var", "=", "tf", ".", "get_variable", "(", "param_name", ")", "self", ".", "_session", ".", "run", "(", "var", ".", "assign", "(", "data", ")", ")", "except", "ValueError", ":", "if", "not", "ignore_missing", ":", "raise" ]
Sets the weights values of the network. :param weights_values: dictionary with weights for each layer
[ "Sets", "the", "weights", "values", "of", "the", "network", ".", ":", "param", "weights_values", ":", "dictionary", "with", "weights", "for", "each", "layer" ]
python
train
42.833333
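A sketch of the nested mapping this method consumes (layer name -> parameter name -> array); the layer and parameter names, and the already-built network instance net, are assumptions:
import numpy as np
weights_values = {
    'conv1': {'weights': np.zeros((3, 3, 3, 10), dtype=np.float32),
              'biases': np.zeros(10, dtype=np.float32)},
}
net.set_weights(weights_values, ignore_missing=True)  # silently skip params absent from the graph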
Azure/azure-cosmos-table-python
azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py#L613-L635
def get_table_acl(self, table_name, timeout=None): ''' Returns details about any stored access policies specified on the table that may be used with Shared Access Signatures. :param str table_name: The name of an existing table. :param int timeout: The server timeout, expressed in seconds. :return: A dictionary of access policies associated with the table. :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`) ''' _validate_not_none('table_name', table_name) request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations(secondary=True) request.path = '/' + _to_str(table_name) request.query = { 'comp': 'acl', 'timeout': _int_to_str(timeout), } return self._perform_request(request, _convert_xml_to_signed_identifiers)
[ "def", "get_table_acl", "(", "self", ",", "table_name", ",", "timeout", "=", "None", ")", ":", "_validate_not_none", "(", "'table_name'", ",", "table_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'GET'", "request", ".", "host_locations", "=", "self", ".", "_get_host_locations", "(", "secondary", "=", "True", ")", "request", ".", "path", "=", "'/'", "+", "_to_str", "(", "table_name", ")", "request", ".", "query", "=", "{", "'comp'", ":", "'acl'", ",", "'timeout'", ":", "_int_to_str", "(", "timeout", ")", ",", "}", "return", "self", ".", "_perform_request", "(", "request", ",", "_convert_xml_to_signed_identifiers", ")" ]
Returns details about any stored access policies specified on the table that may be used with Shared Access Signatures. :param str table_name: The name of an existing table. :param int timeout: The server timeout, expressed in seconds. :return: A dictionary of access policies associated with the table. :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
[ "Returns", "details", "about", "any", "stored", "access", "policies", "specified", "on", "the", "table", "that", "may", "be", "used", "with", "Shared", "Access", "Signatures", "." ]
python
train
40.391304
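A minimal usage sketch; the account name, key, and table name are placeholders:
from azure.cosmosdb.table.tableservice import TableService
service = TableService(account_name='myaccount', account_key='mykey')
acl = service.get_table_acl('mytable')               # dict of policy id -> AccessPolicy
for identifier, policy in acl.items():
    print(identifier, policy.permission, policy.expiry)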
mlperf/training
image_classification/tensorflow/official/utils/logs/hooks.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/utils/logs/hooks.py#L69-L74
def begin(self): """Called once before using the session to check global step.""" self._global_step_tensor = tf.train.get_global_step() if self._global_step_tensor is None: raise RuntimeError( 'Global step should be created to use StepCounterHook.')
[ "def", "begin", "(", "self", ")", ":", "self", ".", "_global_step_tensor", "=", "tf", ".", "train", ".", "get_global_step", "(", ")", "if", "self", ".", "_global_step_tensor", "is", "None", ":", "raise", "RuntimeError", "(", "'Global step should be created to use StepCounterHook.'", ")" ]
Called once before using the session to check global step.
[ "Called", "once", "before", "using", "the", "session", "to", "check", "global", "step", "." ]
python
train
45.333333
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L87-L106
def _create_put_request(self, resource, billomat_id, command=None, send_data=None): """ Creates a put request and returns the response data """ assert (isinstance(resource, str)) if isinstance(billomat_id, int): billomat_id = str(billomat_id) if not command: command = '' else: command = '/' + command response = self.session.put( url=self.api_url + resource + '/' + billomat_id + command, data=json.dumps(send_data), ) return self._handle_response(response)
[ "def", "_create_put_request", "(", "self", ",", "resource", ",", "billomat_id", ",", "command", "=", "None", ",", "send_data", "=", "None", ")", ":", "assert", "(", "isinstance", "(", "resource", ",", "str", ")", ")", "if", "isinstance", "(", "billomat_id", ",", "int", ")", ":", "billomat_id", "=", "str", "(", "billomat_id", ")", "if", "not", "command", ":", "command", "=", "''", "else", ":", "command", "=", "'/'", "+", "command", "response", "=", "self", ".", "session", ".", "put", "(", "url", "=", "self", ".", "api_url", "+", "resource", "+", "'/'", "+", "billomat_id", "+", "command", ",", "data", "=", "json", ".", "dumps", "(", "send_data", ")", ",", ")", "return", "self", ".", "_handle_response", "(", "response", ")" ]
Creates a put request and returns the response data
[ "Creates", "a", "put", "request", "and", "return", "the", "response", "data" ]
python
train
29.1
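A sketch of the URL this helper assembles before delegating to the session; the resource and command values are illustrative, not taken from the source:
# url = api_url + resource + '/' + billomat_id + command
# e.g. 'https://example.billomat.net/api/' + 'invoices' + '/' + '42' + '/complete'
client._create_put_request(resource='invoices', billomat_id=42,
                           command='complete', send_data={'invoice': {}})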
Clinical-Genomics/trailblazer
trailblazer/mip/sacct.py
https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/sacct.py#L71-L75
def filter_jobs(sacct_jobs, failed=True): """Filter jobs that have a FAILED etc. status.""" categories = FAILED_CATEGORIES if failed else NORMAL_CATEGORIES filtered_jobs = [job for job in sacct_jobs if job['state'] in categories] return filtered_jobs
[ "def", "filter_jobs", "(", "sacct_jobs", ",", "failed", "=", "True", ")", ":", "categories", "=", "FAILED_CATEGORIES", "if", "failed", "else", "NORMAL_CATEGORIES", "filtered_jobs", "=", "[", "job", "for", "job", "in", "sacct_jobs", "if", "job", "[", "'state'", "]", "in", "categories", "]", "return", "filtered_jobs" ]
Filter jobs that have a FAILED etc. status.
[ "Filter", "jobs", "that", "have", "a", "FAILED", "etc", ".", "status", "." ]
python
train
52.4
projectatomic/osbs-client
osbs/api.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/api.py#L1331-L1342
def get_config_map(self, name): """ Get a ConfigMap object from the server Raises exception on error :param name: str, name of configMap to get from the server :returns: ConfigMapResponse containing the ConfigMap with the requested name """ response = self.os.get_config_map(name) config_map_response = ConfigMapResponse(response.json()) return config_map_response
[ "def", "get_config_map", "(", "self", ",", "name", ")", ":", "response", "=", "self", ".", "os", ".", "get_config_map", "(", "name", ")", "config_map_response", "=", "ConfigMapResponse", "(", "response", ".", "json", "(", ")", ")", "return", "config_map_response" ]
Get a ConfigMap object from the server Raises exception on error :param name: str, name of configMap to get from the server :returns: ConfigMapResponse containing the ConfigMap with the requested name
[ "Get", "a", "ConfigMap", "object", "from", "the", "server" ]
python
train
35.583333
MrMinimal64/timezonefinder
timezonefinder/timezonefinder.py
https://github.com/MrMinimal64/timezonefinder/blob/96cc43afb3bae57ffd002ab4cf104fe15eda2257/timezonefinder/timezonefinder.py#L423-L489
def timezone_at(self, *, lng, lat): """ this function looks up in which polygons the point could be included in to speed things up there are shortcuts being used (stored in a binary file) especially for large polygons it is expensive to check if a point is really included, so certain simplifications are made and even when you get a hit the point might actually not be inside the polygon (for example when there is only one timezone nearby) if you want to make sure a point is really inside a timezone use 'certain_timezone_at' :param lng: longitude of the point in degree (-180 to 180) :param lat: latitude in degree (90 to -90) :return: the timezone name of a matching polygon or None """ lng, lat = rectify_coordinates(lng, lat) # x = longitude y = latitude both converted to 8byte int x = coord2int(lng) y = coord2int(lat) shortcut_id_x, shortcut_id_y = coord2shortcut(lng, lat) self.shortcuts_unique_id.seek( (180 * NR_SHORTCUTS_PER_LAT * NR_BYTES_H * shortcut_id_x + NR_BYTES_H * shortcut_id_y)) try: # if there is just one possible zone in this shortcut instantly return its name return timezone_names[unpack(DTYPE_FORMAT_H, self.shortcuts_unique_id.read(NR_BYTES_H))[0]] except IndexError: possible_polygons = self.polygon_ids_of_shortcut(shortcut_id_x, shortcut_id_y) nr_possible_polygons = len(possible_polygons) if nr_possible_polygons == 0: return None if nr_possible_polygons == 1: # there is only one polygon in that area. return its timezone name without further checks return timezone_names[self.id_of(possible_polygons[0])] # create a list of all the timezone ids of all possible polygons ids = self.id_list(possible_polygons, nr_possible_polygons) # check until the point is included in one of the possible polygons for i in range(nr_possible_polygons): # when including the current polygon only polygons from the same zone remain, same_element = all_the_same(pointer=i, length=nr_possible_polygons, id_list=ids) if same_element != -1: # return the name of that zone return timezone_names[same_element] polygon_nr = possible_polygons[i] # get the boundaries of the polygon = (lng_max, lng_min, lat_max, lat_min) self.poly_max_values.seek(4 * NR_BYTES_I * polygon_nr) boundaries = self.fromfile(self.poly_max_values, dtype=DTYPE_FORMAT_SIGNED_I_NUMPY, count=4) # only run the expensive algorithm if the point is withing the boundaries if not (x > boundaries[0] or x < boundaries[1] or y > boundaries[2] or y < boundaries[3]): outside_all_holes = True # when the point is within a hole of the polygon, this timezone must not be returned for hole_coordinates in self._holes_of_line(polygon_nr): if inside_polygon(x, y, hole_coordinates): outside_all_holes = False break if outside_all_holes: if inside_polygon(x, y, self.coords_of(line=polygon_nr)): # the point is included in this polygon. return its timezone name without further checks return timezone_names[ids[i]] # the timezone name of the last polygon should always be returned # if no other polygon has been matched beforehand. raise ValueError('BUG: this statement should never be reached. Please open up an issue on Github!')
[ "def", "timezone_at", "(", "self", ",", "*", ",", "lng", ",", "lat", ")", ":", "lng", ",", "lat", "=", "rectify_coordinates", "(", "lng", ",", "lat", ")", "# x = longitude y = latitude both converted to 8byte int", "x", "=", "coord2int", "(", "lng", ")", "y", "=", "coord2int", "(", "lat", ")", "shortcut_id_x", ",", "shortcut_id_y", "=", "coord2shortcut", "(", "lng", ",", "lat", ")", "self", ".", "shortcuts_unique_id", ".", "seek", "(", "(", "180", "*", "NR_SHORTCUTS_PER_LAT", "*", "NR_BYTES_H", "*", "shortcut_id_x", "+", "NR_BYTES_H", "*", "shortcut_id_y", ")", ")", "try", ":", "# if there is just one possible zone in this shortcut instantly return its name", "return", "timezone_names", "[", "unpack", "(", "DTYPE_FORMAT_H", ",", "self", ".", "shortcuts_unique_id", ".", "read", "(", "NR_BYTES_H", ")", ")", "[", "0", "]", "]", "except", "IndexError", ":", "possible_polygons", "=", "self", ".", "polygon_ids_of_shortcut", "(", "shortcut_id_x", ",", "shortcut_id_y", ")", "nr_possible_polygons", "=", "len", "(", "possible_polygons", ")", "if", "nr_possible_polygons", "==", "0", ":", "return", "None", "if", "nr_possible_polygons", "==", "1", ":", "# there is only one polygon in that area. return its timezone name without further checks", "return", "timezone_names", "[", "self", ".", "id_of", "(", "possible_polygons", "[", "0", "]", ")", "]", "# create a list of all the timezone ids of all possible polygons", "ids", "=", "self", ".", "id_list", "(", "possible_polygons", ",", "nr_possible_polygons", ")", "# check until the point is included in one of the possible polygons", "for", "i", "in", "range", "(", "nr_possible_polygons", ")", ":", "# when including the current polygon only polygons from the same zone remain,", "same_element", "=", "all_the_same", "(", "pointer", "=", "i", ",", "length", "=", "nr_possible_polygons", ",", "id_list", "=", "ids", ")", "if", "same_element", "!=", "-", "1", ":", "# return the name of that zone", "return", "timezone_names", "[", "same_element", "]", "polygon_nr", "=", "possible_polygons", "[", "i", "]", "# get the boundaries of the polygon = (lng_max, lng_min, lat_max, lat_min)", "self", ".", "poly_max_values", ".", "seek", "(", "4", "*", "NR_BYTES_I", "*", "polygon_nr", ")", "boundaries", "=", "self", ".", "fromfile", "(", "self", ".", "poly_max_values", ",", "dtype", "=", "DTYPE_FORMAT_SIGNED_I_NUMPY", ",", "count", "=", "4", ")", "# only run the expensive algorithm if the point is withing the boundaries", "if", "not", "(", "x", ">", "boundaries", "[", "0", "]", "or", "x", "<", "boundaries", "[", "1", "]", "or", "y", ">", "boundaries", "[", "2", "]", "or", "y", "<", "boundaries", "[", "3", "]", ")", ":", "outside_all_holes", "=", "True", "# when the point is within a hole of the polygon, this timezone must not be returned", "for", "hole_coordinates", "in", "self", ".", "_holes_of_line", "(", "polygon_nr", ")", ":", "if", "inside_polygon", "(", "x", ",", "y", ",", "hole_coordinates", ")", ":", "outside_all_holes", "=", "False", "break", "if", "outside_all_holes", ":", "if", "inside_polygon", "(", "x", ",", "y", ",", "self", ".", "coords_of", "(", "line", "=", "polygon_nr", ")", ")", ":", "# the point is included in this polygon. return its timezone name without further checks", "return", "timezone_names", "[", "ids", "[", "i", "]", "]", "# the timezone name of the last polygon should always be returned", "# if no other polygon has been matched beforehand.", "raise", "ValueError", "(", "'BUG: this statement should never be reached. 
Please open up an issue on Github!'", ")" ]
this function looks up in which polygons the point could be included in to speed things up there are shortcuts being used (stored in a binary file) especially for large polygons it is expensive to check if a point is really included, so certain simplifications are made and even when you get a hit the point might actually not be inside the polygon (for example when there is only one timezone nearby) if you want to make sure a point is really inside a timezone use 'certain_timezone_at' :param lng: longitude of the point in degree (-180 to 180) :param lat: latitude in degree (90 to -90) :return: the timezone name of a matching polygon or None
[ "this", "function", "looks", "up", "in", "which", "polygons", "the", "point", "could", "be", "included", "in", "to", "speed", "things", "up", "there", "are", "shortcuts", "being", "used", "(", "stored", "in", "a", "binary", "file", ")", "especially", "for", "large", "polygons", "it", "is", "expensive", "to", "check", "if", "a", "point", "is", "really", "included", "so", "certain", "simplifications", "are", "made", "and", "even", "when", "you", "get", "a", "hit", "the", "point", "might", "actually", "not", "be", "inside", "the", "polygon", "(", "for", "example", "when", "there", "is", "only", "one", "timezone", "nearby", ")", "if", "you", "want", "to", "make", "sure", "a", "point", "is", "really", "inside", "a", "timezone", "use", "certain_timezone_at", ":", "param", "lng", ":", "longitude", "of", "the", "point", "in", "degree", "(", "-", "180", "to", "180", ")", ":", "param", "lat", ":", "latitude", "in", "degree", "(", "90", "to", "-", "90", ")", ":", "return", ":", "the", "timezone", "name", "of", "a", "matching", "polygon", "or", "None" ]
python
train
57.283582
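A minimal usage sketch of the method above; note that lng and lat are keyword-only:
from timezonefinder import TimezoneFinder
tf = TimezoneFinder()
print(tf.timezone_at(lng=13.358, lat=52.5061))  # 'Europe/Berlin'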
salesking/salesking_python_sdk
salesking/collection.py
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/collection.py#L128-L137
def set_filters(self, filters): """ set and validate filters dict """ if not isinstance(filters, dict): raise Exception("filters must be a dict") self.filters = {} for key in filters.keys(): value = filters[key] self.add_filter(key,value)
[ "def", "set_filters", "(", "self", ",", "filters", ")", ":", "if", "not", "isinstance", "(", "filters", ",", "dict", ")", ":", "raise", "Exception", "(", "\"filters must be a dict\"", ")", "self", ".", "filters", "=", "{", "}", "for", "key", "in", "filters", ".", "keys", "(", ")", ":", "value", "=", "filters", "[", "key", "]", "self", ".", "add_filter", "(", "key", ",", "value", ")" ]
set and validate filters dict
[ "set", "and", "validate", "filters", "dict" ]
python
train
31.3
molmod/molmod
molmod/io/cp2k.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cp2k.py#L296-L300
def set_value(self, value): """Set the value associated with the keyword""" if not isinstance(value, str): raise TypeError("A value must be a string, got %s." % value) self.__value = value
[ "def", "set_value", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "str", ")", ":", "raise", "TypeError", "(", "\"A value must be a string, got %s.\"", "%", "value", ")", "self", ".", "__value", "=", "value" ]
Set the value associated with the keyword
[ "Set", "the", "value", "associated", "with", "the", "keyword" ]
python
train
44
shmir/PyIxNetwork
ixnetwork/ixn_statistics_view.py
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_statistics_view.py#L61-L67
def get_stats(self, stat_name): """ :param stat_name: requested statistics name. :returns: all values of the requested statistic for all objects. """ return [self.get_stat(r, stat_name) for r in self.statistics.keys()]
[ "def", "get_stats", "(", "self", ",", "stat_name", ")", ":", "return", "[", "self", ".", "get_stat", "(", "r", ",", "stat_name", ")", "for", "r", "in", "self", ".", "statistics", ".", "keys", "(", ")", "]" ]
:param stat_name: requested statistics name. :returns: all values of the requested statistic for all objects.
[ ":", "param", "stat_name", ":", "requested", "statistics", "name", ".", ":", "returns", ":", "all", "values", "of", "the", "requested", "statistic", "for", "all", "objects", "." ]
python
train
36.142857
dslackw/slpkg
slpkg/config.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/config.py#L40-L77
def view(self): """View slpkg config file """ print("") # new line at start conf_args = [ "RELEASE", "SLACKWARE_VERSION", "COMP_ARCH", "BUILD_PATH", "PACKAGES", "PATCHES", "CHECKMD5", "DEL_ALL", "DEL_BUILD", "SBO_BUILD_LOG", "MAKEFLAGS", "DEFAULT_ANSWER", "REMOVE_DEPS_ANSWER", "SKIP_UNST", "RSL_DEPS", "DEL_DEPS", "USE_COLORS", "DOWNDER", "DOWNDER_OPTIONS", "SLACKPKG_LOG", "ONLY_INSTALLED", "PRG_BAR", "EDITOR", "NOT_DOWNGRADE" ] read_conf = Utils().read_file(self.config_file) for line in read_conf.splitlines(): if not line.startswith("#") and line.split("=")[0] in conf_args: print("{0}".format(line)) else: print("{0}{1}{2}".format(self.meta.color["CYAN"], line, self.meta.color["ENDC"])) print("")
[ "def", "view", "(", "self", ")", ":", "print", "(", "\"\"", ")", "# new line at start", "conf_args", "=", "[", "\"RELEASE\"", ",", "\"SLACKWARE_VERSION\"", ",", "\"COMP_ARCH\"", ",", "\"BUILD_PATH\"", ",", "\"PACKAGES\"", ",", "\"PATCHES\"", ",", "\"CHECKMD5\"", ",", "\"DEL_ALL\"", ",", "\"DEL_BUILD\"", ",", "\"SBO_BUILD_LOG\"", ",", "\"MAKEFLAGS\"", ",", "\"DEFAULT_ANSWER\"", ",", "\"REMOVE_DEPS_ANSWER\"", ",", "\"SKIP_UNST\"", ",", "\"RSL_DEPS\"", ",", "\"DEL_DEPS\"", ",", "\"USE_COLORS\"", ",", "\"DOWNDER\"", ",", "\"DOWNDER_OPTIONS\"", ",", "\"SLACKPKG_LOG\"", ",", "\"ONLY_INSTALLED\"", ",", "\"PRG_BAR\"", ",", "\"EDITOR\"", ",", "\"NOT_DOWNGRADE\"", "]", "read_conf", "=", "Utils", "(", ")", ".", "read_file", "(", "self", ".", "config_file", ")", "for", "line", "in", "read_conf", ".", "splitlines", "(", ")", ":", "if", "not", "line", ".", "startswith", "(", "\"#\"", ")", "and", "line", ".", "split", "(", "\"=\"", ")", "[", "0", "]", "in", "conf_args", ":", "print", "(", "\"{0}\"", ".", "format", "(", "line", ")", ")", "else", ":", "print", "(", "\"{0}{1}{2}\"", ".", "format", "(", "self", ".", "meta", ".", "color", "[", "\"CYAN\"", "]", ",", "line", ",", "self", ".", "meta", ".", "color", "[", "\"ENDC\"", "]", ")", ")", "print", "(", "\"\"", ")" ]
View slpkg config file
[ "View", "slpkg", "config", "file" ]
python
train
29.421053
mozilla/taar
taar/plugin.py
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/plugin.py#L32-L52
def clean_promoted_guids(raw_promoted_guids): """ Verify that the promoted GUIDs are formatted correctly, otherwise strip it down into an empty list. """ valid = True for row in raw_promoted_guids: if len(row) != 2: valid = False break if not ( (isinstance(row[0], str) or isinstance(row[0], unicode)) and (isinstance(row[1], int) or isinstance(row[1], float)) # noqa ): valid = False break if valid: return raw_promoted_guids return []
[ "def", "clean_promoted_guids", "(", "raw_promoted_guids", ")", ":", "valid", "=", "True", "for", "row", "in", "raw_promoted_guids", ":", "if", "len", "(", "row", ")", "!=", "2", ":", "valid", "=", "False", "break", "if", "not", "(", "(", "isinstance", "(", "row", "[", "0", "]", ",", "str", ")", "or", "isinstance", "(", "row", "[", "0", "]", ",", "unicode", ")", ")", "and", "(", "isinstance", "(", "row", "[", "1", "]", ",", "int", ")", "or", "isinstance", "(", "row", "[", "1", "]", ",", "float", ")", ")", "# noqa", ")", ":", "valid", "=", "False", "break", "if", "valid", ":", "return", "raw_promoted_guids", "return", "[", "]" ]
Verify that the promoted GUIDs are formatted correctly, otherwise strip it down into an empty list.
[ "Verify", "that", "the", "promoted", "GUIDs", "are", "formatted", "correctly", "otherwise", "strip", "it", "down", "into", "an", "empty", "list", "." ]
python
train
26.285714
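A small sketch of the all-or-nothing validation above; the GUID strings are made up:
rows = [['addon-a@mozilla.org', 10], ['addon-b@mozilla.org', 5.5]]
clean_promoted_guids(rows)                       # returned unchanged: every row is a (str, number) pair
clean_promoted_guids([['addon-a@mozilla.org']])  # [] -- one malformed row rejects the whole list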
campbellr/smashrun-client
smashrun/client.py
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L215-L225
def delete_activity(self, id_num): """Delete an activity (run). :param id_num: The activity ID to delete """ url = self._build_url('my', 'activities', id_num) r = self.session.delete(url) r.raise_for_status() return r
[ "def", "delete_activity", "(", "self", ",", "id_num", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'my'", ",", "'activities'", ",", "id_num", ")", "r", "=", "self", ".", "session", ".", "delete", "(", "url", ")", "r", ".", "raise_for_status", "(", ")", "return", "r" ]
Delete an activity (run). :param id_num: The activity ID to delete
[ "Delete", "an", "activity", "(", "run", ")", "." ]
python
train
24.181818
joshspeagle/dynesty
dynesty/sampler.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/sampler.py#L275-L308
def _fill_queue(self, loglstar): """Sequentially add new live point proposals to the queue.""" # Add/zip arguments to submit to the queue. point_queue = [] axes_queue = [] while self.nqueue < self.queue_size: if self._beyond_unit_bound(loglstar): # Propose points using the provided sampling/bounding options. point, axes = self.propose_point() evolve_point = self.evolve_point else: # Propose/evaluate points directly from the unit cube. point = self.rstate.rand(self.npdim) axes = np.identity(self.npdim) evolve_point = sample_unif point_queue.append(point) axes_queue.append(axes) self.nqueue += 1 loglstars = [loglstar for i in range(self.queue_size)] scales = [self.scale for i in range(self.queue_size)] ptforms = [self.prior_transform for i in range(self.queue_size)] logls = [self.loglikelihood for i in range(self.queue_size)] kwargs = [self.kwargs for i in range(self.queue_size)] args = zip(point_queue, loglstars, axes_queue, scales, ptforms, logls, kwargs) if self.use_pool_evolve: # Use the pool to propose ("evolve") a new live point. self.queue = list(self.M(evolve_point, args)) else: # Propose ("evolve") a new live point using the default `map` # function. self.queue = list(map(evolve_point, args))
[ "def", "_fill_queue", "(", "self", ",", "loglstar", ")", ":", "# Add/zip arguments to submit to the queue.", "point_queue", "=", "[", "]", "axes_queue", "=", "[", "]", "while", "self", ".", "nqueue", "<", "self", ".", "queue_size", ":", "if", "self", ".", "_beyond_unit_bound", "(", "loglstar", ")", ":", "# Propose points using the provided sampling/bounding options.", "point", ",", "axes", "=", "self", ".", "propose_point", "(", ")", "evolve_point", "=", "self", ".", "evolve_point", "else", ":", "# Propose/evaluate points directly from the unit cube.", "point", "=", "self", ".", "rstate", ".", "rand", "(", "self", ".", "npdim", ")", "axes", "=", "np", ".", "identity", "(", "self", ".", "npdim", ")", "evolve_point", "=", "sample_unif", "point_queue", ".", "append", "(", "point", ")", "axes_queue", ".", "append", "(", "axes", ")", "self", ".", "nqueue", "+=", "1", "loglstars", "=", "[", "loglstar", "for", "i", "in", "range", "(", "self", ".", "queue_size", ")", "]", "scales", "=", "[", "self", ".", "scale", "for", "i", "in", "range", "(", "self", ".", "queue_size", ")", "]", "ptforms", "=", "[", "self", ".", "prior_transform", "for", "i", "in", "range", "(", "self", ".", "queue_size", ")", "]", "logls", "=", "[", "self", ".", "loglikelihood", "for", "i", "in", "range", "(", "self", ".", "queue_size", ")", "]", "kwargs", "=", "[", "self", ".", "kwargs", "for", "i", "in", "range", "(", "self", ".", "queue_size", ")", "]", "args", "=", "zip", "(", "point_queue", ",", "loglstars", ",", "axes_queue", ",", "scales", ",", "ptforms", ",", "logls", ",", "kwargs", ")", "if", "self", ".", "use_pool_evolve", ":", "# Use the pool to propose (\"evolve\") a new live point.", "self", ".", "queue", "=", "list", "(", "self", ".", "M", "(", "evolve_point", ",", "args", ")", ")", "else", ":", "# Propose (\"evolve\") a new live point using the default `map`", "# function.", "self", ".", "queue", "=", "list", "(", "map", "(", "evolve_point", ",", "args", ")", ")" ]
Sequentially add new live point proposals to the queue.
[ "Sequentially", "add", "new", "live", "point", "proposals", "to", "the", "queue", "." ]
python
train
45.352941
gem/oq-engine
openquake/baselib/general.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/general.py#L763-L778
def new(self, array): """ Convert an array of compatible length into a DictArray: >>> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]}) >>> d.new(numpy.arange(0, 5, 1)) # array of length 5 = 3 + 2 <DictArray PGA: [0 1 2] PGV: [3 4]> """ assert len(self.array) == len(array) arr = object.__new__(self.__class__) arr.dt = self.dt arr.slicedic = self.slicedic arr.array = array return arr
[ "def", "new", "(", "self", ",", "array", ")", ":", "assert", "len", "(", "self", ".", "array", ")", "==", "len", "(", "array", ")", "arr", "=", "object", ".", "__new__", "(", "self", ".", "__class__", ")", "arr", ".", "dt", "=", "self", ".", "dt", "arr", ".", "slicedic", "=", "self", ".", "slicedic", "arr", ".", "array", "=", "array", "return", "arr" ]
Convert an array of compatible length into a DictArray: >>> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]}) >>> d.new(numpy.arange(0, 5, 1)) # array of length 5 = 3 + 2 <DictArray PGA: [0 1 2] PGV: [3 4]>
[ "Convert", "an", "array", "of", "compatible", "length", "into", "a", "DictArray", ":" ]
python
train
31
bioasp/caspo
caspo/core/clamping.py
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/clamping.py#L105-L123
def to_csv(self, filename, stimuli=None, inhibitors=None, prepend=""): """ Writes the list of clampings to a CSV file Parameters ---------- filename : str Absolute path where to write the CSV file stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning """ self.to_dataframe(stimuli, inhibitors, prepend).to_csv(filename, index=False)
[ "def", "to_csv", "(", "self", ",", "filename", ",", "stimuli", "=", "None", ",", "inhibitors", "=", "None", ",", "prepend", "=", "\"\"", ")", ":", "self", ".", "to_dataframe", "(", "stimuli", ",", "inhibitors", ",", "prepend", ")", ".", "to_csv", "(", "filename", ",", "index", "=", "False", ")" ]
Writes the list of clampings to a CSV file Parameters ---------- filename : str Absolute path where to write the CSV file stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning
[ "Writes", "the", "list", "of", "clampings", "to", "a", "CSV", "file" ]
python
train
37.526316
tanghaibao/jcvi
jcvi/apps/restriction.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/restriction.py#L59-L69
def extract_full(rec, sites, flank, fw): """ Full extraction of seq flanking the sites. """ for s in sites: newid = "{0}:{1}".format(rec.name, s) left = max(s - flank, 0) right = min(s + flank, len(rec)) frag = rec.seq[left:right].strip("Nn") newrec = SeqRecord(frag, id=newid, description="") SeqIO.write([newrec], fw, "fasta")
[ "def", "extract_full", "(", "rec", ",", "sites", ",", "flank", ",", "fw", ")", ":", "for", "s", "in", "sites", ":", "newid", "=", "\"{0}:{1}\"", ".", "format", "(", "rec", ".", "name", ",", "s", ")", "left", "=", "max", "(", "s", "-", "flank", ",", "0", ")", "right", "=", "min", "(", "s", "+", "flank", ",", "len", "(", "rec", ")", ")", "frag", "=", "rec", ".", "seq", "[", "left", ":", "right", "]", ".", "strip", "(", "\"Nn\"", ")", "newrec", "=", "SeqRecord", "(", "frag", ",", "id", "=", "newid", ",", "description", "=", "\"\"", ")", "SeqIO", ".", "write", "(", "[", "newrec", "]", ",", "fw", ",", "\"fasta\"", ")" ]
Full extraction of seq flanking the sites.
[ "Full", "extraction", "of", "seq", "flanking", "the", "sites", "." ]
python
train
34.727273
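A minimal driver sketch for the function above; the FASTA filenames and cut-site coordinates are assumptions:
from Bio import SeqIO
with open('flanks.fasta', 'w') as fw:
    for rec in SeqIO.parse('genome.fasta', 'fasta'):
        # write 150 bp of sequence on each side of two illustrative sites
        extract_full(rec, sites=[1000, 5000], flank=150, fw=fw)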
snbuback/django_services
django_services/service/core.py
https://github.com/snbuback/django_services/blob/58cbdea878bb11197add0ed1008a9206e4d92671/django_services/service/core.py#L80-L94
def has_perm(self, service, perm_name, obj, call_name): """ Raise PermissionDenied if user has no permission in object """ user = service.user if not (perm_name is False): if not user.has_perm(perm_name, obj=obj): LOG_PERM.warn( u'User %s has no permission %s. Access to %s with obj=%s', user, perm_name, call_name, obj) raise PermissionDenied(u'User %s has no permission %s for object %s' % (service.user, perm_name, obj)) LOG_PERM.debug( u'User %s was authorized to access %s with permission %s with obj=%s', user, call_name, perm_name, obj)
[ "def", "has_perm", "(", "self", ",", "service", ",", "perm_name", ",", "obj", ",", "call_name", ")", ":", "user", "=", "service", ".", "user", "if", "not", "(", "perm_name", "is", "False", ")", ":", "if", "not", "user", ".", "has_perm", "(", "perm_name", ",", "obj", "=", "obj", ")", ":", "LOG_PERM", ".", "warn", "(", "u'User %s has no permission %s. Access to %s with obj=%s'", ",", "user", ",", "perm_name", ",", "call_name", ",", "obj", ")", "raise", "PermissionDenied", "(", "u'User %s has no permission %s for object %s'", "%", "(", "service", ".", "user", ",", "perm_name", ",", "obj", ")", ")", "LOG_PERM", ".", "debug", "(", "u'User %s was authorized to access %s with permission %s with obj=%s'", ",", "user", ",", "call_name", ",", "perm_name", ",", "obj", ")" ]
Raise PermissionDenied if user has no permission in object
[ "Raise", "PermissionDenied", "if", "user", "has", "no", "permission", "in", "object" ]
python
train
46.533333
rodluger/everest
everest/basecamp.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L232-L241
def mask(self): ''' The array of indices to be masked. This is the union of the sets of outliers, bad (flagged) cadences, transit cadences, and :py:obj:`NaN` cadences. ''' return np.array(list(set(np.concatenate([self.outmask, self.badmask, self.transitmask, self.nanmask]))), dtype=int)
[ "def", "mask", "(", "self", ")", ":", "return", "np", ".", "array", "(", "list", "(", "set", "(", "np", ".", "concatenate", "(", "[", "self", ".", "outmask", ",", "self", ".", "badmask", ",", "self", ".", "transitmask", ",", "self", ".", "nanmask", "]", ")", ")", ")", ",", "dtype", "=", "int", ")" ]
The array of indices to be masked. This is the union of the sets of outliers, bad (flagged) cadences, transit cadences, and :py:obj:`NaN` cadences.
[ "The", "array", "of", "indices", "to", "be", "masked", ".", "This", "is", "the", "union", "of", "the", "sets", "of", "outliers", "bad", "(", "flagged", ")", "cadences", "transit", "cadences", "and", ":", "py", ":", "obj", ":", "NaN", "cadences", "." ]
python
train
35.2
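The union idiom behind this property, shown on toy index arrays:
import numpy as np
outmask, badmask = np.array([3, 7]), np.array([7, 9])
union = np.array(list(set(np.concatenate([outmask, badmask]))), dtype=int)
print(union)  # the unique indices {3, 7, 9}; set() does not guarantee their order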
phuijse/P4J
P4J/generator.py
https://github.com/phuijse/P4J/blob/1ec6b2ac63674ca55aeb2966b9cf40c273d7c203/P4J/generator.py#L135-L165
def irregular_sampling(T, N, rseed=None): """ Generates an irregularly sampled time vector by perturbing a linearly spaced vector and later deleting a certain number of points Parameters ---------- T: float Time span of the vector, i.e. how long it is in time N: positive integer Number of samples of the resulting time vector rseed: Random seed to feed the random number generator Returns ------- t_irr: ndarray An irregularly sampled time vector """ sampling_period = (T/float(N)) N = int(N) np.random.seed(rseed) t = np.linspace(0, T, num=5*N) # First we add jitter t[1:-1] += sampling_period*0.5*np.random.randn(5*N-2) # Then we do a random permutation and keep only N points P = np.random.permutation(5*N) t_irr = np.sort(t[P[:N]]) return t_irr
[ "def", "irregular_sampling", "(", "T", ",", "N", ",", "rseed", "=", "None", ")", ":", "sampling_period", "=", "(", "T", "/", "float", "(", "N", ")", ")", "N", "=", "int", "(", "N", ")", "np", ".", "random", ".", "seed", "(", "rseed", ")", "t", "=", "np", ".", "linspace", "(", "0", ",", "T", ",", "num", "=", "5", "*", "N", ")", "# First we add jitter", "t", "[", "1", ":", "-", "1", "]", "+=", "sampling_period", "*", "0.5", "*", "np", ".", "random", ".", "randn", "(", "5", "*", "N", "-", "2", ")", "# Then we do a random permutation and keep only N points ", "P", "=", "np", ".", "random", ".", "permutation", "(", "5", "*", "N", ")", "t_irr", "=", "np", ".", "sort", "(", "t", "[", "P", "[", ":", "N", "]", "]", ")", "return", "t_irr" ]
Generates an irregularly sampled time vector by perturbing a linearly spaced vector and later deleting a certain number of points Parameters ---------- T: float Time span of the vector, i.e. how long it is in time N: positive integer Number of samples of the resulting time vector rseed: Random seed to feed the random number generator Returns ------- t_irr: ndarray An irregularly sampled time vector
[ "Generates", "an", "irregularly", "sampled", "time", "vector", "by", "perturbating", "a", "linearly", "spaced", "vector", "and", "latter", "deleting", "a", "certain", "number", "of", "points", "Parameters", "----------", "T", ":", "float", "Time", "span", "of", "the", "vector", "i", ".", "e", ".", "how", "long", "it", "is", "in", "time", "N", ":", "positive", "integer", "Number", "of", "samples", "of", "the", "resulting", "time", "vector", "rseed", ":", "Random", "seed", "to", "feed", "the", "random", "number", "generator", "Returns", "-------", "t_irr", ":", "ndarray", "An", "irregulary", "sampled", "time", "vector" ]
python
train
28.258065
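A quick usage sketch, fixing the seed for reproducibility:
t = irregular_sampling(T=100.0, N=50, rseed=0)
print(len(t))         # 50
print(t[0] <= t[-1])  # True: the vector comes back sorted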
googleapis/oauth2client
oauth2client/client.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/client.py#L1896-L1941
def step1_get_authorize_url(self, redirect_uri=None, state=None): """Returns a URI to redirect to the provider. Args: redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for a non-web-based application, or a URI that handles the callback from the authorization server. This parameter is deprecated, please move to passing the redirect_uri in via the constructor. state: string, Opaque state string which is passed through the OAuth2 flow and returned to the client as a query parameter in the callback. Returns: A URI as a string to redirect the user to begin the authorization flow. """ if redirect_uri is not None: logger.warning(( 'The redirect_uri parameter for ' 'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. ' 'Please move to passing the redirect_uri in via the ' 'constructor.')) self.redirect_uri = redirect_uri if self.redirect_uri is None: raise ValueError('The value of redirect_uri must not be None.') query_params = { 'client_id': self.client_id, 'redirect_uri': self.redirect_uri, 'scope': self.scope, } if state is not None: query_params['state'] = state if self.login_hint is not None: query_params['login_hint'] = self.login_hint if self._pkce: if not self.code_verifier: self.code_verifier = _pkce.code_verifier() challenge = _pkce.code_challenge(self.code_verifier) query_params['code_challenge'] = challenge query_params['code_challenge_method'] = 'S256' query_params.update(self.params) return _helpers.update_query_params(self.auth_uri, query_params)
[ "def", "step1_get_authorize_url", "(", "self", ",", "redirect_uri", "=", "None", ",", "state", "=", "None", ")", ":", "if", "redirect_uri", "is", "not", "None", ":", "logger", ".", "warning", "(", "(", "'The redirect_uri parameter for '", "'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. '", "'Please move to passing the redirect_uri in via the '", "'constructor.'", ")", ")", "self", ".", "redirect_uri", "=", "redirect_uri", "if", "self", ".", "redirect_uri", "is", "None", ":", "raise", "ValueError", "(", "'The value of redirect_uri must not be None.'", ")", "query_params", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'redirect_uri'", ":", "self", ".", "redirect_uri", ",", "'scope'", ":", "self", ".", "scope", ",", "}", "if", "state", "is", "not", "None", ":", "query_params", "[", "'state'", "]", "=", "state", "if", "self", ".", "login_hint", "is", "not", "None", ":", "query_params", "[", "'login_hint'", "]", "=", "self", ".", "login_hint", "if", "self", ".", "_pkce", ":", "if", "not", "self", ".", "code_verifier", ":", "self", ".", "code_verifier", "=", "_pkce", ".", "code_verifier", "(", ")", "challenge", "=", "_pkce", ".", "code_challenge", "(", "self", ".", "code_verifier", ")", "query_params", "[", "'code_challenge'", "]", "=", "challenge", "query_params", "[", "'code_challenge_method'", "]", "=", "'S256'", "query_params", ".", "update", "(", "self", ".", "params", ")", "return", "_helpers", ".", "update_query_params", "(", "self", ".", "auth_uri", ",", "query_params", ")" ]
Returns a URI to redirect to the provider. Args: redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for a non-web-based application, or a URI that handles the callback from the authorization server. This parameter is deprecated, please move to passing the redirect_uri in via the constructor. state: string, Opaque state string which is passed through the OAuth2 flow and returned to the client as a query parameter in the callback. Returns: A URI as a string to redirect the user to begin the authorization flow.
[ "Returns", "a", "URI", "to", "redirect", "to", "the", "provider", "." ]
python
valid
43.043478
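A minimal flow sketch; the client id, secret, and scope are placeholders:
from oauth2client.client import OAuth2WebServerFlow
flow = OAuth2WebServerFlow(client_id='your-client-id',
                           client_secret='your-client-secret',
                           scope='https://www.googleapis.com/auth/drive.readonly',
                           redirect_uri='urn:ietf:wg:oauth:2.0:oob')
print(flow.step1_get_authorize_url(state='opaque-state'))  # URI to send the user to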
wdecoster/nanoget
nanoget/extraction_functions.py
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L93-L117
def check_bam(bam, samtype="bam"): """Check if bam file is valid. Bam file should: - exists - has an index (create if necessary) - is sorted by coordinate - has at least one mapped read """ ut.check_existance(bam) samfile = pysam.AlignmentFile(bam, "rb") if not samfile.has_index(): pysam.index(bam) samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index logging.info("Nanoget: No index for bam file could be found, created index.") if not samfile.header['HD']['SO'] == 'coordinate': logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam)) sys.exit("Please use a bam file sorted by coordinate.") if samtype == "bam": logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format( bam, samfile.mapped, samfile.unmapped)) if samfile.mapped == 0: logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam)) sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam)) return samfile
[ "def", "check_bam", "(", "bam", ",", "samtype", "=", "\"bam\"", ")", ":", "ut", ".", "check_existance", "(", "bam", ")", "samfile", "=", "pysam", ".", "AlignmentFile", "(", "bam", ",", "\"rb\"", ")", "if", "not", "samfile", ".", "has_index", "(", ")", ":", "pysam", ".", "index", "(", "bam", ")", "samfile", "=", "pysam", ".", "AlignmentFile", "(", "bam", ",", "\"rb\"", ")", "# Need to reload the samfile after creating index", "logging", ".", "info", "(", "\"Nanoget: No index for bam file could be found, created index.\"", ")", "if", "not", "samfile", ".", "header", "[", "'HD'", "]", "[", "'SO'", "]", "==", "'coordinate'", ":", "logging", ".", "error", "(", "\"Nanoget: Bam file {} not sorted by coordinate!.\"", ".", "format", "(", "bam", ")", ")", "sys", ".", "exit", "(", "\"Please use a bam file sorted by coordinate.\"", ")", "if", "samtype", "==", "\"bam\"", ":", "logging", ".", "info", "(", "\"Nanoget: Bam file {} contains {} mapped and {} unmapped reads.\"", ".", "format", "(", "bam", ",", "samfile", ".", "mapped", ",", "samfile", ".", "unmapped", ")", ")", "if", "samfile", ".", "mapped", "==", "0", ":", "logging", ".", "error", "(", "\"Nanoget: Bam file {} does not contain aligned reads.\"", ".", "format", "(", "bam", ")", ")", "sys", ".", "exit", "(", "\"FATAL: not a single read was mapped in bam file {}\"", ".", "format", "(", "bam", ")", ")", "return", "samfile" ]
Check if bam file is valid. Bam file should: - exists - has an index (create if necessary) - is sorted by coordinate - has at least one mapped read
[ "Check", "if", "bam", "file", "is", "valid", "." ]
python
train
44.68
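A minimal usage sketch; the filename is an assumption:
samfile = check_bam('alignments.sorted.bam')  # exits via sys.exit() on unsorted or empty input
for read in samfile.fetch():                  # safe: an index is guaranteed to exist by now
    pass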
softlayer/softlayer-python
SoftLayer/managers/user.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/user.py#L146-L169
def get_logins(self, user_id, start_date=None): """Gets the login history for a user, default start_date is 30 days ago :param int user_id: User id to get :param string start_date: "%m/%d/%Y %H:%M:%s" formatted string. :returns: list https://softlayer.github.io/reference/datatypes/SoftLayer_User_Customer_Access_Authentication/ Example:: get_logins(123, '04/08/2018 0:0:0') """ if start_date is None: date_object = datetime.datetime.today() - datetime.timedelta(days=30) start_date = date_object.strftime("%m/%d/%Y 0:0:0") date_filter = { 'loginAttempts': { 'createDate': { 'operation': 'greaterThanDate', 'options': [{'name': 'date', 'value': [start_date]}] } } } login_log = self.user_service.getLoginAttempts(id=user_id, filter=date_filter) return login_log
[ "def", "get_logins", "(", "self", ",", "user_id", ",", "start_date", "=", "None", ")", ":", "if", "start_date", "is", "None", ":", "date_object", "=", "datetime", ".", "datetime", ".", "today", "(", ")", "-", "datetime", ".", "timedelta", "(", "days", "=", "30", ")", "start_date", "=", "date_object", ".", "strftime", "(", "\"%m/%d/%Y 0:0:0\"", ")", "date_filter", "=", "{", "'loginAttempts'", ":", "{", "'createDate'", ":", "{", "'operation'", ":", "'greaterThanDate'", ",", "'options'", ":", "[", "{", "'name'", ":", "'date'", ",", "'value'", ":", "[", "start_date", "]", "}", "]", "}", "}", "}", "login_log", "=", "self", ".", "user_service", ".", "getLoginAttempts", "(", "id", "=", "user_id", ",", "filter", "=", "date_filter", ")", "return", "login_log" ]
Gets the login history for a user, default start_date is 30 days ago :param int user_id: User id to get :param string start_date: "%m/%d/%Y %H:%M:%s" formatted string. :returns: list https://softlayer.github.io/reference/datatypes/SoftLayer_User_Customer_Access_Authentication/ Example:: get_logins(123, '04/08/2018 0:0:0')
[ "Gets", "the", "login", "history", "for", "a", "user", "default", "start_date", "is", "30", "days", "ago" ]
python
train
39.833333
jobovy/galpy
galpy/orbit/OrbitTop.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L1907-L2386
def animate(self,*args,**kwargs): #pragma: no cover """ NAME: animate PURPOSE: animate an Orbit INPUT: d1= first dimension to plot ('x', 'y', 'R', 'vR', 'vT', 'z', 'vz', ...); can be list with up to three entries for three subplots d2= second dimension to plot; can be list with up to three entries for three subplots width= (600) width of output div in px height= (400) height of output div in px json_filename= (None) if set, save the data necessary for the figure in this filename (e.g., json_filename= 'orbit_data/orbit.json'); this path is also used in the output HTML, so needs to be accessible load_jslibs= (True) if True, load the require and jQuery Javascript libraries (necessary in Jupyterlab, not necessary but harmless in notebooks; if embedding on a webpage one typically wants to load these libraries in the header) ro= (Object-wide default) physical scale for distances to use to convert vo= (Object-wide default) physical scale for velocities to use to convert use_physical= use to override Object-wide default for using a physical scale for output +kwargs for ra,dec,ll,bb, etc. functions OUTPUT: IPython.display.HTML object with code to animate the orbit; can be directly shown in jupyter notebook or embedded in HTML pages; get a text version of the HTML using the _repr_html_() function HISTORY: 2017-09-17-24 - Written - Bovy (UofT) 2017-11-28 - Allow arbitrary functions of time to be plotted - Bovy (UofT) """ try: from IPython.display import HTML except ImportError: raise ImportError("Orbit.animate requires ipython/jupyter to be installed") if (kwargs.get('use_physical',False) \ and kwargs.get('ro',self._roSet)) or \ (not 'use_physical' in kwargs \ and kwargs.get('ro',self._roSet)): labeldict= {'t':'t (Gyr)', 'R':'R (kpc)', 'vR':'v_R (km/s)', 'vT':'v_T (km/s)', 'z':'z (kpc)', 'vz':'v_z (km/s)', 'phi':'azimuthal angle', 'r':'r (kpc)', 'x':'x (kpc)', 'y':'y (kpc)', 'vx':'v_x (km/s)', 'vy':'v_y (km/s)', 'E':'E (km^2/s^2)', 'Ez':'E_z (km^2/s^2)', 'ER':'E_R (km^2/s^2)', 'Enorm':'E(t)/E(0.)', 'Eznorm':'E_z(t)/E_z(0.)', 'ERnorm':'E_R(t)/E_R(0.)', 'Jacobi':'E-Omega_p L (km^2/s^2)', 'Jacobinorm':'(E-Omega_p L)(t)/(E-Omega_p L)(0)'} else: labeldict= {'t':'t','R':'R','vR':'v_R','vT':'v_T', 'z':'z','vz':'v_z','phi':r'azimuthal angle', 'r':'r', 'x':'x','y':'y','vx':'v_x','vy':'v_y', 'E':'E','Enorm':'E(t)/E(0.)', 'Ez':'E_z','Eznorm':'E_z(t)/E_z(0.)', 'ER':r'E_R','ERnorm':r'E_R(t)/E_R(0.)', 'Jacobi':r'E-Omega_p L', 'Jacobinorm':r'(E-Omega_p L)(t)/(E-Omega_p L)(0)'} labeldict.update({'ra':'RA (deg)', 'dec':'Dec (deg)', 'll':'Galactic lon (deg)', 'bb':'Galactic lat (deg)', 'dist':'distance (kpc)', 'pmra':'pmRA (mas/yr)', 'pmdec':'pmDec (mas/yr)', 'pmll':'pmGlon (mas/yr)', 'pmbb':'pmGlat (mas/yr)', 'vlos':'line-of-sight vel (km/s)', 'helioX':'X (kpc)', 'helioY':'Y (kpc)', 'helioZ':'Z (kpc)', 'U':'U (km/s)', 'V':'V (km/s)', 'W':'W (km/s)'}) # Cannot be using Quantity output kwargs['quantity']= False #Defaults if not 'd1' in kwargs and not 'd2' in kwargs: if len(self.vxvv) == 3: d1= 'R' d2= 'vR' elif len(self.vxvv) == 4: d1= 'x' d2= 'y' elif len(self.vxvv) == 2: d1= 'x' d2= 'vx' elif len(self.vxvv) == 5 or len(self.vxvv) == 6: d1= 'R' d2= 'z' elif not 'd1' in kwargs: d2= kwargs.pop('d2') d1= 't' elif not 'd2' in kwargs: d1= kwargs.pop('d1') d2= 't' else: d1= kwargs.pop('d1') d2= kwargs.pop('d2') xs= [] ys= [] xlabels= [] ylabels= [] if isinstance(d1,str) or callable(d1): d1s= [d1] d2s= [d2] else: d1s= d1 d2s= d2 if len(d1s) > 3: raise ValueError('Orbit.animate only works for up to three subplots') 
all_xlabel= kwargs.get('xlabel',[None for d in d1]) all_ylabel= kwargs.get('ylabel',[None for d in d2]) for d1,d2, xlabel, ylabel in zip(d1s,d2s,all_xlabel,all_ylabel): #Get x and y for each subplot x= self._parse_plot_quantity(d1,**kwargs) y= self._parse_plot_quantity(d2,**kwargs) xs.append(x) ys.append(y) if xlabel is None: xlabels.append(labeldict.get(d1,'\mathrm{No\ xlabel\ specified}')) else: xlabels.append(xlabel) if ylabel is None: ylabels.append(labeldict.get(d2,'\mathrm{No\ ylabel\ specified}')) else: ylabels.append(ylabel) kwargs.pop('ro',None) kwargs.pop('vo',None) kwargs.pop('obs',None) kwargs.pop('use_physical',None) kwargs.pop('pot',None) kwargs.pop('OmegaP',None) kwargs.pop('quantity',None) width= kwargs.pop('width',600) height= kwargs.pop('height',400) load_jslibs= kwargs.pop('load_jslibs',True) if load_jslibs: load_jslibs_code= """</script> <script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.5/require.min.js"></script> <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script> <script> """ else: load_jslibs_code= "" # Dump data to HTML nplots= len(xs) jsonDict= {} jsonDict['x']= xs[0].tolist() jsonDict['y']= ys[0].tolist() for ii in range(1,nplots): jsonDict['x%i' % (ii+1)]= xs[ii].tolist() jsonDict['y%i' % (ii+1)]= ys[ii].tolist() json_filename= kwargs.pop('json_filename',None) if json_filename is None: jd= json.dumps(jsonDict) json_code= """ let data= JSON.parse('{jd}');""".format(jd=jd) close_json_code= "" else: with open(json_filename,'w') as jfile: json.dump(jsonDict,jfile) json_code= """Plotly.d3.json('{jfilename}',function(data){{""".format(jfilename=json_filename) close_json_code= "});" self.divid= 'galpy-'\ +''.join(choice(ascii_lowercase) for i in range(24)) button_width= 419.51+4.*10. button_margin_left= int(nu.round((width-button_width)/2.)) if button_margin_left < 0: button_margin_left= 0 # Layout for multiple plots if len(d1s) == 1: xmin= [0,0,0] xmax= [1,1,1] elif len(d1s) == 2: xmin= [0,0.55,0] xmax= [0.45,1,1] elif len(d1s) == 3: xmin= [0,0.365,0.73] xmax= [0.27,0.635,1] layout= """{{ xaxis: {{ title: '{xlabel}', domain: [{xmin},{xmax}], }}, yaxis: {{title: '{ylabel}'}}, margin: {{t: 20}}, hovermode: 'closest', showlegend: false, """.format(xlabel=xlabels[0],ylabel=ylabels[0],xmin=xmin[0],xmax=xmax[0]) for ii in range(1,nplots): layout+= """ xaxis{idx}: {{ title: '{xlabel}', anchor: 'y{idx}', domain: [{xmin},{xmax}], }}, yaxis{idx}: {{ title: '{ylabel}', anchor: 'x{idx}', }}, """.format(idx=ii+1,xlabel=xlabels[ii],ylabel=ylabels[ii], xmin=xmin[ii],xmax=xmax[ii]) layout+="""}""" # Additional traces for additional plots if len(d1s) > 1: setup_trace2= """ let trace3= {{ x: data.x2.slice(0,numPerFrame), y: data.y2.slice(0,numPerFrame), xaxis: 'x2', yaxis: 'y2', mode: 'lines', line: {{ shape: 'spline', width: 0.8, color: '#1f77b4', }}, }}; let trace4= {{ x: data.x2.slice(0,numPerFrame), y: data.y2.slice(0,numPerFrame), xaxis: 'x2', yaxis: 'y2', mode: 'lines', line: {{ shape: 'spline', width: 3., color: '#d62728', }}, }}; """.format(divid=self.divid) # not used! 
delete_trace4= """Plotly.deleteTraces('{divid}',3);""".format(divid=self.divid) delete_trace3= """Plotly.deleteTraces('{divid}',0);""".format(divid=self.divid) update_trace34= """ trace_slice_begin+= trace_slice_len; Plotly.extendTraces('{divid}', {{ x: [data.x2.slice(trace_slice_begin,trace_slice_end)], y: [data.y2.slice(trace_slice_begin,trace_slice_end)], }}, [2]); trace_slice_begin-= trace_slice_len; trace4= {{ x: [data.x2.slice(trace_slice_begin,trace_slice_end)], y: [data.y2.slice(trace_slice_begin,trace_slice_end)], }}, Plotly.restyle('{divid}',trace4,[3]); """.format(divid=self.divid) else: setup_trace2= """ let traces= [trace1,trace2]; """ delete_trace4= "" delete_trace3= "" update_trace34= "" if len(d1s) > 2: setup_trace3= """ let trace5= {{ x: data.x3.slice(0,numPerFrame), y: data.y3.slice(0,numPerFrame), xaxis: 'x3', yaxis: 'y3', mode: 'lines', line: {{ shape: 'spline', width: 0.8, color: '#1f77b4', }}, }}; let trace6= {{ x: data.x3.slice(0,numPerFrame), y: data.y3.slice(0,numPerFrame), xaxis: 'x3', yaxis: 'y3', mode: 'lines', line: {{ shape: 'spline', width: 3., color: '#d62728', }}, }}; let traces= [trace1,trace2,trace3,trace4,trace5,trace6]; """.format(divid=self.divid) delete_trace6= """Plotly.deleteTraces('{divid}',5);""".format(divid=self.divid) delete_trace5= """Plotly.deleteTraces('{divid}',0);""".format(divid=self.divid) update_trace56= """ trace_slice_begin+= trace_slice_len; Plotly.extendTraces('{divid}', {{ x: [data.x3.slice(trace_slice_begin,trace_slice_end)], y: [data.y3.slice(trace_slice_begin,trace_slice_end)], }}, [4]); trace_slice_begin-= trace_slice_len; trace6= {{ x: [data.x3.slice(trace_slice_begin,trace_slice_end)], y: [data.y3.slice(trace_slice_begin,trace_slice_end)], }}, Plotly.restyle('{divid}',trace6,[5]); """.format(divid=self.divid) elif len(d1s) > 1: setup_trace3= """ let traces= [trace1,trace2,trace3,trace4]; """ delete_trace5= "" delete_trace6= "" update_trace56= "" else: setup_trace3= "" delete_trace5= "" delete_trace6= "" update_trace56= "" return HTML(""" <style> .galpybutton {{ background-color:#ffffff; -moz-border-radius:16px; -webkit-border-radius:16px; border-radius:16px; border:1px solid #1f77b4; display:inline-block; cursor:pointer; color:#1f77b4; font-family:Courier; font-size:17px; padding:8px 10px; text-decoration:none; text-shadow:0px 1px 0px #2f6627; }} .galpybutton:hover {{ background-color:#ffffff; }} .galpybutton:active {{ position:relative; top:1px; }} .galpybutton:focus{{ outline:0; }} </style> <div id='{divid}' style='width:{width}px;height:{height}px;'></div> <div class="controlbutton" id="{divid}-play" style="margin-left:{button_margin_left}px;display: inline-block;"> <button class="galpybutton">Play</button></div> <div class="controlbutton" id="{divid}-pause" style="margin-left:10px;display: inline-block;"> <button class="galpybutton">Pause</button></div> <div class="controlbutton" id="{divid}-timestwo" style="margin-left:10px;display: inline-block;"> <button class="galpybutton">Speed<font face="Arial">&thinsp;</font>x<font face="Arial">&thinsp;</font>2</button></div> <div class="controlbutton" id="{divid}-timeshalf" style="margin-left:10px;display: inline-block;"> <button class="galpybutton">Speed<font face="Arial">&thinsp;</font>/<font face="Arial">&thinsp;</font>2</button></div> <div class="controlbutton" id="{divid}-replay" style="margin-left:10px;display: inline-block;"> <button class="galpybutton">Replay</button></div> <script> require.config({{ paths: {{ Plotly: 'https://cdn.plot.ly/plotly-latest.min', }} }}); 
{load_jslibs_code} require(['Plotly'], function (Plotly) {{ {json_code} let layout = {layout}; let numPerFrame= 5; let cnt= 1; let interval; let trace_slice_len; let trace_slice_begin; let trace_slice_end; setup_trace(); $('.controlbutton button').click(function() {{ let button_type= this.parentNode.id; if ( button_type === '{divid}-play' ) {{ clearInterval(interval); interval= animate_trace(); }} else if ( button_type === '{divid}-pause' ) clearInterval(interval); else if ( button_type === '{divid}-timestwo' ) {{ cnt/= 2; numPerFrame*= 2; }} else if ( button_type === '{divid}-timeshalf' ) {{ cnt*= 2; numPerFrame/= 2; }} else if ( button_type === '{divid}-replay' ) {{ cnt= 1; try {{ // doesn't exist if animation has already ended {delete_trace6} {delete_trace4} Plotly.deleteTraces('{divid}',1); }} catch (err) {{ }} Plotly.deleteTraces('{divid}',0); {delete_trace3} {delete_trace5} clearInterval(interval); setup_trace(); interval= animate_trace(); }} }}); function setup_trace() {{ let trace1= {{ x: data.x.slice(0,numPerFrame), y: data.y.slice(0,numPerFrame), mode: 'lines', line: {{ shape: 'spline', width: 0.8, color: '#1f77b4', }}, }}; let trace2= {{ x: data.x.slice(0,numPerFrame), y: data.y.slice(0,numPerFrame), mode: 'lines', line: {{ shape: 'spline', width: 3., color: '#d62728', }}, }}; {setup_trace2} {setup_trace3} Plotly.plot('{divid}',traces,layout); }} function animate_trace() {{ return setInterval(function() {{ // Make sure narrow and thick trace end in the same // and the highlighted length has constant length trace_slice_len= Math.floor(numPerFrame); if ( trace_slice_len < 1) trace_slice_len= 1; trace_slice_begin= Math.floor(cnt*numPerFrame); trace_slice_end= Math.floor(Math.min(cnt*numPerFrame+trace_slice_len,data.x.length-1)); Plotly.extendTraces('{divid}', {{ x: [data.x.slice(trace_slice_begin,trace_slice_end)], y: [data.y.slice(trace_slice_begin,trace_slice_end)], }}, [0]); trace_slice_begin-= trace_slice_len; trace2= {{ x: [data.x.slice(trace_slice_begin,trace_slice_end)], y: [data.y.slice(trace_slice_begin,trace_slice_end)], }}; Plotly.restyle('{divid}',trace2,[1]); {update_trace34} {update_trace56} cnt+= 1; if(cnt*numPerFrame+trace_slice_len > data.x.length/1) {{ clearInterval(interval); {delete_trace6} {delete_trace4} Plotly.deleteTraces('{divid}',1); }} }}, 30); }} {close_json_code}}}); </script>""".format(json_code=json_code,close_json_code=close_json_code, divid=self.divid,width=width,height=height, button_margin_left=button_margin_left, layout=layout,load_jslibs_code=load_jslibs_code, setup_trace2=setup_trace2,setup_trace3=setup_trace3, delete_trace4=delete_trace4,delete_trace6=delete_trace6, delete_trace3=delete_trace3,delete_trace5=delete_trace5, update_trace34=update_trace34, update_trace56=update_trace56))
[ "def", "animate", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#pragma: no cover", "try", ":", "from", "IPython", ".", "display", "import", "HTML", "except", "ImportError", ":", "raise", "ImportError", "(", "\"Orbit.animate requires ipython/jupyter to be installed\"", ")", "if", "(", "kwargs", ".", "get", "(", "'use_physical'", ",", "False", ")", "and", "kwargs", ".", "get", "(", "'ro'", ",", "self", ".", "_roSet", ")", ")", "or", "(", "not", "'use_physical'", "in", "kwargs", "and", "kwargs", ".", "get", "(", "'ro'", ",", "self", ".", "_roSet", ")", ")", ":", "labeldict", "=", "{", "'t'", ":", "'t (Gyr)'", ",", "'R'", ":", "'R (kpc)'", ",", "'vR'", ":", "'v_R (km/s)'", ",", "'vT'", ":", "'v_T (km/s)'", ",", "'z'", ":", "'z (kpc)'", ",", "'vz'", ":", "'v_z (km/s)'", ",", "'phi'", ":", "'azimuthal angle'", ",", "'r'", ":", "'r (kpc)'", ",", "'x'", ":", "'x (kpc)'", ",", "'y'", ":", "'y (kpc)'", ",", "'vx'", ":", "'v_x (km/s)'", ",", "'vy'", ":", "'v_y (km/s)'", ",", "'E'", ":", "'E (km^2/s^2)'", ",", "'Ez'", ":", "'E_z (km^2/s^2)'", ",", "'ER'", ":", "'E_R (km^2/s^2)'", ",", "'Enorm'", ":", "'E(t)/E(0.)'", ",", "'Eznorm'", ":", "'E_z(t)/E_z(0.)'", ",", "'ERnorm'", ":", "'E_R(t)/E_R(0.)'", ",", "'Jacobi'", ":", "'E-Omega_p L (km^2/s^2)'", ",", "'Jacobinorm'", ":", "'(E-Omega_p L)(t)/(E-Omega_p L)(0)'", "}", "else", ":", "labeldict", "=", "{", "'t'", ":", "'t'", ",", "'R'", ":", "'R'", ",", "'vR'", ":", "'v_R'", ",", "'vT'", ":", "'v_T'", ",", "'z'", ":", "'z'", ",", "'vz'", ":", "'v_z'", ",", "'phi'", ":", "r'azimuthal angle'", ",", "'r'", ":", "'r'", ",", "'x'", ":", "'x'", ",", "'y'", ":", "'y'", ",", "'vx'", ":", "'v_x'", ",", "'vy'", ":", "'v_y'", ",", "'E'", ":", "'E'", ",", "'Enorm'", ":", "'E(t)/E(0.)'", ",", "'Ez'", ":", "'E_z'", ",", "'Eznorm'", ":", "'E_z(t)/E_z(0.)'", ",", "'ER'", ":", "r'E_R'", ",", "'ERnorm'", ":", "r'E_R(t)/E_R(0.)'", ",", "'Jacobi'", ":", "r'E-Omega_p L'", ",", "'Jacobinorm'", ":", "r'(E-Omega_p L)(t)/(E-Omega_p L)(0)'", "}", "labeldict", ".", "update", "(", "{", "'ra'", ":", "'RA (deg)'", ",", "'dec'", ":", "'Dec (deg)'", ",", "'ll'", ":", "'Galactic lon (deg)'", ",", "'bb'", ":", "'Galactic lat (deg)'", ",", "'dist'", ":", "'distance (kpc)'", ",", "'pmra'", ":", "'pmRA (mas/yr)'", ",", "'pmdec'", ":", "'pmDec (mas/yr)'", ",", "'pmll'", ":", "'pmGlon (mas/yr)'", ",", "'pmbb'", ":", "'pmGlat (mas/yr)'", ",", "'vlos'", ":", "'line-of-sight vel (km/s)'", ",", "'helioX'", ":", "'X (kpc)'", ",", "'helioY'", ":", "'Y (kpc)'", ",", "'helioZ'", ":", "'Z (kpc)'", ",", "'U'", ":", "'U (km/s)'", ",", "'V'", ":", "'V (km/s)'", ",", "'W'", ":", "'W (km/s)'", "}", ")", "# Cannot be using Quantity output", "kwargs", "[", "'quantity'", "]", "=", "False", "#Defaults", "if", "not", "'d1'", "in", "kwargs", "and", "not", "'d2'", "in", "kwargs", ":", "if", "len", "(", "self", ".", "vxvv", ")", "==", "3", ":", "d1", "=", "'R'", "d2", "=", "'vR'", "elif", "len", "(", "self", ".", "vxvv", ")", "==", "4", ":", "d1", "=", "'x'", "d2", "=", "'y'", "elif", "len", "(", "self", ".", "vxvv", ")", "==", "2", ":", "d1", "=", "'x'", "d2", "=", "'vx'", "elif", "len", "(", "self", ".", "vxvv", ")", "==", "5", "or", "len", "(", "self", ".", "vxvv", ")", "==", "6", ":", "d1", "=", "'R'", "d2", "=", "'z'", "elif", "not", "'d1'", "in", "kwargs", ":", "d2", "=", "kwargs", ".", "pop", "(", "'d2'", ")", "d1", "=", "'t'", "elif", "not", "'d2'", "in", "kwargs", ":", "d1", "=", "kwargs", ".", "pop", "(", "'d1'", ")", "d2", "=", "'t'", "else", ":", "d1", "=", "kwargs", ".", "pop", 
"(", "'d1'", ")", "d2", "=", "kwargs", ".", "pop", "(", "'d2'", ")", "xs", "=", "[", "]", "ys", "=", "[", "]", "xlabels", "=", "[", "]", "ylabels", "=", "[", "]", "if", "isinstance", "(", "d1", ",", "str", ")", "or", "callable", "(", "d1", ")", ":", "d1s", "=", "[", "d1", "]", "d2s", "=", "[", "d2", "]", "else", ":", "d1s", "=", "d1", "d2s", "=", "d2", "if", "len", "(", "d1s", ")", ">", "3", ":", "raise", "ValueError", "(", "'Orbit.animate only works for up to three subplots'", ")", "all_xlabel", "=", "kwargs", ".", "get", "(", "'xlabel'", ",", "[", "None", "for", "d", "in", "d1", "]", ")", "all_ylabel", "=", "kwargs", ".", "get", "(", "'ylabel'", ",", "[", "None", "for", "d", "in", "d2", "]", ")", "for", "d1", ",", "d2", ",", "xlabel", ",", "ylabel", "in", "zip", "(", "d1s", ",", "d2s", ",", "all_xlabel", ",", "all_ylabel", ")", ":", "#Get x and y for each subplot", "x", "=", "self", ".", "_parse_plot_quantity", "(", "d1", ",", "*", "*", "kwargs", ")", "y", "=", "self", ".", "_parse_plot_quantity", "(", "d2", ",", "*", "*", "kwargs", ")", "xs", ".", "append", "(", "x", ")", "ys", ".", "append", "(", "y", ")", "if", "xlabel", "is", "None", ":", "xlabels", ".", "append", "(", "labeldict", ".", "get", "(", "d1", ",", "'\\mathrm{No\\ xlabel\\ specified}'", ")", ")", "else", ":", "xlabels", ".", "append", "(", "xlabel", ")", "if", "ylabel", "is", "None", ":", "ylabels", ".", "append", "(", "labeldict", ".", "get", "(", "d2", ",", "'\\mathrm{No\\ ylabel\\ specified}'", ")", ")", "else", ":", "ylabels", ".", "append", "(", "ylabel", ")", "kwargs", ".", "pop", "(", "'ro'", ",", "None", ")", "kwargs", ".", "pop", "(", "'vo'", ",", "None", ")", "kwargs", ".", "pop", "(", "'obs'", ",", "None", ")", "kwargs", ".", "pop", "(", "'use_physical'", ",", "None", ")", "kwargs", ".", "pop", "(", "'pot'", ",", "None", ")", "kwargs", ".", "pop", "(", "'OmegaP'", ",", "None", ")", "kwargs", ".", "pop", "(", "'quantity'", ",", "None", ")", "width", "=", "kwargs", ".", "pop", "(", "'width'", ",", "600", ")", "height", "=", "kwargs", ".", "pop", "(", "'height'", ",", "400", ")", "load_jslibs", "=", "kwargs", ".", "pop", "(", "'load_jslibs'", ",", "True", ")", "if", "load_jslibs", ":", "load_jslibs_code", "=", "\"\"\"</script>\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.5/require.min.js\"></script>\n<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js\"></script>\n<script>\n\"\"\"", "else", ":", "load_jslibs_code", "=", "\"\"", "# Dump data to HTML", "nplots", "=", "len", "(", "xs", ")", "jsonDict", "=", "{", "}", "jsonDict", "[", "'x'", "]", "=", "xs", "[", "0", "]", ".", "tolist", "(", ")", "jsonDict", "[", "'y'", "]", "=", "ys", "[", "0", "]", ".", "tolist", "(", ")", "for", "ii", "in", "range", "(", "1", ",", "nplots", ")", ":", "jsonDict", "[", "'x%i'", "%", "(", "ii", "+", "1", ")", "]", "=", "xs", "[", "ii", "]", ".", "tolist", "(", ")", "jsonDict", "[", "'y%i'", "%", "(", "ii", "+", "1", ")", "]", "=", "ys", "[", "ii", "]", ".", "tolist", "(", ")", "json_filename", "=", "kwargs", ".", "pop", "(", "'json_filename'", ",", "None", ")", "if", "json_filename", "is", "None", ":", "jd", "=", "json", ".", "dumps", "(", "jsonDict", ")", "json_code", "=", "\"\"\" let data= JSON.parse('{jd}');\"\"\"", ".", "format", "(", "jd", "=", "jd", ")", "close_json_code", "=", "\"\"", "else", ":", "with", "open", "(", "json_filename", ",", "'w'", ")", "as", "jfile", ":", "json", ".", "dump", "(", "jsonDict", ",", "jfile", ")", "json_code", "=", 
"\"\"\"Plotly.d3.json('{jfilename}',function(data){{\"\"\"", ".", "format", "(", "jfilename", "=", "json_filename", ")", "close_json_code", "=", "\"});\"", "self", ".", "divid", "=", "'galpy-'", "+", "''", ".", "join", "(", "choice", "(", "ascii_lowercase", ")", "for", "i", "in", "range", "(", "24", ")", ")", "button_width", "=", "419.51", "+", "4.", "*", "10.", "button_margin_left", "=", "int", "(", "nu", ".", "round", "(", "(", "width", "-", "button_width", ")", "/", "2.", ")", ")", "if", "button_margin_left", "<", "0", ":", "button_margin_left", "=", "0", "# Layout for multiple plots", "if", "len", "(", "d1s", ")", "==", "1", ":", "xmin", "=", "[", "0", ",", "0", ",", "0", "]", "xmax", "=", "[", "1", ",", "1", ",", "1", "]", "elif", "len", "(", "d1s", ")", "==", "2", ":", "xmin", "=", "[", "0", ",", "0.55", ",", "0", "]", "xmax", "=", "[", "0.45", ",", "1", ",", "1", "]", "elif", "len", "(", "d1s", ")", "==", "3", ":", "xmin", "=", "[", "0", ",", "0.365", ",", "0.73", "]", "xmax", "=", "[", "0.27", ",", "0.635", ",", "1", "]", "layout", "=", "\"\"\"{{\n xaxis: {{\n title: '{xlabel}',\n domain: [{xmin},{xmax}],\n}},\n yaxis: {{title: '{ylabel}'}},\n margin: {{t: 20}},\n hovermode: 'closest',\n showlegend: false,\n\"\"\"", ".", "format", "(", "xlabel", "=", "xlabels", "[", "0", "]", ",", "ylabel", "=", "ylabels", "[", "0", "]", ",", "xmin", "=", "xmin", "[", "0", "]", ",", "xmax", "=", "xmax", "[", "0", "]", ")", "for", "ii", "in", "range", "(", "1", ",", "nplots", ")", ":", "layout", "+=", "\"\"\" xaxis{idx}: {{\n title: '{xlabel}',\n anchor: 'y{idx}',\n domain: [{xmin},{xmax}],\n}},\n yaxis{idx}: {{\n title: '{ylabel}',\n anchor: 'x{idx}',\n}},\n\"\"\"", ".", "format", "(", "idx", "=", "ii", "+", "1", ",", "xlabel", "=", "xlabels", "[", "ii", "]", ",", "ylabel", "=", "ylabels", "[", "ii", "]", ",", "xmin", "=", "xmin", "[", "ii", "]", ",", "xmax", "=", "xmax", "[", "ii", "]", ")", "layout", "+=", "\"\"\"}\"\"\"", "# Additional traces for additional plots", "if", "len", "(", "d1s", ")", ">", "1", ":", "setup_trace2", "=", "\"\"\"\n let trace3= {{\n x: data.x2.slice(0,numPerFrame),\n y: data.y2.slice(0,numPerFrame),\n xaxis: 'x2',\n yaxis: 'y2',\n mode: 'lines',\n line: {{\n shape: 'spline',\n width: 0.8,\n color: '#1f77b4',\n }},\n }};\n\n let trace4= {{\n x: data.x2.slice(0,numPerFrame), \n y: data.y2.slice(0,numPerFrame),\n xaxis: 'x2',\n yaxis: 'y2',\n mode: 'lines',\n line: {{\n shape: 'spline',\n width: 3.,\n color: '#d62728',\n }},\n }};\n\"\"\"", ".", "format", "(", "divid", "=", "self", ".", "divid", ")", "# not used!", "delete_trace4", "=", "\"\"\"Plotly.deleteTraces('{divid}',3);\"\"\"", ".", "format", "(", "divid", "=", "self", ".", "divid", ")", "delete_trace3", "=", "\"\"\"Plotly.deleteTraces('{divid}',0);\"\"\"", ".", "format", "(", "divid", "=", "self", ".", "divid", ")", "update_trace34", "=", "\"\"\"\n trace_slice_begin+= trace_slice_len;\n Plotly.extendTraces('{divid}', {{\n x: [data.x2.slice(trace_slice_begin,trace_slice_end)],\n y: [data.y2.slice(trace_slice_begin,trace_slice_end)],\n }}, [2]);\n\n trace_slice_begin-= trace_slice_len;\n trace4= {{\n x: [data.x2.slice(trace_slice_begin,trace_slice_end)], \n y: [data.y2.slice(trace_slice_begin,trace_slice_end)],\n }},\n Plotly.restyle('{divid}',trace4,[3]);\n\"\"\"", ".", "format", "(", "divid", "=", "self", ".", "divid", ")", "else", ":", "setup_trace2", "=", "\"\"\"\n let traces= [trace1,trace2];\n\"\"\"", "delete_trace4", "=", "\"\"", "delete_trace3", "=", "\"\"", "update_trace34", "=", "\"\"", "if", "len", "(", 
"d1s", ")", ">", "2", ":", "setup_trace3", "=", "\"\"\"\n let trace5= {{\n x: data.x3.slice(0,numPerFrame),\n y: data.y3.slice(0,numPerFrame),\n xaxis: 'x3',\n yaxis: 'y3',\n mode: 'lines',\n line: {{\n shape: 'spline',\n width: 0.8,\n color: '#1f77b4',\n }},\n }};\n\n let trace6= {{\n x: data.x3.slice(0,numPerFrame), \n y: data.y3.slice(0,numPerFrame),\n xaxis: 'x3',\n yaxis: 'y3',\n mode: 'lines',\n line: {{\n shape: 'spline',\n width: 3.,\n color: '#d62728',\n }},\n }};\n\n let traces= [trace1,trace2,trace3,trace4,trace5,trace6];\n\"\"\"", ".", "format", "(", "divid", "=", "self", ".", "divid", ")", "delete_trace6", "=", "\"\"\"Plotly.deleteTraces('{divid}',5);\"\"\"", ".", "format", "(", "divid", "=", "self", ".", "divid", ")", "delete_trace5", "=", "\"\"\"Plotly.deleteTraces('{divid}',0);\"\"\"", ".", "format", "(", "divid", "=", "self", ".", "divid", ")", "update_trace56", "=", "\"\"\"\n trace_slice_begin+= trace_slice_len;\n Plotly.extendTraces('{divid}', {{\n x: [data.x3.slice(trace_slice_begin,trace_slice_end)],\n y: [data.y3.slice(trace_slice_begin,trace_slice_end)],\n }}, [4]);\n\n trace_slice_begin-= trace_slice_len;\n trace6= {{\n x: [data.x3.slice(trace_slice_begin,trace_slice_end)], \n y: [data.y3.slice(trace_slice_begin,trace_slice_end)],\n }},\n Plotly.restyle('{divid}',trace6,[5]);\n\"\"\"", ".", "format", "(", "divid", "=", "self", ".", "divid", ")", "elif", "len", "(", "d1s", ")", ">", "1", ":", "setup_trace3", "=", "\"\"\"\n let traces= [trace1,trace2,trace3,trace4];\n\"\"\"", "delete_trace5", "=", "\"\"", "delete_trace6", "=", "\"\"", "update_trace56", "=", "\"\"", "else", ":", "setup_trace3", "=", "\"\"", "delete_trace5", "=", "\"\"", "delete_trace6", "=", "\"\"", "update_trace56", "=", "\"\"", "return", "HTML", "(", "\"\"\"\n<style>\n.galpybutton {{\n background-color:#ffffff;\n -moz-border-radius:16px;\n -webkit-border-radius:16px;\n border-radius:16px;\n border:1px solid #1f77b4;\n display:inline-block;\n cursor:pointer;\n color:#1f77b4;\n font-family:Courier;\n font-size:17px;\n padding:8px 10px;\n text-decoration:none;\n text-shadow:0px 1px 0px #2f6627;\n}}\n.galpybutton:hover {{\n background-color:#ffffff;\n}}\n.galpybutton:active {{\n position:relative;\n top:1px;\n}}\n.galpybutton:focus{{\n outline:0;\n}}\n</style>\n\n<div id='{divid}' style='width:{width}px;height:{height}px;'></div>\n<div class=\"controlbutton\" id=\"{divid}-play\" style=\"margin-left:{button_margin_left}px;display: inline-block;\">\n<button class=\"galpybutton\">Play</button></div>\n<div class=\"controlbutton\" id=\"{divid}-pause\" style=\"margin-left:10px;display: inline-block;\">\n<button class=\"galpybutton\">Pause</button></div>\n<div class=\"controlbutton\" id=\"{divid}-timestwo\" style=\"margin-left:10px;display: inline-block;\">\n<button class=\"galpybutton\">Speed<font face=\"Arial\">&thinsp;</font>x<font face=\"Arial\">&thinsp;</font>2</button></div>\n<div class=\"controlbutton\" id=\"{divid}-timeshalf\" style=\"margin-left:10px;display: inline-block;\">\n<button class=\"galpybutton\">Speed<font face=\"Arial\">&thinsp;</font>/<font face=\"Arial\">&thinsp;</font>2</button></div>\n<div class=\"controlbutton\" id=\"{divid}-replay\" style=\"margin-left:10px;display: inline-block;\">\n<button class=\"galpybutton\">Replay</button></div>\n\n<script>\nrequire.config({{\n paths: {{\n Plotly: 'https://cdn.plot.ly/plotly-latest.min',\n }}\n}});\n{load_jslibs_code}\nrequire(['Plotly'], function (Plotly) {{\n{json_code}\n let layout = {layout};\n let numPerFrame= 5; \n let cnt= 1;\n let 
interval;\n let trace_slice_len;\n let trace_slice_begin;\n let trace_slice_end;\n\n setup_trace();\n \n $('.controlbutton button').click(function() {{\n let button_type= this.parentNode.id;\n if ( button_type === '{divid}-play' ) {{\n clearInterval(interval);\n interval= animate_trace();\n }}\n else if ( button_type === '{divid}-pause' )\n clearInterval(interval);\n else if ( button_type === '{divid}-timestwo' ) {{\n cnt/= 2;\n numPerFrame*= 2;\n }}\n else if ( button_type === '{divid}-timeshalf' ) {{\n cnt*= 2;\n numPerFrame/= 2;\n }}\n else if ( button_type === '{divid}-replay' ) {{\n cnt= 1;\n try {{ // doesn't exist if animation has already ended\n {delete_trace6}\n {delete_trace4}\n Plotly.deleteTraces('{divid}',1);\n }}\n catch (err) {{\n }}\n Plotly.deleteTraces('{divid}',0);\n {delete_trace3}\n {delete_trace5}\n clearInterval(interval);\n setup_trace();\n interval= animate_trace();\n }}\n }});\n \n function setup_trace() {{\n let trace1= {{\n x: data.x.slice(0,numPerFrame), \n y: data.y.slice(0,numPerFrame),\n mode: 'lines',\n line: {{\n shape: 'spline',\n width: 0.8,\n color: '#1f77b4',\n }},\n }};\n\n let trace2= {{\n x: data.x.slice(0,numPerFrame), \n y: data.y.slice(0,numPerFrame),\n mode: 'lines',\n line: {{\n shape: 'spline',\n width: 3.,\n color: '#d62728',\n }},\n }};\n\n {setup_trace2}\n\n {setup_trace3}\n\n Plotly.plot('{divid}',traces,layout);\n }}\n\n function animate_trace() {{\n return setInterval(function() {{\n // Make sure narrow and thick trace end in the same \n // and the highlighted length has constant length\n trace_slice_len= Math.floor(numPerFrame);\n if ( trace_slice_len < 1) trace_slice_len= 1;\n trace_slice_begin= Math.floor(cnt*numPerFrame);\n trace_slice_end= Math.floor(Math.min(cnt*numPerFrame+trace_slice_len,data.x.length-1));\n Plotly.extendTraces('{divid}', {{\n x: [data.x.slice(trace_slice_begin,trace_slice_end)],\n y: [data.y.slice(trace_slice_begin,trace_slice_end)],\n }}, [0]);\n trace_slice_begin-= trace_slice_len;\n trace2= {{\n x: [data.x.slice(trace_slice_begin,trace_slice_end)], \n y: [data.y.slice(trace_slice_begin,trace_slice_end)],\n }};\n Plotly.restyle('{divid}',trace2,[1]);\n\n {update_trace34}\n {update_trace56}\n cnt+= 1;\n if(cnt*numPerFrame+trace_slice_len > data.x.length/1) {{\n clearInterval(interval);\n {delete_trace6}\n {delete_trace4}\n Plotly.deleteTraces('{divid}',1);\n }}\n }}, 30);\n }}\n{close_json_code}}});\n</script>\"\"\"", ".", "format", "(", "json_code", "=", "json_code", ",", "close_json_code", "=", "close_json_code", ",", "divid", "=", "self", ".", "divid", ",", "width", "=", "width", ",", "height", "=", "height", ",", "button_margin_left", "=", "button_margin_left", ",", "layout", "=", "layout", ",", "load_jslibs_code", "=", "load_jslibs_code", ",", "setup_trace2", "=", "setup_trace2", ",", "setup_trace3", "=", "setup_trace3", ",", "delete_trace4", "=", "delete_trace4", ",", "delete_trace6", "=", "delete_trace6", ",", "delete_trace3", "=", "delete_trace3", ",", "delete_trace5", "=", "delete_trace5", ",", "update_trace34", "=", "update_trace34", ",", "update_trace56", "=", "update_trace56", ")", ")" ]
NAME: animate PURPOSE: animate an Orbit INPUT: d1= first dimension to plot ('x', 'y', 'R', 'vR', 'vT', 'z', 'vz', ...); can be list with up to three entries for three subplots d2= second dimension to plot; can be list with up to three entries for three subplots width= (600) width of output div in px height= (400) height of output div in px json_filename= (None) if set, save the data necessary for the figure in this filename (e.g., json_filename= 'orbit_data/orbit.json'); this path is also used in the output HTML, so needs to be accessible load_jslibs= (True) if True, load the require and jQuery Javascript libraries (necessary in Jupyterlab, not necessary but harmless in notebooks; if embedding on a webpage one typically wants to load these libraries in the header) ro= (Object-wide default) physical scale for distances to use to convert vo= (Object-wide default) physical scale for velocities to use to convert use_physical= use to override Object-wide default for using a physical scale for output +kwargs for ra,dec,ll,bb, etc. functions OUTPUT: IPython.display.HTML object with code to animate the orbit; can be directly shown in jupyter notebook or embedded in HTML pages; get a text version of the HTML using the _repr_html_() function HISTORY: 2017-09-17-24 - Written - Bovy (UofT) 2017-11-28 - Allow arbitrary functions of time to be plotted - Bovy (UofT)
[ "NAME", ":", "animate", "PURPOSE", ":", "animate", "an", "Orbit", "INPUT", ":", "d1", "=", "first", "dimension", "to", "plot", "(", "x", "y", "R", "vR", "vT", "z", "vz", "...", ")", ";", "can", "be", "list", "with", "up", "to", "three", "entries", "for", "three", "subplots", "d2", "=", "second", "dimension", "to", "plot", ";", "can", "be", "list", "with", "up", "to", "three", "entries", "for", "three", "subplots", "width", "=", "(", "600", ")", "width", "of", "output", "div", "in", "px", "height", "=", "(", "400", ")", "height", "of", "output", "div", "in", "px", "json_filename", "=", "(", "None", ")", "if", "set", "save", "the", "data", "necessary", "for", "the", "figure", "in", "this", "filename", "(", "e", ".", "g", ".", "json_filename", "=", "orbit_data", "/", "orbit", ".", "json", ")", ";", "this", "path", "is", "also", "used", "in", "the", "output", "HTML", "so", "needs", "to", "be", "accessible", "load_jslibs", "=", "(", "True", ")", "if", "True", "load", "the", "require", "and", "jQuery", "Javascript", "libraries", "(", "necessary", "in", "Jupyterlab", "not", "necessary", "but", "harmless", "in", "notebooks", ";", "if", "embedding", "on", "a", "webpage", "one", "typically", "wants", "to", "load", "these", "libraries", "in", "the", "header", ")", "ro", "=", "(", "Object", "-", "wide", "default", ")", "physical", "scale", "for", "distances", "to", "use", "to", "convert", "vo", "=", "(", "Object", "-", "wide", "default", ")", "physical", "scale", "for", "velocities", "to", "use", "to", "convert", "use_physical", "=", "use", "to", "override", "Object", "-", "wide", "default", "for", "using", "a", "physical", "scale", "for", "output" ]
python
train
34.539583
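A hedged usage sketch for the animate method above (assumes galpy is installed; run inside a Jupyter notebook so the returned IPython HTML object renders; the orbit and potential are arbitrary examples):

import numpy
from galpy.orbit import Orbit
from galpy.potential import MWPotential2014

# Integrate an orbit given in natural units as (R, vR, vT, z, vz, phi)
o = Orbit([1., 0.1, 1.1, 0., 0.1, 0.])
ts = numpy.linspace(0., 100., 2001)
o.integrate(ts, MWPotential2014)
# Two side-by-side subplots: (x, y) and (R, z)
o.animate(d1=['x', 'R'], d2=['y', 'z'], width=800)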
Yubico/yubikey-manager
ykman/cli/piv.py
https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/piv.py#L833-L857
def read_object(ctx, pin, object_id):
    """
    Read arbitrary PIV object.

    Read PIV object by providing the object id.

    \b
    OBJECT-ID       Id of PIV object in HEX.
    """
    controller = ctx.obj['controller']

    def do_read_object(retry=True):
        try:
            click.echo(controller.get_data(object_id))
        except APDUError as e:
            if e.sw == SW.NOT_FOUND:
                ctx.fail('No data found.')
            elif e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED and retry:
                _verify_pin(ctx, controller, pin)
                do_read_object(retry=False)
            else:
                raise

    do_read_object()
[ "def", "read_object", "(", "ctx", ",", "pin", ",", "object_id", ")", ":", "controller", "=", "ctx", ".", "obj", "[", "'controller'", "]", "def", "do_read_object", "(", "retry", "=", "True", ")", ":", "try", ":", "click", ".", "echo", "(", "controller", ".", "get_data", "(", "object_id", ")", ")", "except", "APDUError", "as", "e", ":", "if", "e", ".", "sw", "==", "SW", ".", "NOT_FOUND", ":", "ctx", ".", "fail", "(", "'No data found.'", ")", "elif", "e", ".", "sw", "==", "SW", ".", "SECURITY_CONDITION_NOT_SATISFIED", "and", "retry", ":", "_verify_pin", "(", "ctx", ",", "controller", ",", "pin", ")", "do_read_object", "(", "retry", "=", "False", ")", "else", ":", "raise", "do_read_object", "(", ")" ]
Read arbitrary PIV object. Read PIV object by providing the object id. \b OBJECT-ID Id of PIV object in HEX.
[ "Read", "arbitrary", "PIV", "object", "." ]
python
train
25.44
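A hedged sketch of exercising this click command through click's test runner. The parent CLI group is assumed to populate ctx.obj['controller'] with a connected PIV controller, and the -P/--pin option name is assumed from the surrounding module; 5fc102 is the PIV CHUID object id:

from click.testing import CliRunner
from ykman.cli.piv import read_object

runner = CliRunner()
# Without the group-level setup this invocation fails on the missing
# controller, so in practice the command runs through the full `ykman piv` CLI.
result = runner.invoke(read_object, ['-P', '123456', '5fc102'], obj={})
print(result.output)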
shaypal5/pdutil
pdutil/iter/iter.py
https://github.com/shaypal5/pdutil/blob/231059634643af2558d22070f89767410978cf56/pdutil/iter/iter.py#L4-L32
def sub_dfs_by_size(df, size):
    """Get a generator yielding consecutive sub-dataframes of the given size.

    Arguments
    ---------
    df : pandas.DataFrame
        The dataframe for which to get sub-dataframes.
    size : int
        The size of each sub-dataframe.

    Returns
    -------
    generator
        A generator yielding consecutive sub-dataframes of the given size.

    Example
    -------
    >>> import pandas as pd; import pdutil;
    >>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
    >>> df = pd.DataFrame(data, columns=['age', 'name'])
    >>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf)
       age name
    0   23  Jen
    1   42  Ray
       age name
    2   15  Fin
    """
    for i in range(0, len(df), size):
        yield (df.iloc[i:i + size])
[ "def", "sub_dfs_by_size", "(", "df", ",", "size", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "df", ")", ",", "size", ")", ":", "yield", "(", "df", ".", "iloc", "[", "i", ":", "i", "+", "size", "]", ")" ]
Get a generator yielding consecutive sub-dataframes of the given size.

Arguments
---------
df : pandas.DataFrame
    The dataframe for which to get sub-dataframes.
size : int
    The size of each sub-dataframe.

Returns
-------
generator
    A generator yielding consecutive sub-dataframes of the given size.

Example
-------
>>> import pandas as pd; import pdutil;
>>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
>>> df = pd.DataFrame(data, columns=['age', 'name'])
>>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf)
   age name
0   23  Jen
1   42  Ray
   age name
2   15  Fin
[ "Get", "a", "generator", "yielding", "consecutive", "sub", "-", "dataframes", "of", "the", "given", "size", "." ]
python
train
26.586207
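A minimal complement to the doctest above, collecting the chunks for batched processing (assumes pandas and pdutil are installed):

import pandas as pd
import pdutil

df = pd.DataFrame({'age': [23, 42, 15, 8], 'name': ['Jen', 'Ray', 'Fin', 'Kim']})
chunks = list(pdutil.iter.sub_dfs_by_size(df, 3))
# ceil(4 / 3) == 2 pieces; the last one holds the remainder row
assert len(chunks) == 2 and len(chunks[1]) == 1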
facelessuser/backrefs
backrefs/uniprops/__init__.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/uniprops/__init__.py#L154-L165
def get_hangul_syllable_type_property(value, is_bytes=False): """Get `HANGUL SYLLABLE TYPE` property.""" obj = unidata.ascii_hangul_syllable_type if is_bytes else unidata.unicode_hangul_syllable_type if value.startswith('^'): negated = value[1:] value = '^' + unidata.unicode_alias['hangulsyllabletype'].get(negated, negated) else: value = unidata.unicode_alias['hangulsyllabletype'].get(value, value) return obj[value]
[ "def", "get_hangul_syllable_type_property", "(", "value", ",", "is_bytes", "=", "False", ")", ":", "obj", "=", "unidata", ".", "ascii_hangul_syllable_type", "if", "is_bytes", "else", "unidata", ".", "unicode_hangul_syllable_type", "if", "value", ".", "startswith", "(", "'^'", ")", ":", "negated", "=", "value", "[", "1", ":", "]", "value", "=", "'^'", "+", "unidata", ".", "unicode_alias", "[", "'hangulsyllabletype'", "]", ".", "get", "(", "negated", ",", "negated", ")", "else", ":", "value", "=", "unidata", ".", "unicode_alias", "[", "'hangulsyllabletype'", "]", ".", "get", "(", "value", ",", "value", ")", "return", "obj", "[", "value", "]" ]
Get `HANGUL SYLLABLE TYPE` property.
[ "Get", "HANGUL", "SYLLABLE", "TYPE", "property", "." ]
python
train
37.916667
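A hedged sketch of calling this helper directly. The alias 'l' for Leading_Jamo and the module-level access path are assumptions inferred from this file's conventions:

from backrefs import uniprops

# Positive and negated lookups return strings of codepoint ranges that can
# be dropped into a regex character class.
leading = uniprops.get_hangul_syllable_type_property('l')
not_leading = uniprops.get_hangul_syllable_type_property('^l')
assert isinstance(leading, str)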
jtpaasch/simplygithub
simplygithub/internals/api.py
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/internals/api.py#L119-L144
def post_request(profile, resource, payload): """Do a POST request to Github's API. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. resource The part of a Github API URL that comes after ``.../:repo/git``. For instance, for ``.../:repo/git/commits``, it's ``/commits``. payload A dict of values to send as the payload of the POST request. The data will be JSON-encoded. Returns: The body of the response, converted from JSON into a Python dict. """ url = get_url(profile, resource) headers = get_headers(profile) response = requests.post(url, json=payload, headers=headers) return response.json()
[ "def", "post_request", "(", "profile", ",", "resource", ",", "payload", ")", ":", "url", "=", "get_url", "(", "profile", ",", "resource", ")", "headers", "=", "get_headers", "(", "profile", ")", "response", "=", "requests", ".", "post", "(", "url", ",", "json", "=", "payload", ",", "headers", "=", "headers", ")", "return", "response", ".", "json", "(", ")" ]
Do a POST request to Github's API. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. resource The part of a Github API URL that comes after ``.../:repo/git``. For instance, for ``.../:repo/git/commits``, it's ``/commits``. payload A dict of values to send as the payload of the POST request. The data will be JSON-encoded. Returns: The body of the response, converted from JSON into a Python dict.
[ "Do", "a", "POST", "request", "to", "Github", "s", "API", "." ]
python
train
33.076923
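A hedged usage sketch: creating a blob via the generic POST helper. The profile dict shape is taken from this function's docstring, and the token is a placeholder:

from simplygithub.internals import api

profile = {'repo': 'someuser/somerepo', 'token': '<oauth-token>'}
payload = {'content': 'Hello, world!', 'encoding': 'utf-8'}
blob = api.post_request(profile, 'blobs', payload)  # POSTs to .../git/blobs
print(blob.get('sha'))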
codelv/enaml-native
src/enamlnative/android/android_list_view.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_list_view.py#L268-L290
def _on_items_changed(self, change): """ Observe container events on the items list and update the adapter appropriately. """ if change['type'] != 'container': return op = change['operation'] if op == 'append': i = len(change['value'])-1 self.adapter.notifyItemInserted(i) elif op == 'insert': self.adapter.notifyItemInserted(change['index']) elif op in ('pop', '__delitem__'): self.adapter.notifyItemRemoved(change['index']) elif op == '__setitem__': self.adapter.notifyItemChanged(change['index']) elif op == 'extend': n = len(change['items']) i = len(change['value'])-n self.adapter.notifyItemRangeInserted(i, n) elif op in ('remove', 'reverse', 'sort'): # Reset everything for these self.adapter.notifyDataSetChanged()
[ "def", "_on_items_changed", "(", "self", ",", "change", ")", ":", "if", "change", "[", "'type'", "]", "!=", "'container'", ":", "return", "op", "=", "change", "[", "'operation'", "]", "if", "op", "==", "'append'", ":", "i", "=", "len", "(", "change", "[", "'value'", "]", ")", "-", "1", "self", ".", "adapter", ".", "notifyItemInserted", "(", "i", ")", "elif", "op", "==", "'insert'", ":", "self", ".", "adapter", ".", "notifyItemInserted", "(", "change", "[", "'index'", "]", ")", "elif", "op", "in", "(", "'pop'", ",", "'__delitem__'", ")", ":", "self", ".", "adapter", ".", "notifyItemRemoved", "(", "change", "[", "'index'", "]", ")", "elif", "op", "==", "'__setitem__'", ":", "self", ".", "adapter", ".", "notifyItemChanged", "(", "change", "[", "'index'", "]", ")", "elif", "op", "==", "'extend'", ":", "n", "=", "len", "(", "change", "[", "'items'", "]", ")", "i", "=", "len", "(", "change", "[", "'value'", "]", ")", "-", "n", "self", ".", "adapter", ".", "notifyItemRangeInserted", "(", "i", ",", "n", ")", "elif", "op", "in", "(", "'remove'", ",", "'reverse'", ",", "'sort'", ")", ":", "# Reset everything for these", "self", ".", "adapter", ".", "notifyDataSetChanged", "(", ")" ]
Observe container events on the items list and update the adapter appropriately.
[ "Observe", "container", "events", "on", "the", "items", "list", "and", "update", "the", "adapter", "appropriately", "." ]
python
train
40.043478
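To make the observer's contract concrete, a hedged sketch of the Atom ContainerList change dicts it receives and the adapter calls they map to (field names follow atom's container notifications):

append_change = {
    'type': 'container', 'operation': 'append',
    'value': ['item0', 'item1'],            # the list *after* the append
}
# -> adapter.notifyItemInserted(len(value) - 1)

setitem_change = {
    'type': 'container', 'operation': '__setitem__',
    'index': 0, 'value': ['new0', 'item1'],
}
# -> adapter.notifyItemChanged(0)

sort_change = {'type': 'container', 'operation': 'sort', 'value': ['a', 'b']}
# -> adapter.notifyDataSetChanged()  (full reset)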
apache/incubator-mxnet
python/mxnet/optimizer/optimizer.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/optimizer/optimizer.py#L266-L291
def update_multi_precision(self, index, weight, grad, state): """Updates the given parameter using the corresponding gradient and state. Mixed precision version. Parameters ---------- index : int The unique index of the parameter into the individual learning rates and weight decays. Learning rates and weight decay may be set via `set_lr_mult()` and `set_wd_mult()`, respectively. weight : NDArray The parameter to be updated. grad : NDArray The gradient of the objective with respect to this parameter. state : any obj The state returned by `create_state()`. """ if self.multi_precision and weight.dtype == numpy.float16: # Wrapper for mixed precision weight_master_copy = state[0] original_state = state[1] grad32 = grad.astype(numpy.float32) self.update(index, weight_master_copy, grad32, original_state) cast(weight_master_copy, dtype=weight.dtype, out=weight) else: self.update(index, weight, grad, state)
[ "def", "update_multi_precision", "(", "self", ",", "index", ",", "weight", ",", "grad", ",", "state", ")", ":", "if", "self", ".", "multi_precision", "and", "weight", ".", "dtype", "==", "numpy", ".", "float16", ":", "# Wrapper for mixed precision", "weight_master_copy", "=", "state", "[", "0", "]", "original_state", "=", "state", "[", "1", "]", "grad32", "=", "grad", ".", "astype", "(", "numpy", ".", "float32", ")", "self", ".", "update", "(", "index", ",", "weight_master_copy", ",", "grad32", ",", "original_state", ")", "cast", "(", "weight_master_copy", ",", "dtype", "=", "weight", ".", "dtype", ",", "out", "=", "weight", ")", "else", ":", "self", ".", "update", "(", "index", ",", "weight", ",", "grad", ",", "state", ")" ]
Updates the given parameter using the corresponding gradient and state. Mixed precision version. Parameters ---------- index : int The unique index of the parameter into the individual learning rates and weight decays. Learning rates and weight decay may be set via `set_lr_mult()` and `set_wd_mult()`, respectively. weight : NDArray The parameter to be updated. grad : NDArray The gradient of the objective with respect to this parameter. state : any obj The state returned by `create_state()`.
[ "Updates", "the", "given", "parameter", "using", "the", "corresponding", "gradient", "and", "state", ".", "Mixed", "precision", "version", "." ]
python
train
43.5
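A minimal sketch of the fp16 path (assumes MXNet 1.x; create_state_multi_precision returns the (float32 master copy, inner state) pair consumed above):

import numpy
import mxnet as mx

opt = mx.optimizer.SGD(learning_rate=0.1, multi_precision=True)
weight = mx.nd.ones((4,), dtype=numpy.float16)
grad = mx.nd.ones((4,), dtype=numpy.float16)
state = opt.create_state_multi_precision(0, weight)
opt.update_multi_precision(0, weight, grad, state)
print(weight.asnumpy())  # updated in float32, cast back to float16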
ClericPy/torequests
torequests/crawlers.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L282-L323
def reset_new_request(self):
    """Remove the needless args recorded in self.ignore, then return self.new_request"""
    raw_url = self.new_request['url']
    parsed_url = urlparse(raw_url)
    qsl = parse_qsl(parsed_url.query)
    new_url = self._join_url(
        parsed_url, [i for i in qsl if i not in self.ignore['qsl']])
    self.new_request['url'] = new_url
    self.logger_function('ignore: %s' % self.ignore)

    for key in self.ignore['headers']:
        self.new_request['headers'].pop(key)
    if not self.new_request.get('headers'):
        self.new_request.pop('headers', None)
    if self.ignore['Cookie'] and 'Cookie' not in self.ignore['headers']:
        headers = self.new_request['headers']
        headers = {key.title(): headers[key] for key in headers}
        if 'Cookie' in headers:
            cookies = SimpleCookie(headers['Cookie'])
            new_cookie = '; '.join([
                i[1].OutputString()
                for i in cookies.items()
                if i[0] not in self.ignore['Cookie']
            ])
            self.new_request['headers']['Cookie'] = new_cookie
    if self.new_request['method'] == 'post':
        data = self.new_request.get('data')
        if data:
            if isinstance(data, dict):
                for key in self.ignore['form_data']:
                    data.pop(key)
            if (not data) or self.ignore['total_data']:
                # data is no longer needed
                self.new_request.pop('data', None)
            if self.has_json_data and 'data' in self.new_request:
                json_data = json.loads(data.decode(self.encoding))
                for key in self.ignore['json_data']:
                    json_data.pop(key)
                self.new_request['data'] = json.dumps(json_data).encode(
                    self.encoding)
    return self.new_request
[ "def", "reset_new_request", "(", "self", ")", ":", "raw_url", "=", "self", ".", "new_request", "[", "'url'", "]", "parsed_url", "=", "urlparse", "(", "raw_url", ")", "qsl", "=", "parse_qsl", "(", "parsed_url", ".", "query", ")", "new_url", "=", "self", ".", "_join_url", "(", "parsed_url", ",", "[", "i", "for", "i", "in", "qsl", "if", "i", "not", "in", "self", ".", "ignore", "[", "'qsl'", "]", "]", ")", "self", ".", "new_request", "[", "'url'", "]", "=", "new_url", "self", ".", "logger_function", "(", "'ignore: %s'", "%", "self", ".", "ignore", ")", "for", "key", "in", "self", ".", "ignore", "[", "'headers'", "]", ":", "self", ".", "new_request", "[", "'headers'", "]", ".", "pop", "(", "key", ")", "if", "not", "self", ".", "new_request", ".", "get", "(", "'headers'", ")", ":", "self", ".", "new_request", ".", "pop", "(", "'headers'", ",", "None", ")", "if", "self", ".", "ignore", "[", "'Cookie'", "]", "and", "'Cookie'", "not", "in", "self", ".", "ignore", "[", "'headers'", "]", ":", "headers", "=", "self", ".", "new_request", "[", "'headers'", "]", "headers", "=", "{", "key", ".", "title", "(", ")", ":", "headers", "[", "key", "]", "for", "key", "in", "headers", "}", "if", "'Cookie'", "in", "headers", ":", "cookies", "=", "SimpleCookie", "(", "headers", "[", "'Cookie'", "]", ")", "new_cookie", "=", "'; '", ".", "join", "(", "[", "i", "[", "1", "]", ".", "OutputString", "(", ")", "for", "i", "in", "cookies", ".", "items", "(", ")", "if", "i", "[", "0", "]", "not", "in", "self", ".", "ignore", "[", "'Cookie'", "]", "]", ")", "self", ".", "new_request", "[", "'headers'", "]", "[", "'Cookie'", "]", "=", "new_cookie", "if", "self", ".", "new_request", "[", "'method'", "]", "==", "'post'", ":", "data", "=", "self", ".", "new_request", ".", "get", "(", "'data'", ")", "if", "data", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "for", "key", "in", "self", ".", "ignore", "[", "'form_data'", "]", ":", "data", ".", "pop", "(", "key", ")", "if", "(", "not", "data", ")", "or", "self", ".", "ignore", "[", "'total_data'", "]", ":", "# data is no longer needed", "self", ".", "new_request", ".", "pop", "(", "'data'", ",", "None", ")", "if", "self", ".", "has_json_data", "and", "'data'", "in", "self", ".", "new_request", ":", "json_data", "=", "json", ".", "loads", "(", "data", ".", "decode", "(", "self", ".", "encoding", ")", ")", "for", "key", "in", "self", ".", "ignore", "[", "'json_data'", "]", ":", "json_data", ".", "pop", "(", "key", ")", "self", ".", "new_request", "[", "'data'", "]", "=", "json", ".", "dumps", "(", "json_data", ")", ".", "encode", "(", "self", ".", "encoding", ")", "return", "self", ".", "new_request" ]
Remove the needless args recorded in self.ignore, then return self.new_request
[ "Remove", "the", "needless", "args", "recorded", "in", "self", ".", "ignore", "then", "return", "self", ".", "new_request" ]
python
train
46.619048
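A hedged sketch of where this method sits; the class name and workflow are assumed from torequests.crawlers, and it is kept as comments because the probing step needs live requests:

# from torequests.crawlers import CleanRequest
#
# cleaner = CleanRequest(request=seed_request_dict)
# ...                              # run the diff probes that fill cleaner.ignore
# minimal = cleaner.reset_new_request()
# print(minimal['url'], minimal.get('headers'))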
pgmpy/pgmpy
ez_setup.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/ez_setup.py#L335-L354
def _parse_args(): """ Parse the command line for options """ parser = optparse.OptionParser() parser.add_option( '--user', dest='user_install', action='store_true', default=False, help='install in user site package (requires Python 2.6 or later)') parser.add_option( '--download-base', dest='download_base', metavar="URL", default=DEFAULT_URL, help='alternative URL from where to download the setuptools package') parser.add_option( '--insecure', dest='downloader_factory', action='store_const', const=lambda: download_file_insecure, default=get_best_downloader, help='Use internal, non-validating downloader' ) options, args = parser.parse_args() # positional arguments are ignored return options
[ "def", "_parse_args", "(", ")", ":", "parser", "=", "optparse", ".", "OptionParser", "(", ")", "parser", ".", "add_option", "(", "'--user'", ",", "dest", "=", "'user_install'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'install in user site package (requires Python 2.6 or later)'", ")", "parser", ".", "add_option", "(", "'--download-base'", ",", "dest", "=", "'download_base'", ",", "metavar", "=", "\"URL\"", ",", "default", "=", "DEFAULT_URL", ",", "help", "=", "'alternative URL from where to download the setuptools package'", ")", "parser", ".", "add_option", "(", "'--insecure'", ",", "dest", "=", "'downloader_factory'", ",", "action", "=", "'store_const'", ",", "const", "=", "lambda", ":", "download_file_insecure", ",", "default", "=", "get_best_downloader", ",", "help", "=", "'Use internal, non-validating downloader'", ")", "options", ",", "args", "=", "parser", ".", "parse_args", "(", ")", "# positional arguments are ignored", "return", "options" ]
Parse the command line for options
[ "Parse", "the", "command", "line", "for", "options" ]
python
train
39.35
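For context, a hedged sketch of how the bootstrap script consumes the parsed options inside its main() (names follow the ez_setup conventions; this runs within the script, not standalone):

options = _parse_args()
archive = download_setuptools(
    download_base=options.download_base,
    downloader_factory=options.downloader_factory,
)
# e.g. invoked from the shell as:
#   python ez_setup.py --user --insecure --download-base https://mirror.example/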
s-m-i-t-a/railroad
railroad/actions.py
https://github.com/s-m-i-t-a/railroad/blob/ddb4afa018b8523b5d8c3a86e55388d1ea0ab37c/railroad/actions.py#L8-L32
def actions(acts, done):
    '''
    Prepare actions pipeline.

    :param tuple acts: called functions
    :param function done: get result from actions
    :returns function: function that starts execution
    '''
    def _intermediate(acc, action):
        result = action(acc['state'])
        values = concatv(acc['values'], [result['answer']])

        return {'values': values, 'state': result['state']}

    def _actions(seed):
        init = {'values': [], 'state': seed}
        result = reduce(_intermediate, acts, init)
        keep = remove(lambda x: x is None, result['values'])

        return done(keep, result['state'])

    return _actions
[ "def", "actions", "(", "acts", ",", "done", ")", ":", "def", "_intermediate", "(", "acc", ",", "action", ")", ":", "result", "=", "action", "(", "acc", "[", "'state'", "]", ")", "values", "=", "concatv", "(", "acc", "[", "'values'", "]", ",", "[", "result", "[", "'answer'", "]", "]", ")", "return", "{", "'values'", ":", "values", ",", "'state'", ":", "result", "[", "'state'", "]", "}", "def", "_actions", "(", "seed", ")", ":", "init", "=", "{", "'values'", ":", "[", "]", ",", "'state'", ":", "seed", "}", "result", "=", "reduce", "(", "_intermediate", ",", "acts", ",", "init", ")", "keep", "=", "remove", "(", "lambda", "x", ":", "x", "is", "None", ",", "result", "[", "'values'", "]", ")", "return", "done", "(", "keep", ",", "result", "[", "'state'", "]", ")", "return", "_actions" ]
Prepare actions pipeline.

:param tuple acts: called functions
:param function done: get result from actions
:returns function: function that starts execution
[ "Prepare", "actions", "pipeline", "." ]
python
train
25.4
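A runnable sketch of the protocol (import path assumed from this file's location): each action maps a state to {'answer': ..., 'state': ...}, None answers are dropped, and done folds the kept answers with the final state:

from railroad.actions import actions

def increment(state):
    return {'answer': state, 'state': state + 1}

def silent(state):
    return {'answer': None, 'state': state}   # filtered out by `remove`

pipeline = actions((increment, silent, increment),
                   lambda values, state: (list(values), state))
print(pipeline(0))  # ([0, 1], 2)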
tkem/cachetools
cachetools/keys.py
https://github.com/tkem/cachetools/blob/1b67cddadccb89993e9d2567bac22e57e2b2b373/cachetools/keys.py#L28-L34
def hashkey(*args, **kwargs): """Return a cache key for the specified hashable arguments.""" if kwargs: return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark)) else: return _HashedTuple(args)
[ "def", "hashkey", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "return", "_HashedTuple", "(", "args", "+", "sum", "(", "sorted", "(", "kwargs", ".", "items", "(", ")", ")", ",", "_kwmark", ")", ")", "else", ":", "return", "_HashedTuple", "(", "args", ")" ]
Return a cache key for the specified hashable arguments.
[ "Return", "a", "cache", "key", "for", "the", "specified", "hashable", "arguments", "." ]
python
train
31.857143
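A minimal usage sketch: hashkey is the default key function behind cachetools' @cached decorator, producing hashable keys that ignore kwargs ordering:

from cachetools.keys import hashkey

assert hashkey(1, 2, a=3) == hashkey(1, 2, a=3)   # stable and hashable
assert hashkey(1, 2) != hashkey(2, 1)             # positional order matters
assert hashkey(a=1, b=2) == hashkey(b=2, a=1)     # kwargs are sorted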
trailofbits/manticore
manticore/utils/helpers.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/utils/helpers.py#L44-L60
def get_taints(arg, taint=None): """ Helper to list an object taints. :param arg: a value or Expression :param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*'). If None, this function checks for any taint value. """ if not issymbolic(arg): return for arg_taint in arg.taint: if taint is not None: m = re.match(taint, arg_taint, re.DOTALL | re.IGNORECASE) if m: yield arg_taint else: yield arg_taint return
[ "def", "get_taints", "(", "arg", ",", "taint", "=", "None", ")", ":", "if", "not", "issymbolic", "(", "arg", ")", ":", "return", "for", "arg_taint", "in", "arg", ".", "taint", ":", "if", "taint", "is", "not", "None", ":", "m", "=", "re", ".", "match", "(", "taint", ",", "arg_taint", ",", "re", ".", "DOTALL", "|", "re", ".", "IGNORECASE", ")", "if", "m", ":", "yield", "arg_taint", "else", ":", "yield", "arg_taint", "return" ]
Helper to list an object taints. :param arg: a value or Expression :param taint: a regular expression matching a taint value (eg. 'IMPORTANT.*'). If None, this function checks for any taint value.
[ "Helper", "to", "list", "an", "object", "taints", ".", ":", "param", "arg", ":", "a", "value", "or", "Expression", ":", "param", "taint", ":", "a", "regular", "expression", "matching", "a", "taint", "value", "(", "eg", ".", "IMPORTANT", ".", "*", ")", ".", "If", "None", "this", "function", "checks", "for", "any", "taint", "value", "." ]
python
valid
30.588235
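A hedged sketch pairing this helper with taint_with from the same module; the ConstraintSet API is assumed from Manticore at this revision:

from manticore.core.smtlib import ConstraintSet
from manticore.utils.helpers import get_taints, taint_with

cs = ConstraintSet()
x = cs.new_bitvec(32)
x = taint_with(x, 'IMPORTANT.user_input')
print(list(get_taints(x, taint='IMPORTANT.*')))  # ['IMPORTANT.user_input']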
pmacosta/pmisc
pmisc/compat3.py
https://github.com/pmacosta/pmisc/blob/dd2bb32e59eee872f1ef2db2d9921a396ab9f50b/pmisc/compat3.py#L30-L38
def _readlines(fname, fpointer1=open, fpointer2=open): # pragma: no cover """Read all lines from file.""" # fpointer1, fpointer2 arguments to ease testing try: with fpointer1(fname, "r") as fobj: return fobj.readlines() except UnicodeDecodeError: # pragma: no cover with fpointer2(fname, "r", encoding="utf-8") as fobj: return fobj.readlines()
[ "def", "_readlines", "(", "fname", ",", "fpointer1", "=", "open", ",", "fpointer2", "=", "open", ")", ":", "# pragma: no cover", "# fpointer1, fpointer2 arguments to ease testing", "try", ":", "with", "fpointer1", "(", "fname", ",", "\"r\"", ")", "as", "fobj", ":", "return", "fobj", ".", "readlines", "(", ")", "except", "UnicodeDecodeError", ":", "# pragma: no cover", "with", "fpointer2", "(", "fname", ",", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "fobj", ":", "return", "fobj", ".", "readlines", "(", ")" ]
Read all lines from file.
[ "Read", "all", "lines", "from", "file", "." ]
python
train
43.666667
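A hedged test-style sketch showing why the injectable open() hooks exist: they let a test force the utf-8 fallback without a real mis-encoded file (the private import is for illustration only):

import io
from pmisc.compat3 import _readlines

def broken_open(fname, mode):
    raise UnicodeDecodeError('ascii', b'\x80', 0, 1, 'forced for the test')

def utf8_open(fname, mode, encoding=None):
    return io.StringIO('line1\nline2\n')

print(_readlines('dummy.txt', fpointer1=broken_open, fpointer2=utf8_open))
# ['line1\n', 'line2\n']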
hsolbrig/PyShEx
pyshex/utils/schema_utils.py
https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/utils/schema_utils.py#L26-L36
def triple_reference_of(label: ShExJ.tripleExprLabel, cntxt: Context) -> Optional[ShExJ.tripleExpr]: """ Search for the label in a Schema """ te: Optional[ShExJ.tripleExpr] = None if cntxt.schema.start is not None: te = triple_in_shape(cntxt.schema.start, label, cntxt) if te is None: for shapeExpr in cntxt.schema.shapes: te = triple_in_shape(shapeExpr, label, cntxt) if te: break return te
[ "def", "triple_reference_of", "(", "label", ":", "ShExJ", ".", "tripleExprLabel", ",", "cntxt", ":", "Context", ")", "->", "Optional", "[", "ShExJ", ".", "tripleExpr", "]", ":", "te", ":", "Optional", "[", "ShExJ", ".", "tripleExpr", "]", "=", "None", "if", "cntxt", ".", "schema", ".", "start", "is", "not", "None", ":", "te", "=", "triple_in_shape", "(", "cntxt", ".", "schema", ".", "start", ",", "label", ",", "cntxt", ")", "if", "te", "is", "None", ":", "for", "shapeExpr", "in", "cntxt", ".", "schema", ".", "shapes", ":", "te", "=", "triple_in_shape", "(", "shapeExpr", ",", "label", ",", "cntxt", ")", "if", "te", ":", "break", "return", "te" ]
Search for the label in a Schema
[ "Search", "for", "the", "label", "in", "a", "Schema" ]
python
train
41.545455
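Since building a full Context requires a parsed schema, a hedged comment-level sketch of the lookup (the Context constructor signature is an assumption):

# from pyshex.utils.schema_utils import triple_reference_of
#
# cntxt = Context(rdf_graph, shexj_schema)
# te = triple_reference_of(ShExJ.tripleExprLabel('http://ex.org/S#te1'), cntxt)
# if te is not None:
#     ...  # the resolved tripleExpr (e.g. a TripleConstraint)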
saltstack/salt
salt/states/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/virt.py#L148-L184
def _virt_call(domain, function, section, comment, connection=None, username=None, password=None, **kwargs): ''' Helper to call the virt functions. Wildcards supported. :param domain: :param function: :param section: :param comment: :return: ''' ret = {'name': domain, 'changes': {}, 'result': True, 'comment': ''} targeted_domains = fnmatch.filter(__salt__['virt.list_domains'](), domain) changed_domains = list() ignored_domains = list() for targeted_domain in targeted_domains: try: response = __salt__['virt.{0}'.format(function)](targeted_domain, connection=connection, username=username, password=password, **kwargs) if isinstance(response, dict): response = response['name'] changed_domains.append({'domain': targeted_domain, function: response}) except libvirt.libvirtError as err: ignored_domains.append({'domain': targeted_domain, 'issue': six.text_type(err)}) if not changed_domains: ret['result'] = False ret['comment'] = 'No changes had happened' if ignored_domains: ret['changes'] = {'ignored': ignored_domains} else: ret['changes'] = {section: changed_domains} ret['comment'] = comment return ret
[ "def", "_virt_call", "(", "domain", ",", "function", ",", "section", ",", "comment", ",", "connection", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "domain", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "targeted_domains", "=", "fnmatch", ".", "filter", "(", "__salt__", "[", "'virt.list_domains'", "]", "(", ")", ",", "domain", ")", "changed_domains", "=", "list", "(", ")", "ignored_domains", "=", "list", "(", ")", "for", "targeted_domain", "in", "targeted_domains", ":", "try", ":", "response", "=", "__salt__", "[", "'virt.{0}'", ".", "format", "(", "function", ")", "]", "(", "targeted_domain", ",", "connection", "=", "connection", ",", "username", "=", "username", ",", "password", "=", "password", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "response", ",", "dict", ")", ":", "response", "=", "response", "[", "'name'", "]", "changed_domains", ".", "append", "(", "{", "'domain'", ":", "targeted_domain", ",", "function", ":", "response", "}", ")", "except", "libvirt", ".", "libvirtError", "as", "err", ":", "ignored_domains", ".", "append", "(", "{", "'domain'", ":", "targeted_domain", ",", "'issue'", ":", "six", ".", "text_type", "(", "err", ")", "}", ")", "if", "not", "changed_domains", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'No changes had happened'", "if", "ignored_domains", ":", "ret", "[", "'changes'", "]", "=", "{", "'ignored'", ":", "ignored_domains", "}", "else", ":", "ret", "[", "'changes'", "]", "=", "{", "section", ":", "changed_domains", "}", "ret", "[", "'comment'", "]", "=", "comment", "return", "ret" ]
Helper to call the virt functions. Wildcards supported. :param domain: :param function: :param section: :param comment: :return:
[ "Helper", "to", "call", "the", "virt", "functions", ".", "Wildcards", "supported", "." ]
python
train
41.405405
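A hedged sketch of how the public virt states delegate to this helper; a "powered off" state body in this module is essentially the call below, with keyword names following the signature above:

# return _virt_call(name, 'stop', 'stopped', 'Machine has been shut down',
#                   connection=connection, username=username, password=password)
# Wildcards in `name` fan out over __salt__['virt.list_domains']().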
ultradns/python_rest_api_client
ultra_rest_client/ultra_rest_client.py
https://github.com/ultradns/python_rest_api_client/blob/e4095f28f5cb5e258b768c06ef7cf8b1915aa5ec/ultra_rest_client/ultra_rest_client.py#L311-L330
def create_rrset(self, zone_name, rtype, owner_name, ttl, rdata): """Creates a new RRSet in the specified zone. Arguments: zone_name -- The zone that will contain the new RRSet. The trailing dot is optional. rtype -- The type of the RRSet. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) ttl -- The TTL value for the RRSet. rdata -- The BIND data for the RRSet as a string. If there is a single resource record in the RRSet, you can pass in the single string. If there are multiple resource records in this RRSet, pass in a list of strings. """ if type(rdata) is not list: rdata = [rdata] rrset = {"ttl": ttl, "rdata": rdata} return self.rest_api_connection.post("/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name, json.dumps(rrset))
[ "def", "create_rrset", "(", "self", ",", "zone_name", ",", "rtype", ",", "owner_name", ",", "ttl", ",", "rdata", ")", ":", "if", "type", "(", "rdata", ")", "is", "not", "list", ":", "rdata", "=", "[", "rdata", "]", "rrset", "=", "{", "\"ttl\"", ":", "ttl", ",", "\"rdata\"", ":", "rdata", "}", "return", "self", ".", "rest_api_connection", ".", "post", "(", "\"/v1/zones/\"", "+", "zone_name", "+", "\"/rrsets/\"", "+", "rtype", "+", "\"/\"", "+", "owner_name", ",", "json", ".", "dumps", "(", "rrset", ")", ")" ]
Creates a new RRSet in the specified zone. Arguments: zone_name -- The zone that will contain the new RRSet. The trailing dot is optional. rtype -- The type of the RRSet. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) ttl -- The TTL value for the RRSet. rdata -- The BIND data for the RRSet as a string. If there is a single resource record in the RRSet, you can pass in the single string. If there are multiple resource records in this RRSet, pass in a list of strings.
[ "Creates", "a", "new", "RRSet", "in", "the", "specified", "zone", "." ]
python
train
59.85
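A hedged usage sketch (client construction assumed from the package's README; credentials are placeholders):

from ultra_rest_client import RestApiClient

client = RestApiClient('username', 'password')
# Single resource record:
client.create_rrset('example.com.', 'A', 'www', 300, '192.0.2.10')
# Round-robin RRSet from a list of rdata strings:
client.create_rrset('example.com.', 'A', 'mail', 300, ['192.0.2.11', '192.0.2.12'])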
kentik/kentikapi-py
kentikapi/v5/tagging.py
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L431-L446
def submit_populator_batch(self, column_name, batch): """Submit a populator batch Submit a populator batch as a series of HTTP requests in small chunks, returning the batch GUID, or raising an exception on error.""" if not set(column_name).issubset(_allowedCustomDimensionChars): raise ValueError('Invalid custom dimension name "%s": must only contain letters, digits, and underscores' % column_name) if len(column_name) < 3 or len(column_name) > 20: raise ValueError('Invalid value "%s": must be between 3-20 characters' % column_name) url = '%s/api/v5/batch/customdimensions/%s/populators' % (self.base_url, column_name) resp_json_dict = self._submit_batch(url, batch) if resp_json_dict.get('error') is not None: raise RuntimeError('Error received from server: %s' % resp_json_dict['error']) return resp_json_dict['guid']
[ "def", "submit_populator_batch", "(", "self", ",", "column_name", ",", "batch", ")", ":", "if", "not", "set", "(", "column_name", ")", ".", "issubset", "(", "_allowedCustomDimensionChars", ")", ":", "raise", "ValueError", "(", "'Invalid custom dimension name \"%s\": must only contain letters, digits, and underscores'", "%", "column_name", ")", "if", "len", "(", "column_name", ")", "<", "3", "or", "len", "(", "column_name", ")", ">", "20", ":", "raise", "ValueError", "(", "'Invalid value \"%s\": must be between 3-20 characters'", "%", "column_name", ")", "url", "=", "'%s/api/v5/batch/customdimensions/%s/populators'", "%", "(", "self", ".", "base_url", ",", "column_name", ")", "resp_json_dict", "=", "self", ".", "_submit_batch", "(", "url", ",", "batch", ")", "if", "resp_json_dict", ".", "get", "(", "'error'", ")", "is", "not", "None", ":", "raise", "RuntimeError", "(", "'Error received from server: %s'", "%", "resp_json_dict", "[", "'error'", "]", ")", "return", "resp_json_dict", "[", "'guid'", "]" ]
Submit a populator batch Submit a populator batch as a series of HTTP requests in small chunks, returning the batch GUID, or raising an exception on error.
[ "Submit", "a", "populator", "batch" ]
python
train
57.25
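The column-name guards in submit_populator_batch can be restated as a standalone check — a sketch that mirrors the method's own validation, rebuilding the character set locally since _allowedCustomDimensionChars is private to the module (the error message above confirms letters, digits, and underscores):

import string

_allowed = set(string.ascii_letters + string.digits + '_')  # assumed to match the module's set

def is_valid_dimension_name(name):
    # Letters, digits and underscores only, length between 3 and 20.
    return 3 <= len(name) <= 20 and set(name).issubset(_allowed)

assert is_valid_dimension_name('my_dimension')
assert not is_valid_dimension_name('x')           # too short
assert not is_valid_dimension_name('has-hyphen')  # illegal character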
kwikteam/phy
phy/plot/utils.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/utils.py#L122-L147
def _get_texture(arr, default, n_items, from_bounds): """Prepare data to be uploaded as a texture. The from_bounds must be specified. """ if not hasattr(default, '__len__'): # pragma: no cover default = [default] n_cols = len(default) if arr is None: # pragma: no cover arr = np.tile(default, (n_items, 1)) assert arr.shape == (n_items, n_cols) # Convert to 3D texture. arr = arr[np.newaxis, ...].astype(np.float64) assert arr.shape == (1, n_items, n_cols) # NOTE: we need to cast the texture to [0., 1.] (float texture). # This is easy as soon as we assume that the signal bounds are in # [-1, 1]. assert len(from_bounds) == 2 m, M = map(float, from_bounds) assert np.all(arr >= m) assert np.all(arr <= M) arr = (arr - m) / (M - m) assert np.all(arr >= 0) assert np.all(arr <= 1.) return arr
[ "def", "_get_texture", "(", "arr", ",", "default", ",", "n_items", ",", "from_bounds", ")", ":", "if", "not", "hasattr", "(", "default", ",", "'__len__'", ")", ":", "# pragma: no cover", "default", "=", "[", "default", "]", "n_cols", "=", "len", "(", "default", ")", "if", "arr", "is", "None", ":", "# pragma: no cover", "arr", "=", "np", ".", "tile", "(", "default", ",", "(", "n_items", ",", "1", ")", ")", "assert", "arr", ".", "shape", "==", "(", "n_items", ",", "n_cols", ")", "# Convert to 3D texture.", "arr", "=", "arr", "[", "np", ".", "newaxis", ",", "...", "]", ".", "astype", "(", "np", ".", "float64", ")", "assert", "arr", ".", "shape", "==", "(", "1", ",", "n_items", ",", "n_cols", ")", "# NOTE: we need to cast the texture to [0., 1.] (float texture).", "# This is easy as soon as we assume that the signal bounds are in", "# [-1, 1].", "assert", "len", "(", "from_bounds", ")", "==", "2", "m", ",", "M", "=", "map", "(", "float", ",", "from_bounds", ")", "assert", "np", ".", "all", "(", "arr", ">=", "m", ")", "assert", "np", ".", "all", "(", "arr", "<=", "M", ")", "arr", "=", "(", "arr", "-", "m", ")", "/", "(", "M", "-", "m", ")", "assert", "np", ".", "all", "(", "arr", ">=", "0", ")", "assert", "np", ".", "all", "(", "arr", "<=", "1.", ")", "return", "arr" ]
Prepare data to be uploaded as a texture. The from_bounds must be specified.
[ "Prepare", "data", "to", "be", "uploaded", "as", "a", "texture", "." ]
python
train
33.5
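A small worked call against _get_texture, assuming the [-1, 1] signal bounds the function's own comment mentions:

import numpy as np

arr = np.array([[-1.0], [1.0]])  # two items, one column, values in [-1, 1]
tex = _get_texture(arr, default=0., n_items=2, from_bounds=(-1, 1))
assert tex.shape == (1, 2, 1)                 # cast to a 3D float texture
assert tex.min() == 0.0 and tex.max() == 1.0  # rescaled: -1 -> 0., 1 -> 1.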
angr/angr
angr/analyses/vfg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/vfg.py#L1815-L1835
def _ordered_node_addrs(self, function_address): """ For a given function, return all nodes in an optimal traversal order. If the function does not exist, return an empty list. :param int function_address: Address of the function. :return: An ordered list of the nodes. :rtype: list """ try: function = self.kb.functions[function_address] except KeyError: # the function does not exist return [ ] if function_address not in self._function_node_addrs: sorted_nodes = CFGUtils.quasi_topological_sort_nodes(function.graph) self._function_node_addrs[function_address] = [ n.addr for n in sorted_nodes ] return self._function_node_addrs[function_address]
[ "def", "_ordered_node_addrs", "(", "self", ",", "function_address", ")", ":", "try", ":", "function", "=", "self", ".", "kb", ".", "functions", "[", "function_address", "]", "except", "KeyError", ":", "# the function does not exist", "return", "[", "]", "if", "function_address", "not", "in", "self", ".", "_function_node_addrs", ":", "sorted_nodes", "=", "CFGUtils", ".", "quasi_topological_sort_nodes", "(", "function", ".", "graph", ")", "self", ".", "_function_node_addrs", "[", "function_address", "]", "=", "[", "n", ".", "addr", "for", "n", "in", "sorted_nodes", "]", "return", "self", ".", "_function_node_addrs", "[", "function_address", "]" ]
For a given function, return all nodes in an optimal traversal order. If the function does not exist, return an empty list. :param int function_address: Address of the function. :return: An ordered list of the nodes. :rtype: list
[ "For", "a", "given", "function", "return", "all", "nodes", "in", "an", "optimal", "traversal", "order", ".", "If", "the", "function", "does", "not", "exist", "return", "an", "empty", "list", "." ]
python
train
37.142857
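A hedged sketch of the caching behavior; here vfg stands for an instance of the analysis above, and the addresses are placeholders:

first = vfg._ordered_node_addrs(0x401000)   # computes and caches the quasi-topological order
again = vfg._ordered_node_addrs(0x401000)   # served from self._function_node_addrs
assert first is again                       # same cached list object on repeat calls
assert vfg._ordered_node_addrs(0xbadadd) == []  # unknown function -> empty list, no exception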