Dataset schema (column name, type, observed range or number of distinct classes):

Column             Type      Range / classes
repo               string    length 7 - 55
path               string    length 4 - 223
url                string    length 87 - 315
code               string    length 75 - 104k
code_tokens        list
docstring          string    length 1 - 46.9k
docstring_tokens   list
language           string    1 distinct value ("python" in every row shown)
partition          string    3 distinct values ("train", "valid", "test")
avg_line_len       float64   7.91 - 980
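The rows below are shown flattened, one field per line, in the column order of the schema. As a minimal sketch of how such records might be consumed, the following Python reads one record per line from a JSON Lines file and recomputes the avg_line_len column; both the file name "code_records.jsonl" and the exact formula behind avg_line_len (mean characters per line of the code field) are assumptions, not something the dump itself states.

import json


def avg_line_len(code: str) -> float:
    # Assumed definition: mean character count per line of the code snippet.
    lines = code.splitlines() or [""]
    return sum(len(line) for line in lines) / len(lines)


def iter_records(path: str = "code_records.jsonl"):
    # Hypothetical file name; yields one dict per record, keyed by the
    # column names listed in the schema above.
    with open(path, encoding="utf-8") as handle:
        for raw in handle:
            yield json.loads(raw)


if __name__ == "__main__":
    for row in iter_records():
        print(row["repo"], row["path"], row["partition"])
        print("stored avg_line_len:   ", row["avg_line_len"])
        print("recomputed avg_line_len:", round(avg_line_len(row["code"]), 6))

The example rows that follow are reproduced verbatim from the dataset, including any typos in the original docstrings.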
evhub/coconut
coconut/exceptions.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/exceptions.py#L87-L93
def message(self, message, item, extra): """Uses arguments to create the message.""" if item is not None: message += ": " + ascii(item) if extra is not None: message += " (" + str(extra) + ")" return message
[ "def", "message", "(", "self", ",", "message", ",", "item", ",", "extra", ")", ":", "if", "item", "is", "not", "None", ":", "message", "+=", "\": \"", "+", "ascii", "(", "item", ")", "if", "extra", "is", "not", "None", ":", "message", "+=", "\" (\"", "+", "str", "(", "extra", ")", "+", "\")\"", "return", "message" ]
Uses arguments to create the message.
[ "Uses", "arguments", "to", "create", "the", "message", "." ]
python
train
36.714286
usc-isi-i2/etk
etk/extractors/spacy_rule_extractor.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/spacy_rule_extractor.py#L638-L665
def _construct_punctuation_token(self, d: Dict, nlp) -> List[Dict]: """ Construct a shape token Args: d: Dict nlp Returns: List[Dict] """ result = [] if not d["token"]: this_token = {attrs.IS_PUNCT: True} elif len(d["token"]) == 1: this_token = {attrs.ORTH: d["token"][0]} else: global FLAG_ID punct_set = set(d["token"]) def is_selected_punct(x): return x in punct_set FLAG_DICT[FLAG_ID] = nlp.vocab.add_flag(is_selected_punct) this_token = {FLAG_DICT[FLAG_ID]: True} FLAG_ID += 1 result.append(this_token) result = self._add_common_constrain(result, d) return result
[ "def", "_construct_punctuation_token", "(", "self", ",", "d", ":", "Dict", ",", "nlp", ")", "->", "List", "[", "Dict", "]", ":", "result", "=", "[", "]", "if", "not", "d", "[", "\"token\"", "]", ":", "this_token", "=", "{", "attrs", ".", "IS_PUNCT", ":", "True", "}", "elif", "len", "(", "d", "[", "\"token\"", "]", ")", "==", "1", ":", "this_token", "=", "{", "attrs", ".", "ORTH", ":", "d", "[", "\"token\"", "]", "[", "0", "]", "}", "else", ":", "global", "FLAG_ID", "punct_set", "=", "set", "(", "d", "[", "\"token\"", "]", ")", "def", "is_selected_punct", "(", "x", ")", ":", "return", "x", "in", "punct_set", "FLAG_DICT", "[", "FLAG_ID", "]", "=", "nlp", ".", "vocab", ".", "add_flag", "(", "is_selected_punct", ")", "this_token", "=", "{", "FLAG_DICT", "[", "FLAG_ID", "]", ":", "True", "}", "FLAG_ID", "+=", "1", "result", ".", "append", "(", "this_token", ")", "result", "=", "self", ".", "_add_common_constrain", "(", "result", ",", "d", ")", "return", "result" ]
Construct a shape token Args: d: Dict nlp Returns: List[Dict]
[ "Construct", "a", "shape", "token", "Args", ":", "d", ":", "Dict", "nlp" ]
python
train
27.75
geertj/gruvi
vendor/txdbus/marshal.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/vendor/txdbus/marshal.py#L307-L354
def genCompleteTypes( compoundSig ): """ Generator function used to iterate over each complete, top-level type contained in in a signature. Ex:: "iii" => [ 'i', 'i', 'i' ] "i(ii)i" => [ 'i', '(ii)', 'i' ] "i(i(ii))i" => [ 'i', '(i(ii))', 'i' ] """ i = 0 start = 0 end = len(compoundSig) def find_end( idx, b, e ): depth = 1 while idx < end: subc = compoundSig[idx] if subc == b: depth += 1 elif subc == e: depth -= 1 if depth == 0: return idx idx += 1 while i < end: c = compoundSig[i] if c == '(': x = find_end(i+1, '(', ')') yield compoundSig[i:x+1] i = x elif c == '{': x = find_end(i+1, '{', '}') yield compoundSig[i:x+1] i = x elif c == 'a': start = i g = genCompleteTypes( compoundSig[i+1:] ) ct = six.next(g) i += len(ct) yield 'a' + ct else: yield c i += 1
[ "def", "genCompleteTypes", "(", "compoundSig", ")", ":", "i", "=", "0", "start", "=", "0", "end", "=", "len", "(", "compoundSig", ")", "def", "find_end", "(", "idx", ",", "b", ",", "e", ")", ":", "depth", "=", "1", "while", "idx", "<", "end", ":", "subc", "=", "compoundSig", "[", "idx", "]", "if", "subc", "==", "b", ":", "depth", "+=", "1", "elif", "subc", "==", "e", ":", "depth", "-=", "1", "if", "depth", "==", "0", ":", "return", "idx", "idx", "+=", "1", "while", "i", "<", "end", ":", "c", "=", "compoundSig", "[", "i", "]", "if", "c", "==", "'('", ":", "x", "=", "find_end", "(", "i", "+", "1", ",", "'('", ",", "')'", ")", "yield", "compoundSig", "[", "i", ":", "x", "+", "1", "]", "i", "=", "x", "elif", "c", "==", "'{'", ":", "x", "=", "find_end", "(", "i", "+", "1", ",", "'{'", ",", "'}'", ")", "yield", "compoundSig", "[", "i", ":", "x", "+", "1", "]", "i", "=", "x", "elif", "c", "==", "'a'", ":", "start", "=", "i", "g", "=", "genCompleteTypes", "(", "compoundSig", "[", "i", "+", "1", ":", "]", ")", "ct", "=", "six", ".", "next", "(", "g", ")", "i", "+=", "len", "(", "ct", ")", "yield", "'a'", "+", "ct", "else", ":", "yield", "c", "i", "+=", "1" ]
Generator function used to iterate over each complete, top-level type contained in in a signature. Ex:: "iii" => [ 'i', 'i', 'i' ] "i(ii)i" => [ 'i', '(ii)', 'i' ] "i(i(ii))i" => [ 'i', '(i(ii))', 'i' ]
[ "Generator", "function", "used", "to", "iterate", "over", "each", "complete", "top", "-", "level", "type", "contained", "in", "in", "a", "signature", ".", "Ex", "::", "iii", "=", ">", "[", "i", "i", "i", "]", "i", "(", "ii", ")", "i", "=", ">", "[", "i", "(", "ii", ")", "i", "]", "i", "(", "i", "(", "ii", "))", "i", "=", ">", "[", "i", "(", "i", "(", "ii", "))", "i", "]" ]
python
train
23.958333
google/pybadges
pybadges/precalculated_text_measurer.py
https://github.com/google/pybadges/blob/d42c8080adb21b81123ac9540c53127ed2fa1edc/pybadges/precalculated_text_measurer.py#L62-L70
def from_json(f: TextIO) -> 'PrecalculatedTextMeasurer': """Return a PrecalculatedTextMeasurer given a JSON stream. See precalculate_text.py for details on the required format. """ o = json.load(f) return PrecalculatedTextMeasurer(o['mean-character-length'], o['character-lengths'], o['kerning-pairs'])
[ "def", "from_json", "(", "f", ":", "TextIO", ")", "->", "'PrecalculatedTextMeasurer'", ":", "o", "=", "json", ".", "load", "(", "f", ")", "return", "PrecalculatedTextMeasurer", "(", "o", "[", "'mean-character-length'", "]", ",", "o", "[", "'character-lengths'", "]", ",", "o", "[", "'kerning-pairs'", "]", ")" ]
Return a PrecalculatedTextMeasurer given a JSON stream. See precalculate_text.py for details on the required format.
[ "Return", "a", "PrecalculatedTextMeasurer", "given", "a", "JSON", "stream", "." ]
python
test
46.333333
pyrogram/pyrogram
pyrogram/client/methods/messages/edit_message_media.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/methods/messages/edit_message_media.py#L36-L366
def edit_message_media( self, chat_id: Union[int, str], message_id: int, media: InputMedia, reply_markup: "pyrogram.InlineKeyboardMarkup" = None ) -> "pyrogram.Message": """Use this method to edit audio, document, photo, or video messages. If a message is a part of a message album, then it can be edited only to a photo or a video. Otherwise, message type can be changed arbitrarily. When inline message is edited, new file can't be uploaded. Use previously uploaded file via its file_id or specify a URL. On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned. Args: chat_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). message_id (``int``): Message identifier in the chat specified in chat_id. media (:obj:`InputMedia`) One of the InputMedia objects describing an animation, audio, document, photo or video. reply_markup (:obj:`InlineKeyboardMarkup`, *optional*): An InlineKeyboardMarkup object. Returns: On success, the edited :obj:`Message <pyrogram.Message>` is returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. """ style = self.html if media.parse_mode.lower() == "html" else self.markdown caption = media.caption if isinstance(media, InputMediaPhoto): if os.path.exists(media.media): media = self.send( functions.messages.UploadMedia( peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedPhoto( file=self.save_file(media.media) ) ) ) media = types.InputMediaPhoto( id=types.InputPhoto( id=media.photo.id, access_hash=media.photo.access_hash, file_reference=b"" ) ) elif media.media.startswith("http"): media = types.InputMediaPhotoExternal( url=media.media ) else: try: decoded = utils.decode(media.media) fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq" unpacked = struct.unpack(fmt, decoded) except (AssertionError, binascii.Error, struct.error): raise FileIdInvalid from None else: if unpacked[0] != 2: media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None) if media_type: raise FileIdInvalid("The file_id belongs to a {}".format(media_type)) else: raise FileIdInvalid("Unknown media type: {}".format(unpacked[0])) media = types.InputMediaPhoto( id=types.InputPhoto( id=unpacked[2], access_hash=unpacked[3], file_reference=b"" ) ) if isinstance(media, InputMediaVideo): if os.path.exists(media.media): media = self.send( functions.messages.UploadMedia( peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument( mime_type=self.guess_mime_type(media.media) or "video/mp4", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[ types.DocumentAttributeVideo( supports_streaming=media.supports_streaming or None, duration=media.duration, w=media.width, h=media.height ), types.DocumentAttributeFilename( file_name=os.path.basename(media.media) ) ] ) ) ) media = types.InputMediaDocument( id=types.InputDocument( id=media.document.id, access_hash=media.document.access_hash, file_reference=b"" ) ) elif media.media.startswith("http"): media = types.InputMediaDocumentExternal( url=media.media ) else: try: decoded = utils.decode(media.media) fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq" unpacked = struct.unpack(fmt, decoded) except (AssertionError, binascii.Error, struct.error): raise FileIdInvalid from None else: if unpacked[0] != 4: 
media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None) if media_type: raise FileIdInvalid("The file_id belongs to a {}".format(media_type)) else: raise FileIdInvalid("Unknown media type: {}".format(unpacked[0])) media = types.InputMediaDocument( id=types.InputDocument( id=unpacked[2], access_hash=unpacked[3], file_reference=b"" ) ) if isinstance(media, InputMediaAudio): if os.path.exists(media.media): media = self.send( functions.messages.UploadMedia( peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument( mime_type=self.guess_mime_type(media.media) or "audio/mpeg", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[ types.DocumentAttributeAudio( duration=media.duration, performer=media.performer, title=media.title ), types.DocumentAttributeFilename( file_name=os.path.basename(media.media) ) ] ) ) ) media = types.InputMediaDocument( id=types.InputDocument( id=media.document.id, access_hash=media.document.access_hash, file_reference=b"" ) ) elif media.media.startswith("http"): media = types.InputMediaDocumentExternal( url=media.media ) else: try: decoded = utils.decode(media.media) fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq" unpacked = struct.unpack(fmt, decoded) except (AssertionError, binascii.Error, struct.error): raise FileIdInvalid from None else: if unpacked[0] != 9: media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None) if media_type: raise FileIdInvalid("The file_id belongs to a {}".format(media_type)) else: raise FileIdInvalid("Unknown media type: {}".format(unpacked[0])) media = types.InputMediaDocument( id=types.InputDocument( id=unpacked[2], access_hash=unpacked[3], file_reference=b"" ) ) if isinstance(media, InputMediaAnimation): if os.path.exists(media.media): media = self.send( functions.messages.UploadMedia( peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument( mime_type=self.guess_mime_type(media.media) or "video/mp4", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[ types.DocumentAttributeVideo( supports_streaming=True, duration=media.duration, w=media.width, h=media.height ), types.DocumentAttributeFilename( file_name=os.path.basename(media.media) ), types.DocumentAttributeAnimated() ] ) ) ) media = types.InputMediaDocument( id=types.InputDocument( id=media.document.id, access_hash=media.document.access_hash, file_reference=b"" ) ) elif media.media.startswith("http"): media = types.InputMediaDocumentExternal( url=media.media ) else: try: decoded = utils.decode(media.media) fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq" unpacked = struct.unpack(fmt, decoded) except (AssertionError, binascii.Error, struct.error): raise FileIdInvalid from None else: if unpacked[0] != 10: media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None) if media_type: raise FileIdInvalid("The file_id belongs to a {}".format(media_type)) else: raise FileIdInvalid("Unknown media type: {}".format(unpacked[0])) media = types.InputMediaDocument( id=types.InputDocument( id=unpacked[2], access_hash=unpacked[3], file_reference=b"" ) ) if isinstance(media, InputMediaDocument): if os.path.exists(media.media): media = self.send( functions.messages.UploadMedia( peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument( mime_type=self.guess_mime_type(media.media) or "application/zip", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[ 
types.DocumentAttributeFilename( file_name=os.path.basename(media.media) ) ] ) ) ) media = types.InputMediaDocument( id=types.InputDocument( id=media.document.id, access_hash=media.document.access_hash, file_reference=b"" ) ) elif media.media.startswith("http"): media = types.InputMediaDocumentExternal( url=media.media ) else: try: decoded = utils.decode(media.media) fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq" unpacked = struct.unpack(fmt, decoded) except (AssertionError, binascii.Error, struct.error): raise FileIdInvalid from None else: if unpacked[0] not in (5, 10): media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None) if media_type: raise FileIdInvalid("The file_id belongs to a {}".format(media_type)) else: raise FileIdInvalid("Unknown media type: {}".format(unpacked[0])) media = types.InputMediaDocument( id=types.InputDocument( id=unpacked[2], access_hash=unpacked[3], file_reference=b"" ) ) r = self.send( functions.messages.EditMessage( peer=self.resolve_peer(chat_id), id=message_id, reply_markup=reply_markup.write() if reply_markup else None, media=media, **style.parse(caption) ) ) for i in r.updates: if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)): return pyrogram.Message._parse( self, i.message, {i.id: i for i in r.users}, {i.id: i for i in r.chats} )
[ "def", "edit_message_media", "(", "self", ",", "chat_id", ":", "Union", "[", "int", ",", "str", "]", ",", "message_id", ":", "int", ",", "media", ":", "InputMedia", ",", "reply_markup", ":", "\"pyrogram.InlineKeyboardMarkup\"", "=", "None", ")", "->", "\"pyrogram.Message\"", ":", "style", "=", "self", ".", "html", "if", "media", ".", "parse_mode", ".", "lower", "(", ")", "==", "\"html\"", "else", "self", ".", "markdown", "caption", "=", "media", ".", "caption", "if", "isinstance", "(", "media", ",", "InputMediaPhoto", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "media", ".", "media", ")", ":", "media", "=", "self", ".", "send", "(", "functions", ".", "messages", ".", "UploadMedia", "(", "peer", "=", "self", ".", "resolve_peer", "(", "chat_id", ")", ",", "media", "=", "types", ".", "InputMediaUploadedPhoto", "(", "file", "=", "self", ".", "save_file", "(", "media", ".", "media", ")", ")", ")", ")", "media", "=", "types", ".", "InputMediaPhoto", "(", "id", "=", "types", ".", "InputPhoto", "(", "id", "=", "media", ".", "photo", ".", "id", ",", "access_hash", "=", "media", ".", "photo", ".", "access_hash", ",", "file_reference", "=", "b\"\"", ")", ")", "elif", "media", ".", "media", ".", "startswith", "(", "\"http\"", ")", ":", "media", "=", "types", ".", "InputMediaPhotoExternal", "(", "url", "=", "media", ".", "media", ")", "else", ":", "try", ":", "decoded", "=", "utils", ".", "decode", "(", "media", ".", "media", ")", "fmt", "=", "\"<iiqqqqi\"", "if", "len", "(", "decoded", ")", ">", "24", "else", "\"<iiqq\"", "unpacked", "=", "struct", ".", "unpack", "(", "fmt", ",", "decoded", ")", "except", "(", "AssertionError", ",", "binascii", ".", "Error", ",", "struct", ".", "error", ")", ":", "raise", "FileIdInvalid", "from", "None", "else", ":", "if", "unpacked", "[", "0", "]", "!=", "2", ":", "media_type", "=", "BaseClient", ".", "MEDIA_TYPE_ID", ".", "get", "(", "unpacked", "[", "0", "]", ",", "None", ")", "if", "media_type", ":", "raise", "FileIdInvalid", "(", "\"The file_id belongs to a {}\"", ".", "format", "(", "media_type", ")", ")", "else", ":", "raise", "FileIdInvalid", "(", "\"Unknown media type: {}\"", ".", "format", "(", "unpacked", "[", "0", "]", ")", ")", "media", "=", "types", ".", "InputMediaPhoto", "(", "id", "=", "types", ".", "InputPhoto", "(", "id", "=", "unpacked", "[", "2", "]", ",", "access_hash", "=", "unpacked", "[", "3", "]", ",", "file_reference", "=", "b\"\"", ")", ")", "if", "isinstance", "(", "media", ",", "InputMediaVideo", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "media", ".", "media", ")", ":", "media", "=", "self", ".", "send", "(", "functions", ".", "messages", ".", "UploadMedia", "(", "peer", "=", "self", ".", "resolve_peer", "(", "chat_id", ")", ",", "media", "=", "types", ".", "InputMediaUploadedDocument", "(", "mime_type", "=", "self", ".", "guess_mime_type", "(", "media", ".", "media", ")", "or", "\"video/mp4\"", ",", "thumb", "=", "None", "if", "media", ".", "thumb", "is", "None", "else", "self", ".", "save_file", "(", "media", ".", "thumb", ")", ",", "file", "=", "self", ".", "save_file", "(", "media", ".", "media", ")", ",", "attributes", "=", "[", "types", ".", "DocumentAttributeVideo", "(", "supports_streaming", "=", "media", ".", "supports_streaming", "or", "None", ",", "duration", "=", "media", ".", "duration", ",", "w", "=", "media", ".", "width", ",", "h", "=", "media", ".", "height", ")", ",", "types", ".", "DocumentAttributeFilename", "(", "file_name", "=", "os", ".", "path", ".", "basename", "(", "media", ".", "media", 
")", ")", "]", ")", ")", ")", "media", "=", "types", ".", "InputMediaDocument", "(", "id", "=", "types", ".", "InputDocument", "(", "id", "=", "media", ".", "document", ".", "id", ",", "access_hash", "=", "media", ".", "document", ".", "access_hash", ",", "file_reference", "=", "b\"\"", ")", ")", "elif", "media", ".", "media", ".", "startswith", "(", "\"http\"", ")", ":", "media", "=", "types", ".", "InputMediaDocumentExternal", "(", "url", "=", "media", ".", "media", ")", "else", ":", "try", ":", "decoded", "=", "utils", ".", "decode", "(", "media", ".", "media", ")", "fmt", "=", "\"<iiqqqqi\"", "if", "len", "(", "decoded", ")", ">", "24", "else", "\"<iiqq\"", "unpacked", "=", "struct", ".", "unpack", "(", "fmt", ",", "decoded", ")", "except", "(", "AssertionError", ",", "binascii", ".", "Error", ",", "struct", ".", "error", ")", ":", "raise", "FileIdInvalid", "from", "None", "else", ":", "if", "unpacked", "[", "0", "]", "!=", "4", ":", "media_type", "=", "BaseClient", ".", "MEDIA_TYPE_ID", ".", "get", "(", "unpacked", "[", "0", "]", ",", "None", ")", "if", "media_type", ":", "raise", "FileIdInvalid", "(", "\"The file_id belongs to a {}\"", ".", "format", "(", "media_type", ")", ")", "else", ":", "raise", "FileIdInvalid", "(", "\"Unknown media type: {}\"", ".", "format", "(", "unpacked", "[", "0", "]", ")", ")", "media", "=", "types", ".", "InputMediaDocument", "(", "id", "=", "types", ".", "InputDocument", "(", "id", "=", "unpacked", "[", "2", "]", ",", "access_hash", "=", "unpacked", "[", "3", "]", ",", "file_reference", "=", "b\"\"", ")", ")", "if", "isinstance", "(", "media", ",", "InputMediaAudio", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "media", ".", "media", ")", ":", "media", "=", "self", ".", "send", "(", "functions", ".", "messages", ".", "UploadMedia", "(", "peer", "=", "self", ".", "resolve_peer", "(", "chat_id", ")", ",", "media", "=", "types", ".", "InputMediaUploadedDocument", "(", "mime_type", "=", "self", ".", "guess_mime_type", "(", "media", ".", "media", ")", "or", "\"audio/mpeg\"", ",", "thumb", "=", "None", "if", "media", ".", "thumb", "is", "None", "else", "self", ".", "save_file", "(", "media", ".", "thumb", ")", ",", "file", "=", "self", ".", "save_file", "(", "media", ".", "media", ")", ",", "attributes", "=", "[", "types", ".", "DocumentAttributeAudio", "(", "duration", "=", "media", ".", "duration", ",", "performer", "=", "media", ".", "performer", ",", "title", "=", "media", ".", "title", ")", ",", "types", ".", "DocumentAttributeFilename", "(", "file_name", "=", "os", ".", "path", ".", "basename", "(", "media", ".", "media", ")", ")", "]", ")", ")", ")", "media", "=", "types", ".", "InputMediaDocument", "(", "id", "=", "types", ".", "InputDocument", "(", "id", "=", "media", ".", "document", ".", "id", ",", "access_hash", "=", "media", ".", "document", ".", "access_hash", ",", "file_reference", "=", "b\"\"", ")", ")", "elif", "media", ".", "media", ".", "startswith", "(", "\"http\"", ")", ":", "media", "=", "types", ".", "InputMediaDocumentExternal", "(", "url", "=", "media", ".", "media", ")", "else", ":", "try", ":", "decoded", "=", "utils", ".", "decode", "(", "media", ".", "media", ")", "fmt", "=", "\"<iiqqqqi\"", "if", "len", "(", "decoded", ")", ">", "24", "else", "\"<iiqq\"", "unpacked", "=", "struct", ".", "unpack", "(", "fmt", ",", "decoded", ")", "except", "(", "AssertionError", ",", "binascii", ".", "Error", ",", "struct", ".", "error", ")", ":", "raise", "FileIdInvalid", "from", "None", "else", ":", "if", "unpacked", "[", "0", "]", 
"!=", "9", ":", "media_type", "=", "BaseClient", ".", "MEDIA_TYPE_ID", ".", "get", "(", "unpacked", "[", "0", "]", ",", "None", ")", "if", "media_type", ":", "raise", "FileIdInvalid", "(", "\"The file_id belongs to a {}\"", ".", "format", "(", "media_type", ")", ")", "else", ":", "raise", "FileIdInvalid", "(", "\"Unknown media type: {}\"", ".", "format", "(", "unpacked", "[", "0", "]", ")", ")", "media", "=", "types", ".", "InputMediaDocument", "(", "id", "=", "types", ".", "InputDocument", "(", "id", "=", "unpacked", "[", "2", "]", ",", "access_hash", "=", "unpacked", "[", "3", "]", ",", "file_reference", "=", "b\"\"", ")", ")", "if", "isinstance", "(", "media", ",", "InputMediaAnimation", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "media", ".", "media", ")", ":", "media", "=", "self", ".", "send", "(", "functions", ".", "messages", ".", "UploadMedia", "(", "peer", "=", "self", ".", "resolve_peer", "(", "chat_id", ")", ",", "media", "=", "types", ".", "InputMediaUploadedDocument", "(", "mime_type", "=", "self", ".", "guess_mime_type", "(", "media", ".", "media", ")", "or", "\"video/mp4\"", ",", "thumb", "=", "None", "if", "media", ".", "thumb", "is", "None", "else", "self", ".", "save_file", "(", "media", ".", "thumb", ")", ",", "file", "=", "self", ".", "save_file", "(", "media", ".", "media", ")", ",", "attributes", "=", "[", "types", ".", "DocumentAttributeVideo", "(", "supports_streaming", "=", "True", ",", "duration", "=", "media", ".", "duration", ",", "w", "=", "media", ".", "width", ",", "h", "=", "media", ".", "height", ")", ",", "types", ".", "DocumentAttributeFilename", "(", "file_name", "=", "os", ".", "path", ".", "basename", "(", "media", ".", "media", ")", ")", ",", "types", ".", "DocumentAttributeAnimated", "(", ")", "]", ")", ")", ")", "media", "=", "types", ".", "InputMediaDocument", "(", "id", "=", "types", ".", "InputDocument", "(", "id", "=", "media", ".", "document", ".", "id", ",", "access_hash", "=", "media", ".", "document", ".", "access_hash", ",", "file_reference", "=", "b\"\"", ")", ")", "elif", "media", ".", "media", ".", "startswith", "(", "\"http\"", ")", ":", "media", "=", "types", ".", "InputMediaDocumentExternal", "(", "url", "=", "media", ".", "media", ")", "else", ":", "try", ":", "decoded", "=", "utils", ".", "decode", "(", "media", ".", "media", ")", "fmt", "=", "\"<iiqqqqi\"", "if", "len", "(", "decoded", ")", ">", "24", "else", "\"<iiqq\"", "unpacked", "=", "struct", ".", "unpack", "(", "fmt", ",", "decoded", ")", "except", "(", "AssertionError", ",", "binascii", ".", "Error", ",", "struct", ".", "error", ")", ":", "raise", "FileIdInvalid", "from", "None", "else", ":", "if", "unpacked", "[", "0", "]", "!=", "10", ":", "media_type", "=", "BaseClient", ".", "MEDIA_TYPE_ID", ".", "get", "(", "unpacked", "[", "0", "]", ",", "None", ")", "if", "media_type", ":", "raise", "FileIdInvalid", "(", "\"The file_id belongs to a {}\"", ".", "format", "(", "media_type", ")", ")", "else", ":", "raise", "FileIdInvalid", "(", "\"Unknown media type: {}\"", ".", "format", "(", "unpacked", "[", "0", "]", ")", ")", "media", "=", "types", ".", "InputMediaDocument", "(", "id", "=", "types", ".", "InputDocument", "(", "id", "=", "unpacked", "[", "2", "]", ",", "access_hash", "=", "unpacked", "[", "3", "]", ",", "file_reference", "=", "b\"\"", ")", ")", "if", "isinstance", "(", "media", ",", "InputMediaDocument", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "media", ".", "media", ")", ":", "media", "=", "self", ".", "send", "(", "functions", ".", 
"messages", ".", "UploadMedia", "(", "peer", "=", "self", ".", "resolve_peer", "(", "chat_id", ")", ",", "media", "=", "types", ".", "InputMediaUploadedDocument", "(", "mime_type", "=", "self", ".", "guess_mime_type", "(", "media", ".", "media", ")", "or", "\"application/zip\"", ",", "thumb", "=", "None", "if", "media", ".", "thumb", "is", "None", "else", "self", ".", "save_file", "(", "media", ".", "thumb", ")", ",", "file", "=", "self", ".", "save_file", "(", "media", ".", "media", ")", ",", "attributes", "=", "[", "types", ".", "DocumentAttributeFilename", "(", "file_name", "=", "os", ".", "path", ".", "basename", "(", "media", ".", "media", ")", ")", "]", ")", ")", ")", "media", "=", "types", ".", "InputMediaDocument", "(", "id", "=", "types", ".", "InputDocument", "(", "id", "=", "media", ".", "document", ".", "id", ",", "access_hash", "=", "media", ".", "document", ".", "access_hash", ",", "file_reference", "=", "b\"\"", ")", ")", "elif", "media", ".", "media", ".", "startswith", "(", "\"http\"", ")", ":", "media", "=", "types", ".", "InputMediaDocumentExternal", "(", "url", "=", "media", ".", "media", ")", "else", ":", "try", ":", "decoded", "=", "utils", ".", "decode", "(", "media", ".", "media", ")", "fmt", "=", "\"<iiqqqqi\"", "if", "len", "(", "decoded", ")", ">", "24", "else", "\"<iiqq\"", "unpacked", "=", "struct", ".", "unpack", "(", "fmt", ",", "decoded", ")", "except", "(", "AssertionError", ",", "binascii", ".", "Error", ",", "struct", ".", "error", ")", ":", "raise", "FileIdInvalid", "from", "None", "else", ":", "if", "unpacked", "[", "0", "]", "not", "in", "(", "5", ",", "10", ")", ":", "media_type", "=", "BaseClient", ".", "MEDIA_TYPE_ID", ".", "get", "(", "unpacked", "[", "0", "]", ",", "None", ")", "if", "media_type", ":", "raise", "FileIdInvalid", "(", "\"The file_id belongs to a {}\"", ".", "format", "(", "media_type", ")", ")", "else", ":", "raise", "FileIdInvalid", "(", "\"Unknown media type: {}\"", ".", "format", "(", "unpacked", "[", "0", "]", ")", ")", "media", "=", "types", ".", "InputMediaDocument", "(", "id", "=", "types", ".", "InputDocument", "(", "id", "=", "unpacked", "[", "2", "]", ",", "access_hash", "=", "unpacked", "[", "3", "]", ",", "file_reference", "=", "b\"\"", ")", ")", "r", "=", "self", ".", "send", "(", "functions", ".", "messages", ".", "EditMessage", "(", "peer", "=", "self", ".", "resolve_peer", "(", "chat_id", ")", ",", "id", "=", "message_id", ",", "reply_markup", "=", "reply_markup", ".", "write", "(", ")", "if", "reply_markup", "else", "None", ",", "media", "=", "media", ",", "*", "*", "style", ".", "parse", "(", "caption", ")", ")", ")", "for", "i", "in", "r", ".", "updates", ":", "if", "isinstance", "(", "i", ",", "(", "types", ".", "UpdateEditMessage", ",", "types", ".", "UpdateEditChannelMessage", ")", ")", ":", "return", "pyrogram", ".", "Message", ".", "_parse", "(", "self", ",", "i", ".", "message", ",", "{", "i", ".", "id", ":", "i", "for", "i", "in", "r", ".", "users", "}", ",", "{", "i", ".", "id", ":", "i", "for", "i", "in", "r", ".", "chats", "}", ")" ]
Use this method to edit audio, document, photo, or video messages. If a message is a part of a message album, then it can be edited only to a photo or a video. Otherwise, message type can be changed arbitrarily. When inline message is edited, new file can't be uploaded. Use previously uploaded file via its file_id or specify a URL. On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned. Args: chat_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. For your personal cloud (Saved Messages) you can simply use "me" or "self". For a contact that exists in your Telegram address book you can use his phone number (str). message_id (``int``): Message identifier in the chat specified in chat_id. media (:obj:`InputMedia`) One of the InputMedia objects describing an animation, audio, document, photo or video. reply_markup (:obj:`InlineKeyboardMarkup`, *optional*): An InlineKeyboardMarkup object. Returns: On success, the edited :obj:`Message <pyrogram.Message>` is returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
[ "Use", "this", "method", "to", "edit", "audio", "document", "photo", "or", "video", "messages", "." ]
python
train
43.522659
happyleavesaoc/python-limitlessled
limitlessled/group/__init__.py
https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/__init__.py#L77-L86
def on(self, state): """ Turn on or off. :param state: True (on) or False (off). """ self._on = state cmd = self.command_set.off() if state: cmd = self.command_set.on() self.send(cmd)
[ "def", "on", "(", "self", ",", "state", ")", ":", "self", ".", "_on", "=", "state", "cmd", "=", "self", ".", "command_set", ".", "off", "(", ")", "if", "state", ":", "cmd", "=", "self", ".", "command_set", ".", "on", "(", ")", "self", ".", "send", "(", "cmd", ")" ]
Turn on or off. :param state: True (on) or False (off).
[ "Turn", "on", "or", "off", "." ]
python
train
24.3
kytos/python-openflow
pyof/foundation/base.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/foundation/base.py#L387-L411
def replace_pyof_version(module_fullname, version): """Replace the OF Version of a module fullname. Get's a module name (eg. 'pyof.v0x01.common.header') and returns it on a new 'version' (eg. 'pyof.v0x02.common.header'). Args: module_fullname (str): The fullname of the module (e.g.: pyof.v0x01.common.header) version (str): The version to be 'inserted' on the module fullname. Returns: str: module fullname The new module fullname, with the replaced version, on the format "pyof.v0x01.common.header". If the requested version is the same as the one of the module_fullname or if the module_fullname is not a 'OF version' specific module, returns None. """ module_version = MetaStruct.get_pyof_version(module_fullname) if not module_version or module_version == version: return None return module_fullname.replace(module_version, version)
[ "def", "replace_pyof_version", "(", "module_fullname", ",", "version", ")", ":", "module_version", "=", "MetaStruct", ".", "get_pyof_version", "(", "module_fullname", ")", "if", "not", "module_version", "or", "module_version", "==", "version", ":", "return", "None", "return", "module_fullname", ".", "replace", "(", "module_version", ",", "version", ")" ]
Replace the OF Version of a module fullname. Get's a module name (eg. 'pyof.v0x01.common.header') and returns it on a new 'version' (eg. 'pyof.v0x02.common.header'). Args: module_fullname (str): The fullname of the module (e.g.: pyof.v0x01.common.header) version (str): The version to be 'inserted' on the module fullname. Returns: str: module fullname The new module fullname, with the replaced version, on the format "pyof.v0x01.common.header". If the requested version is the same as the one of the module_fullname or if the module_fullname is not a 'OF version' specific module, returns None.
[ "Replace", "the", "OF", "Version", "of", "a", "module", "fullname", "." ]
python
train
42.32
rmed/pyemtmad
pyemtmad/api/parking.py
https://github.com/rmed/pyemtmad/blob/c21c42d0c7b50035dfed29540d7e64ab67833728/pyemtmad/api/parking.py#L230-L254
def list_features(self, **kwargs): """Obtain a list of parkings. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[Parking]), or message string in case of error. """ # Endpoint parameters params = { 'language': util.language_code(kwargs.get('lang')), 'publicData': True } # Request result = self.make_request('list_features', {}, **params) if not util.check_result(result): return False, result.get('message', 'UNKNOWN ERROR') # Parse values = util.response_list(result, 'Data') return True, [emtype.ParkingFeature(**a) for a in values]
[ "def", "list_features", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Endpoint parameters", "params", "=", "{", "'language'", ":", "util", ".", "language_code", "(", "kwargs", ".", "get", "(", "'lang'", ")", ")", ",", "'publicData'", ":", "True", "}", "# Request", "result", "=", "self", ".", "make_request", "(", "'list_features'", ",", "{", "}", ",", "*", "*", "params", ")", "if", "not", "util", ".", "check_result", "(", "result", ")", ":", "return", "False", ",", "result", ".", "get", "(", "'message'", ",", "'UNKNOWN ERROR'", ")", "# Parse", "values", "=", "util", ".", "response_list", "(", "result", ",", "'Data'", ")", "return", "True", ",", "[", "emtype", ".", "ParkingFeature", "(", "*", "*", "a", ")", "for", "a", "in", "values", "]" ]
Obtain a list of parkings. Args: lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[Parking]), or message string in case of error.
[ "Obtain", "a", "list", "of", "parkings", "." ]
python
train
29.68
SoftwareDefinedBuildings/XBOS
apps/Data_quality_analysis/Wrapper.py
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Wrapper.py#L439-L543
def clean_data(self, data, rename_col=None, drop_col=None, resample=True, freq='h', resampler='mean', interpolate=True, limit=1, method='linear', remove_na=True, remove_na_how='any', remove_outliers=True, sd_val=3, remove_out_of_bounds=True, low_bound=0, high_bound=float('inf'), save_file=True): """ Cleans dataframe according to user specifications and stores result in self.cleaned_data. Parameters ---------- data : pd.DataFrame() Dataframe to be cleaned. rename_col : list(str) List of new column names. drop_col : list(str) Columns to be dropped. resample : bool Indicates whether to resample data or not. freq : str Resampling frequency i.e. d, h, 15T... resampler : str Resampling type i.e. mean, max. interpolate : bool Indicates whether to interpolate data or not. limit : int Interpolation limit. method : str Interpolation method. remove_na : bool Indicates whether to remove NAs or not. remove_na_how : str Specificies how to remove NA i.e. all, any... remove_outliers : bool Indicates whether to remove outliers or not. sd_val : int Standard Deviation Value (specifices how many SDs away is a point considered an outlier) remove_out_of_bounds : bool Indicates whether to remove out of bounds datapoints or not. low_bound : int Low bound of the data. high_bound : int High bound of the data. save_file : bool Specifies whether to save file or not. Defaults to True. Returns ------- pd.DataFrame() Dataframe containing cleaned data. """ # Check to ensure data is a pandas dataframe if not isinstance(data, pd.DataFrame): raise TypeError('data has to be a pandas dataframe.') # Create instance and clean the data clean_data_obj = Clean_Data(data) clean_data_obj.clean_data(resample=resample, freq=freq, resampler=resampler, interpolate=interpolate, limit=limit, method=method, remove_na=remove_na, remove_na_how=remove_na_how, remove_outliers=remove_outliers, sd_val=sd_val, remove_out_of_bounds=remove_out_of_bounds, low_bound=low_bound, high_bound=high_bound) # Correlation plot # fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data) # fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png') if rename_col: # Rename columns of dataframe clean_data_obj.rename_columns(rename_col) if drop_col: # Drop columns of dataframe clean_data_obj.drop_columns(drop_col) # Store cleaned data in wrapper class self.cleaned_data = clean_data_obj.cleaned_data # Logging self.result['Clean'] = { 'Rename Col': rename_col, 'Drop Col': drop_col, 'Resample': resample, 'Frequency': freq, 'Resampler': resampler, 'Interpolate': interpolate, 'Limit': limit, 'Method': method, 'Remove NA': remove_na, 'Remove NA How': remove_na_how, 'Remove Outliers': remove_outliers, 'SD Val': sd_val, 'Remove Out of Bounds': remove_out_of_bounds, 'Low Bound': low_bound, 'High Bound': str(high_bound) if high_bound == float('inf') else high_bound, 'Save File': save_file } if save_file: f = self.results_folder_name + '/cleaned_data-' + str(self.get_global_count()) + '.csv' self.cleaned_data.to_csv(f) self.result['Clean']['Saved File'] = f else: self.result['Clean']['Saved File'] = '' return self.cleaned_data
[ "def", "clean_data", "(", "self", ",", "data", ",", "rename_col", "=", "None", ",", "drop_col", "=", "None", ",", "resample", "=", "True", ",", "freq", "=", "'h'", ",", "resampler", "=", "'mean'", ",", "interpolate", "=", "True", ",", "limit", "=", "1", ",", "method", "=", "'linear'", ",", "remove_na", "=", "True", ",", "remove_na_how", "=", "'any'", ",", "remove_outliers", "=", "True", ",", "sd_val", "=", "3", ",", "remove_out_of_bounds", "=", "True", ",", "low_bound", "=", "0", ",", "high_bound", "=", "float", "(", "'inf'", ")", ",", "save_file", "=", "True", ")", ":", "# Check to ensure data is a pandas dataframe", "if", "not", "isinstance", "(", "data", ",", "pd", ".", "DataFrame", ")", ":", "raise", "TypeError", "(", "'data has to be a pandas dataframe.'", ")", "# Create instance and clean the data", "clean_data_obj", "=", "Clean_Data", "(", "data", ")", "clean_data_obj", ".", "clean_data", "(", "resample", "=", "resample", ",", "freq", "=", "freq", ",", "resampler", "=", "resampler", ",", "interpolate", "=", "interpolate", ",", "limit", "=", "limit", ",", "method", "=", "method", ",", "remove_na", "=", "remove_na", ",", "remove_na_how", "=", "remove_na_how", ",", "remove_outliers", "=", "remove_outliers", ",", "sd_val", "=", "sd_val", ",", "remove_out_of_bounds", "=", "remove_out_of_bounds", ",", "low_bound", "=", "low_bound", ",", "high_bound", "=", "high_bound", ")", "# Correlation plot", "# fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)", "# fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')", "if", "rename_col", ":", "# Rename columns of dataframe", "clean_data_obj", ".", "rename_columns", "(", "rename_col", ")", "if", "drop_col", ":", "# Drop columns of dataframe", "clean_data_obj", ".", "drop_columns", "(", "drop_col", ")", "# Store cleaned data in wrapper class", "self", ".", "cleaned_data", "=", "clean_data_obj", ".", "cleaned_data", "# Logging", "self", ".", "result", "[", "'Clean'", "]", "=", "{", "'Rename Col'", ":", "rename_col", ",", "'Drop Col'", ":", "drop_col", ",", "'Resample'", ":", "resample", ",", "'Frequency'", ":", "freq", ",", "'Resampler'", ":", "resampler", ",", "'Interpolate'", ":", "interpolate", ",", "'Limit'", ":", "limit", ",", "'Method'", ":", "method", ",", "'Remove NA'", ":", "remove_na", ",", "'Remove NA How'", ":", "remove_na_how", ",", "'Remove Outliers'", ":", "remove_outliers", ",", "'SD Val'", ":", "sd_val", ",", "'Remove Out of Bounds'", ":", "remove_out_of_bounds", ",", "'Low Bound'", ":", "low_bound", ",", "'High Bound'", ":", "str", "(", "high_bound", ")", "if", "high_bound", "==", "float", "(", "'inf'", ")", "else", "high_bound", ",", "'Save File'", ":", "save_file", "}", "if", "save_file", ":", "f", "=", "self", ".", "results_folder_name", "+", "'/cleaned_data-'", "+", "str", "(", "self", ".", "get_global_count", "(", ")", ")", "+", "'.csv'", "self", ".", "cleaned_data", ".", "to_csv", "(", "f", ")", "self", ".", "result", "[", "'Clean'", "]", "[", "'Saved File'", "]", "=", "f", "else", ":", "self", ".", "result", "[", "'Clean'", "]", "[", "'Saved File'", "]", "=", "''", "return", "self", ".", "cleaned_data" ]
Cleans dataframe according to user specifications and stores result in self.cleaned_data. Parameters ---------- data : pd.DataFrame() Dataframe to be cleaned. rename_col : list(str) List of new column names. drop_col : list(str) Columns to be dropped. resample : bool Indicates whether to resample data or not. freq : str Resampling frequency i.e. d, h, 15T... resampler : str Resampling type i.e. mean, max. interpolate : bool Indicates whether to interpolate data or not. limit : int Interpolation limit. method : str Interpolation method. remove_na : bool Indicates whether to remove NAs or not. remove_na_how : str Specificies how to remove NA i.e. all, any... remove_outliers : bool Indicates whether to remove outliers or not. sd_val : int Standard Deviation Value (specifices how many SDs away is a point considered an outlier) remove_out_of_bounds : bool Indicates whether to remove out of bounds datapoints or not. low_bound : int Low bound of the data. high_bound : int High bound of the data. save_file : bool Specifies whether to save file or not. Defaults to True. Returns ------- pd.DataFrame() Dataframe containing cleaned data.
[ "Cleans", "dataframe", "according", "to", "user", "specifications", "and", "stores", "result", "in", "self", ".", "cleaned_data", "." ]
python
train
41.628571
saltstack/salt
salt/cli/daemons.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/daemons.py#L377-L389
def shutdown(self, exitcode=0, exitmsg=None): ''' If sub-classed, run any shutdown operations on this method. :param exitcode :param exitmsg ''' self.action_log_info('Shutting down') if hasattr(self, 'minion') and hasattr(self.minion, 'destroy'): self.minion.destroy() super(Minion, self).shutdown( exitcode, ('The Salt {0} is shutdown. {1}'.format( self.__class__.__name__, (exitmsg or '')).strip()))
[ "def", "shutdown", "(", "self", ",", "exitcode", "=", "0", ",", "exitmsg", "=", "None", ")", ":", "self", ".", "action_log_info", "(", "'Shutting down'", ")", "if", "hasattr", "(", "self", ",", "'minion'", ")", "and", "hasattr", "(", "self", ".", "minion", ",", "'destroy'", ")", ":", "self", ".", "minion", ".", "destroy", "(", ")", "super", "(", "Minion", ",", "self", ")", ".", "shutdown", "(", "exitcode", ",", "(", "'The Salt {0} is shutdown. {1}'", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "(", "exitmsg", "or", "''", ")", ")", ".", "strip", "(", ")", ")", ")" ]
If sub-classed, run any shutdown operations on this method. :param exitcode :param exitmsg
[ "If", "sub", "-", "classed", "run", "any", "shutdown", "operations", "on", "this", "method", "." ]
python
train
38
yougov/mongo-connector
mongo_connector/connector.py
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/connector.py#L348-L473
def run(self): """Discovers the mongo cluster and creates a thread for each primary. """ # Reset the global minimum MongoDB version update_mininum_mongodb_version(None) self.main_conn = self.create_authed_client() LOG.always( "Source MongoDB version: %s", self.main_conn.admin.command("buildInfo")["version"], ) for dm in self.doc_managers: name = dm.__class__.__module__ module = sys.modules[name] version = "unknown" if hasattr(module, "__version__"): version = module.__version__ elif hasattr(module, "version"): version = module.version LOG.always("Target DocManager: %s version: %s", name, version) self.read_oplog_progress() conn_type = None try: self.main_conn.admin.command("isdbgrid") except pymongo.errors.OperationFailure: conn_type = "REPLSET" if conn_type == "REPLSET": # Make sure we are connected to a replica set is_master = self.main_conn.admin.command("isMaster") if "setName" not in is_master: LOG.error( 'No replica set at "%s"! A replica set is required ' "to run mongo-connector. Shutting down..." % self.address ) return # Establish a connection to the replica set as a whole self.main_conn.close() self.main_conn = self.create_authed_client(replicaSet=is_master["setName"]) self.update_version_from_client(self.main_conn) # non sharded configuration oplog = OplogThread( self.main_conn, self.doc_managers, self.oplog_progress, self.namespace_config, **self.kwargs ) self.shard_set[0] = oplog LOG.info("MongoConnector: Starting connection thread %s" % self.main_conn) oplog.start() while self.can_run: shard_thread = self.shard_set[0] if not (shard_thread.running and shard_thread.is_alive()): LOG.error( "MongoConnector: OplogThread" " %s unexpectedly stopped! Shutting down" % (str(self.shard_set[0])) ) self.oplog_thread_join() for dm in self.doc_managers: dm.stop() return self.write_oplog_progress() time.sleep(1) else: # sharded cluster while self.can_run: # The backup role does not provide the listShards privilege, # so use the config.shards collection instead. for shard_doc in retry_until_ok( lambda: list(self.main_conn.config.shards.find()) ): shard_id = shard_doc["_id"] if shard_id in self.shard_set: shard_thread = self.shard_set[shard_id] if not (shard_thread.running and shard_thread.is_alive()): LOG.error( "MongoConnector: OplogThread " "%s unexpectedly stopped! Shutting " "down" % (str(self.shard_set[shard_id])) ) self.oplog_thread_join() for dm in self.doc_managers: dm.stop() return self.write_oplog_progress() time.sleep(1) continue try: repl_set, hosts = shard_doc["host"].split("/") except ValueError: cause = "The system only uses replica sets!" LOG.exception("MongoConnector: %s", cause) self.oplog_thread_join() for dm in self.doc_managers: dm.stop() return shard_conn = self.create_authed_client(hosts, replicaSet=repl_set) self.update_version_from_client(shard_conn) oplog = OplogThread( shard_conn, self.doc_managers, self.oplog_progress, self.namespace_config, mongos_client=self.main_conn, **self.kwargs ) self.shard_set[shard_id] = oplog msg = "Starting connection thread" LOG.info("MongoConnector: %s %s" % (msg, shard_conn)) oplog.start() if self.signal is not None: LOG.info("recieved signal %s: shutting down...", self.signal) self.oplog_thread_join() self.write_oplog_progress()
[ "def", "run", "(", "self", ")", ":", "# Reset the global minimum MongoDB version", "update_mininum_mongodb_version", "(", "None", ")", "self", ".", "main_conn", "=", "self", ".", "create_authed_client", "(", ")", "LOG", ".", "always", "(", "\"Source MongoDB version: %s\"", ",", "self", ".", "main_conn", ".", "admin", ".", "command", "(", "\"buildInfo\"", ")", "[", "\"version\"", "]", ",", ")", "for", "dm", "in", "self", ".", "doc_managers", ":", "name", "=", "dm", ".", "__class__", ".", "__module__", "module", "=", "sys", ".", "modules", "[", "name", "]", "version", "=", "\"unknown\"", "if", "hasattr", "(", "module", ",", "\"__version__\"", ")", ":", "version", "=", "module", ".", "__version__", "elif", "hasattr", "(", "module", ",", "\"version\"", ")", ":", "version", "=", "module", ".", "version", "LOG", ".", "always", "(", "\"Target DocManager: %s version: %s\"", ",", "name", ",", "version", ")", "self", ".", "read_oplog_progress", "(", ")", "conn_type", "=", "None", "try", ":", "self", ".", "main_conn", ".", "admin", ".", "command", "(", "\"isdbgrid\"", ")", "except", "pymongo", ".", "errors", ".", "OperationFailure", ":", "conn_type", "=", "\"REPLSET\"", "if", "conn_type", "==", "\"REPLSET\"", ":", "# Make sure we are connected to a replica set", "is_master", "=", "self", ".", "main_conn", ".", "admin", ".", "command", "(", "\"isMaster\"", ")", "if", "\"setName\"", "not", "in", "is_master", ":", "LOG", ".", "error", "(", "'No replica set at \"%s\"! A replica set is required '", "\"to run mongo-connector. Shutting down...\"", "%", "self", ".", "address", ")", "return", "# Establish a connection to the replica set as a whole", "self", ".", "main_conn", ".", "close", "(", ")", "self", ".", "main_conn", "=", "self", ".", "create_authed_client", "(", "replicaSet", "=", "is_master", "[", "\"setName\"", "]", ")", "self", ".", "update_version_from_client", "(", "self", ".", "main_conn", ")", "# non sharded configuration", "oplog", "=", "OplogThread", "(", "self", ".", "main_conn", ",", "self", ".", "doc_managers", ",", "self", ".", "oplog_progress", ",", "self", ".", "namespace_config", ",", "*", "*", "self", ".", "kwargs", ")", "self", ".", "shard_set", "[", "0", "]", "=", "oplog", "LOG", ".", "info", "(", "\"MongoConnector: Starting connection thread %s\"", "%", "self", ".", "main_conn", ")", "oplog", ".", "start", "(", ")", "while", "self", ".", "can_run", ":", "shard_thread", "=", "self", ".", "shard_set", "[", "0", "]", "if", "not", "(", "shard_thread", ".", "running", "and", "shard_thread", ".", "is_alive", "(", ")", ")", ":", "LOG", ".", "error", "(", "\"MongoConnector: OplogThread\"", "\" %s unexpectedly stopped! 
Shutting down\"", "%", "(", "str", "(", "self", ".", "shard_set", "[", "0", "]", ")", ")", ")", "self", ".", "oplog_thread_join", "(", ")", "for", "dm", "in", "self", ".", "doc_managers", ":", "dm", ".", "stop", "(", ")", "return", "self", ".", "write_oplog_progress", "(", ")", "time", ".", "sleep", "(", "1", ")", "else", ":", "# sharded cluster", "while", "self", ".", "can_run", ":", "# The backup role does not provide the listShards privilege,", "# so use the config.shards collection instead.", "for", "shard_doc", "in", "retry_until_ok", "(", "lambda", ":", "list", "(", "self", ".", "main_conn", ".", "config", ".", "shards", ".", "find", "(", ")", ")", ")", ":", "shard_id", "=", "shard_doc", "[", "\"_id\"", "]", "if", "shard_id", "in", "self", ".", "shard_set", ":", "shard_thread", "=", "self", ".", "shard_set", "[", "shard_id", "]", "if", "not", "(", "shard_thread", ".", "running", "and", "shard_thread", ".", "is_alive", "(", ")", ")", ":", "LOG", ".", "error", "(", "\"MongoConnector: OplogThread \"", "\"%s unexpectedly stopped! Shutting \"", "\"down\"", "%", "(", "str", "(", "self", ".", "shard_set", "[", "shard_id", "]", ")", ")", ")", "self", ".", "oplog_thread_join", "(", ")", "for", "dm", "in", "self", ".", "doc_managers", ":", "dm", ".", "stop", "(", ")", "return", "self", ".", "write_oplog_progress", "(", ")", "time", ".", "sleep", "(", "1", ")", "continue", "try", ":", "repl_set", ",", "hosts", "=", "shard_doc", "[", "\"host\"", "]", ".", "split", "(", "\"/\"", ")", "except", "ValueError", ":", "cause", "=", "\"The system only uses replica sets!\"", "LOG", ".", "exception", "(", "\"MongoConnector: %s\"", ",", "cause", ")", "self", ".", "oplog_thread_join", "(", ")", "for", "dm", "in", "self", ".", "doc_managers", ":", "dm", ".", "stop", "(", ")", "return", "shard_conn", "=", "self", ".", "create_authed_client", "(", "hosts", ",", "replicaSet", "=", "repl_set", ")", "self", ".", "update_version_from_client", "(", "shard_conn", ")", "oplog", "=", "OplogThread", "(", "shard_conn", ",", "self", ".", "doc_managers", ",", "self", ".", "oplog_progress", ",", "self", ".", "namespace_config", ",", "mongos_client", "=", "self", ".", "main_conn", ",", "*", "*", "self", ".", "kwargs", ")", "self", ".", "shard_set", "[", "shard_id", "]", "=", "oplog", "msg", "=", "\"Starting connection thread\"", "LOG", ".", "info", "(", "\"MongoConnector: %s %s\"", "%", "(", "msg", ",", "shard_conn", ")", ")", "oplog", ".", "start", "(", ")", "if", "self", ".", "signal", "is", "not", "None", ":", "LOG", ".", "info", "(", "\"recieved signal %s: shutting down...\"", ",", "self", ".", "signal", ")", "self", ".", "oplog_thread_join", "(", ")", "self", ".", "write_oplog_progress", "(", ")" ]
Discovers the mongo cluster and creates a thread for each primary.
[ "Discovers", "the", "mongo", "cluster", "and", "creates", "a", "thread", "for", "each", "primary", "." ]
python
train
40.222222
saltstack/salt
salt/modules/selinux.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/selinux.py#L331-L376
def list_semod(): ''' Return a structure listing all of the selinux modules on the system and what state they are in CLI Example: .. code-block:: bash salt '*' selinux.list_semod .. versionadded:: 2016.3.0 ''' helptext = __salt__['cmd.run']('semodule -h').splitlines() semodule_version = '' for line in helptext: if line.strip().startswith('full'): semodule_version = 'new' if semodule_version == 'new': mdata = __salt__['cmd.run']('semodule -lfull').splitlines() ret = {} for line in mdata: if not line.strip(): continue comps = line.split() if len(comps) == 4: ret[comps[1]] = {'Enabled': False, 'Version': None} else: ret[comps[1]] = {'Enabled': True, 'Version': None} else: mdata = __salt__['cmd.run']('semodule -l').splitlines() ret = {} for line in mdata: if not line.strip(): continue comps = line.split() if len(comps) == 3: ret[comps[0]] = {'Enabled': False, 'Version': comps[1]} else: ret[comps[0]] = {'Enabled': True, 'Version': comps[1]} return ret
[ "def", "list_semod", "(", ")", ":", "helptext", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'semodule -h'", ")", ".", "splitlines", "(", ")", "semodule_version", "=", "''", "for", "line", "in", "helptext", ":", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "'full'", ")", ":", "semodule_version", "=", "'new'", "if", "semodule_version", "==", "'new'", ":", "mdata", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'semodule -lfull'", ")", ".", "splitlines", "(", ")", "ret", "=", "{", "}", "for", "line", "in", "mdata", ":", "if", "not", "line", ".", "strip", "(", ")", ":", "continue", "comps", "=", "line", ".", "split", "(", ")", "if", "len", "(", "comps", ")", "==", "4", ":", "ret", "[", "comps", "[", "1", "]", "]", "=", "{", "'Enabled'", ":", "False", ",", "'Version'", ":", "None", "}", "else", ":", "ret", "[", "comps", "[", "1", "]", "]", "=", "{", "'Enabled'", ":", "True", ",", "'Version'", ":", "None", "}", "else", ":", "mdata", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'semodule -l'", ")", ".", "splitlines", "(", ")", "ret", "=", "{", "}", "for", "line", "in", "mdata", ":", "if", "not", "line", ".", "strip", "(", ")", ":", "continue", "comps", "=", "line", ".", "split", "(", ")", "if", "len", "(", "comps", ")", "==", "3", ":", "ret", "[", "comps", "[", "0", "]", "]", "=", "{", "'Enabled'", ":", "False", ",", "'Version'", ":", "comps", "[", "1", "]", "}", "else", ":", "ret", "[", "comps", "[", "0", "]", "]", "=", "{", "'Enabled'", ":", "True", ",", "'Version'", ":", "comps", "[", "1", "]", "}", "return", "ret" ]
Return a structure listing all of the selinux modules on the system and what state they are in CLI Example: .. code-block:: bash salt '*' selinux.list_semod .. versionadded:: 2016.3.0
[ "Return", "a", "structure", "listing", "all", "of", "the", "selinux", "modules", "on", "the", "system", "and", "what", "state", "they", "are", "in" ]
python
train
29.826087
sanger-pathogens/Fastaq
pyfastaq/intervals.py
https://github.com/sanger-pathogens/Fastaq/blob/2c775c846d2491678a9637daa320592e02c26c72/pyfastaq/intervals.py#L34-L39
def distance_to_point(self, p): '''Returns the distance from the point to the interval. Zero if the point lies inside the interval.''' if self.start <= p <= self.end: return 0 else: return min(abs(self.start - p), abs(self.end - p))
[ "def", "distance_to_point", "(", "self", ",", "p", ")", ":", "if", "self", ".", "start", "<=", "p", "<=", "self", ".", "end", ":", "return", "0", "else", ":", "return", "min", "(", "abs", "(", "self", ".", "start", "-", "p", ")", ",", "abs", "(", "self", ".", "end", "-", "p", ")", ")" ]
Returns the distance from the point to the interval. Zero if the point lies inside the interval.
[ "Returns", "the", "distance", "from", "the", "point", "to", "the", "interval", ".", "Zero", "if", "the", "point", "lies", "inside", "the", "interval", "." ]
python
valid
45.833333
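The arithmetic is simple enough to sketch without the pyfastaq Interval class; the standalone version below is an illustration, not the library code, and reproduces the rule and its edge cases.

def distance_to_point(start, end, p):
    # Zero when p falls inside [start, end]; otherwise distance to the nearer end.
    if start <= p <= end:
        return 0
    return min(abs(start - p), abs(end - p))

assert distance_to_point(10, 20, 15) == 0   # inside the interval
assert distance_to_point(10, 20, 10) == 0   # boundaries count as inside
assert distance_to_point(10, 20, 7) == 3    # to the left
assert distance_to_point(10, 20, 25) == 5   # to the right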
samirelanduk/quickplots
quickplots/charts.py
https://github.com/samirelanduk/quickplots/blob/59f5e6ff367b2c1c24ba7cf1805d03552034c6d8/quickplots/charts.py#L516-L534
def y_ticks(self, *ticks):
        """The points on the y-axis for which there are markers and grid lines.

        There are default ticks, but you can pass values to this method to
        override the defaults. Otherwise the method will return the ticks.

        :param \*ticks: if given, these will be chart's y-ticks.
        :rtype: ``tuple``"""

        if ticks:
            for tick in ticks:
                if not is_numeric(tick):
                    raise TypeError("'%s' is not a numeric tick" % str(tick))
            self._y_ticks = tuple(sorted(ticks))
        else:
            if self._y_ticks:
                return self._y_ticks
            else:
                return determine_ticks(self.y_lower_limit(), self.y_upper_limit())
[ "def", "y_ticks", "(", "self", ",", "*", "ticks", ")", ":", "if", "ticks", ":", "for", "tick", "in", "ticks", ":", "if", "not", "is_numeric", "(", "tick", ")", ":", "raise", "TypeError", "(", "\"'%s' is not a numeric tick\"", "%", "str", "(", "tick", ")", ")", "self", ".", "_y_ticks", "=", "tuple", "(", "sorted", "(", "ticks", ")", ")", "else", ":", "if", "self", ".", "_y_ticks", ":", "return", "self", ".", "_y_ticks", "else", ":", "return", "determine_ticks", "(", "self", ".", "y_lower_limit", "(", ")", ",", "self", ".", "y_upper_limit", "(", ")", ")" ]
The points on the y-axis for which there are markers and grid lines.

        There are default ticks, but you can pass values to this method to
        override the defaults. Otherwise the method will return the ticks.

        :param \*ticks: if given, these will be chart's y-ticks.
        :rtype: ``tuple``
[ "The", "points", "on", "the", "y", "-", "axis", "for", "which", "there", "are", "markers", "and", "grid", "lines", "." ]
python
train
38.631579
DataDog/integrations-core
kube_dns/datadog_checks/kube_dns/kube_dns.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/kube_dns/datadog_checks/kube_dns/kube_dns.py#L56-L86
def _create_kube_dns_instance(self, instance): """ Set up kube_dns instance so it can be used in OpenMetricsBaseCheck """ kube_dns_instance = deepcopy(instance) # kube_dns uses 'prometheus_endpoint' and not 'prometheus_url', so we have to rename the key kube_dns_instance['prometheus_url'] = instance.get('prometheus_endpoint', None) kube_dns_instance.update( { 'namespace': 'kubedns', # Note: the count metrics were moved to specific functions list below to be submitted # as both gauges and monotonic_counts 'metrics': [ { # metrics have been renamed to kubedns in kubernetes 1.6.0 'kubedns_kubedns_dns_response_size_bytes': 'response_size.bytes', 'kubedns_kubedns_dns_request_duration_seconds': 'request_duration.seconds', # metrics names for kubernetes < 1.6.0 'skydns_skydns_dns_response_size_bytes': 'response_size.bytes', 'skydns_skydns_dns_request_duration_seconds': 'request_duration.seconds', } ], # Defaults that were set when kube_dns was based on PrometheusCheck 'send_monotonic_counter': instance.get('send_monotonic_counter', False), 'health_service_check': instance.get('health_service_check', False), } ) return kube_dns_instance
[ "def", "_create_kube_dns_instance", "(", "self", ",", "instance", ")", ":", "kube_dns_instance", "=", "deepcopy", "(", "instance", ")", "# kube_dns uses 'prometheus_endpoint' and not 'prometheus_url', so we have to rename the key", "kube_dns_instance", "[", "'prometheus_url'", "]", "=", "instance", ".", "get", "(", "'prometheus_endpoint'", ",", "None", ")", "kube_dns_instance", ".", "update", "(", "{", "'namespace'", ":", "'kubedns'", ",", "# Note: the count metrics were moved to specific functions list below to be submitted", "# as both gauges and monotonic_counts", "'metrics'", ":", "[", "{", "# metrics have been renamed to kubedns in kubernetes 1.6.0", "'kubedns_kubedns_dns_response_size_bytes'", ":", "'response_size.bytes'", ",", "'kubedns_kubedns_dns_request_duration_seconds'", ":", "'request_duration.seconds'", ",", "# metrics names for kubernetes < 1.6.0", "'skydns_skydns_dns_response_size_bytes'", ":", "'response_size.bytes'", ",", "'skydns_skydns_dns_request_duration_seconds'", ":", "'request_duration.seconds'", ",", "}", "]", ",", "# Defaults that were set when kube_dns was based on PrometheusCheck", "'send_monotonic_counter'", ":", "instance", ".", "get", "(", "'send_monotonic_counter'", ",", "False", ")", ",", "'health_service_check'", ":", "instance", ".", "get", "(", "'health_service_check'", ",", "False", ")", ",", "}", ")", "return", "kube_dns_instance" ]
Set up kube_dns instance so it can be used in OpenMetricsBaseCheck
[ "Set", "up", "kube_dns", "instance", "so", "it", "can", "be", "used", "in", "OpenMetricsBaseCheck" ]
python
train
49.258065
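The key step in the helper above is the rename from the check's `prometheus_endpoint` option to the `prometheus_url` key expected by OpenMetricsBaseCheck, done on a deep copy so the caller's dict is untouched. A toy reproduction (the instance values below are invented):

from copy import deepcopy

instance = {'prometheus_endpoint': 'http://10.0.0.5:10055/metrics', 'tags': ['env:dev']}

kube_dns_instance = deepcopy(instance)          # leave the original config untouched
kube_dns_instance['prometheus_url'] = instance.get('prometheus_endpoint', None)
kube_dns_instance.update({'namespace': 'kubedns',
                          'send_monotonic_counter': instance.get('send_monotonic_counter', False)})

print(kube_dns_instance['prometheus_url'])      # http://10.0.0.5:10055/metrics
print('prometheus_endpoint' in instance)        # True: the source dict is unchanged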
seomoz/qless-py
qless/util.py
https://github.com/seomoz/qless-py/blob/3eda4ffcd4c0016c9a7e44f780d6155e1a354dda/qless/util.py#L4-L9
def import_class(klass): '''Import the named class and return that class''' mod = __import__(klass.rpartition('.')[0]) for segment in klass.split('.')[1:-1]: mod = getattr(mod, segment) return getattr(mod, klass.rpartition('.')[2])
[ "def", "import_class", "(", "klass", ")", ":", "mod", "=", "__import__", "(", "klass", ".", "rpartition", "(", "'.'", ")", "[", "0", "]", ")", "for", "segment", "in", "klass", ".", "split", "(", "'.'", ")", "[", "1", ":", "-", "1", "]", ":", "mod", "=", "getattr", "(", "mod", ",", "segment", ")", "return", "getattr", "(", "mod", ",", "klass", ".", "rpartition", "(", "'.'", ")", "[", "2", "]", ")" ]
Import the named class and return that class
[ "Import", "the", "named", "class", "and", "return", "that", "class" ]
python
train
41.666667
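Because the helper only relies on `__import__` and `getattr`, it can be exercised as-is against the standard library; the copy below is verbatim apart from the surrounding demo.

def import_class(klass):
    # Import the top-level package, walk intermediate attributes, return the leaf.
    mod = __import__(klass.rpartition('.')[0])
    for segment in klass.split('.')[1:-1]:
        mod = getattr(mod, segment)
    return getattr(mod, klass.rpartition('.')[2])

OrderedDict = import_class('collections.OrderedDict')
print(OrderedDict)                    # <class 'collections.OrderedDict'>

join = import_class('os.path.join')   # nested module attributes also resolve
print(join('tmp', 'file.txt'))        # tmp/file.txt on POSIX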
LonamiWebs/Telethon
telethon/utils.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/utils.py#L457-L469
def get_input_message(message): """Similar to :meth:`get_input_peer`, but for input messages.""" try: if isinstance(message, int): # This case is really common too return types.InputMessageID(message) elif message.SUBCLASS_OF_ID == 0x54b6bcc5: # crc32(b'InputMessage'): return message elif message.SUBCLASS_OF_ID == 0x790009e3: # crc32(b'Message'): return types.InputMessageID(message.id) except AttributeError: pass _raise_cast_fail(message, 'InputMedia')
[ "def", "get_input_message", "(", "message", ")", ":", "try", ":", "if", "isinstance", "(", "message", ",", "int", ")", ":", "# This case is really common too", "return", "types", ".", "InputMessageID", "(", "message", ")", "elif", "message", ".", "SUBCLASS_OF_ID", "==", "0x54b6bcc5", ":", "# crc32(b'InputMessage'):", "return", "message", "elif", "message", ".", "SUBCLASS_OF_ID", "==", "0x790009e3", ":", "# crc32(b'Message'):", "return", "types", ".", "InputMessageID", "(", "message", ".", "id", ")", "except", "AttributeError", ":", "pass", "_raise_cast_fail", "(", "message", ",", "'InputMedia'", ")" ]
Similar to :meth:`get_input_peer`, but for input messages.
[ "Similar", "to", ":", "meth", ":", "get_input_peer", "but", "for", "input", "messages", "." ]
python
train
40.923077
benley/butcher
butcher/targets/pkgfilegroup.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/pkgfilegroup.py#L62-L73
def output_files(self): """Returns the list of output files from this rule. Paths are generated from the outputs of this rule's dependencies, with their paths translated based on prefix and strip_prefix. Returned paths are relative to buildroot. """ for dep in self.subgraph.successors(self.address): dep_rule = self.subgraph.node[dep]['target_obj'] for dep_file in dep_rule.output_files: yield self.translate_path(dep_file, dep_rule).lstrip('/')
[ "def", "output_files", "(", "self", ")", ":", "for", "dep", "in", "self", ".", "subgraph", ".", "successors", "(", "self", ".", "address", ")", ":", "dep_rule", "=", "self", ".", "subgraph", ".", "node", "[", "dep", "]", "[", "'target_obj'", "]", "for", "dep_file", "in", "dep_rule", ".", "output_files", ":", "yield", "self", ".", "translate_path", "(", "dep_file", ",", "dep_rule", ")", ".", "lstrip", "(", "'/'", ")" ]
Returns the list of output files from this rule. Paths are generated from the outputs of this rule's dependencies, with their paths translated based on prefix and strip_prefix. Returned paths are relative to buildroot.
[ "Returns", "the", "list", "of", "output", "files", "from", "this", "rule", "." ]
python
train
43.75
buriburisuri/sugartensor
sugartensor/sg_transform.py
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L338-L353
def sg_mean(tensor, opt): r"""Computes the mean of elements across axis of a tensor. See `tf.reduce_mean()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ return tf.reduce_mean(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
[ "def", "sg_mean", "(", "tensor", ",", "opt", ")", ":", "return", "tf", ".", "reduce_mean", "(", "tensor", ",", "axis", "=", "opt", ".", "axis", ",", "keep_dims", "=", "opt", ".", "keep_dims", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Computes the mean of elements across axis of a tensor. See `tf.reduce_mean()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`.
[ "r", "Computes", "the", "mean", "of", "elements", "across", "axis", "of", "a", "tensor", ".", "See", "tf", ".", "reduce_mean", "()", "in", "tensorflow", "." ]
python
train
33.1875
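The axis/keep_dims semantics that `tf.reduce_mean` applies to the chained tensor can be illustrated with NumPy, which behaves the same way for this reduction (NumPy is only a stand-in here, not part of sugartensor):

import numpy as np

x = np.arange(6.0).reshape(2, 3)          # [[0. 1. 2.], [3. 4. 5.]]
print(np.mean(x, axis=1))                 # [1. 4.]      -> reduced axis removed
print(np.mean(x, axis=1, keepdims=True))  # [[1.], [4.]] -> length-1 axis kept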
prompt-toolkit/pymux
pymux/commands/commands.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/commands/commands.py#L603-L610
def clear_history(pymux, variables): " Clear scrollback buffer. " pane = pymux.arrangement.get_active_pane() if pane.display_scroll_buffer: raise CommandException('Not available in copy mode') else: pane.process.screen.clear_history()
[ "def", "clear_history", "(", "pymux", ",", "variables", ")", ":", "pane", "=", "pymux", ".", "arrangement", ".", "get_active_pane", "(", ")", "if", "pane", ".", "display_scroll_buffer", ":", "raise", "CommandException", "(", "'Not available in copy mode'", ")", "else", ":", "pane", ".", "process", ".", "screen", ".", "clear_history", "(", ")" ]
Clear scrollback buffer.
[ "Clear", "scrollback", "buffer", "." ]
python
train
32.5
edx/edx-enterprise
integrated_channels/xapi/management/commands/send_course_completions.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/xapi/management/commands/send_course_completions.py#L168-L183
def prefetch_users(persistent_course_grades): """ Prefetch Users from the list of user_ids present in the persistent_course_grades. Arguments: persistent_course_grades (list): A list of PersistentCourseGrade. Returns: (dict): A dictionary containing user_id to user mapping. """ users = User.objects.filter( id__in=[grade.user_id for grade in persistent_course_grades] ) return { user.id: user for user in users }
[ "def", "prefetch_users", "(", "persistent_course_grades", ")", ":", "users", "=", "User", ".", "objects", ".", "filter", "(", "id__in", "=", "[", "grade", ".", "user_id", "for", "grade", "in", "persistent_course_grades", "]", ")", "return", "{", "user", ".", "id", ":", "user", "for", "user", "in", "users", "}" ]
Prefetch Users from the list of user_ids present in the persistent_course_grades. Arguments: persistent_course_grades (list): A list of PersistentCourseGrade. Returns: (dict): A dictionary containing user_id to user mapping.
[ "Prefetch", "Users", "from", "the", "list", "of", "user_ids", "present", "in", "the", "persistent_course_grades", "." ]
python
valid
32.5
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L487-L511
def simxAuxiliaryConsoleOpen(clientID, title, maxLines, mode, position, size, textColor, backgroundColor, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' consoleHandle = ct.c_int() if (sys.version_info[0] == 3) and (type(title) is str): title=title.encode('utf-8') if position != None: c_position = (ct.c_int*2)(*position) else: c_position = None if size != None: c_size = (ct.c_int*2)(*size) else: c_size = None if textColor != None: c_textColor = (ct.c_float*3)(*textColor) else: c_textColor = None if backgroundColor != None: c_backgroundColor = (ct.c_float*3)(*backgroundColor) else: c_backgroundColor = None return c_AuxiliaryConsoleOpen(clientID, title, maxLines, mode, c_position, c_size, c_textColor, c_backgroundColor, ct.byref(consoleHandle), operationMode), consoleHandle.value
[ "def", "simxAuxiliaryConsoleOpen", "(", "clientID", ",", "title", ",", "maxLines", ",", "mode", ",", "position", ",", "size", ",", "textColor", ",", "backgroundColor", ",", "operationMode", ")", ":", "consoleHandle", "=", "ct", ".", "c_int", "(", ")", "if", "(", "sys", ".", "version_info", "[", "0", "]", "==", "3", ")", "and", "(", "type", "(", "title", ")", "is", "str", ")", ":", "title", "=", "title", ".", "encode", "(", "'utf-8'", ")", "if", "position", "!=", "None", ":", "c_position", "=", "(", "ct", ".", "c_int", "*", "2", ")", "(", "*", "position", ")", "else", ":", "c_position", "=", "None", "if", "size", "!=", "None", ":", "c_size", "=", "(", "ct", ".", "c_int", "*", "2", ")", "(", "*", "size", ")", "else", ":", "c_size", "=", "None", "if", "textColor", "!=", "None", ":", "c_textColor", "=", "(", "ct", ".", "c_float", "*", "3", ")", "(", "*", "textColor", ")", "else", ":", "c_textColor", "=", "None", "if", "backgroundColor", "!=", "None", ":", "c_backgroundColor", "=", "(", "ct", ".", "c_float", "*", "3", ")", "(", "*", "backgroundColor", ")", "else", ":", "c_backgroundColor", "=", "None", "return", "c_AuxiliaryConsoleOpen", "(", "clientID", ",", "title", ",", "maxLines", ",", "mode", ",", "c_position", ",", "c_size", ",", "c_textColor", ",", "c_backgroundColor", ",", "ct", ".", "byref", "(", "consoleHandle", ")", ",", "operationMode", ")", ",", "consoleHandle", ".", "value" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
38.2
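The conversions this binding performs, such as `(ct.c_int*2)(*position)` for fixed-size C arrays and `ct.byref(consoleHandle)` for out-parameters, work the same outside of V-REP; a small self-contained ctypes illustration (values are arbitrary):

import ctypes as ct

position = [100, 200]
c_position = (ct.c_int * 2)(*position)           # C int[2]
c_text_color = (ct.c_float * 3)(1.0, 0.5, 0.0)   # C float[3]

print(list(c_position))                          # [100, 200]
print([round(v, 2) for v in c_text_color])       # [1.0, 0.5, 0.0]

console_handle = ct.c_int()                      # out-parameter, passed via ct.byref(...)
print(console_handle.value)                      # 0 until the C side writes to it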
dwavesystems/dimod
dimod/binary_quadratic_model.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L1905-L1958
def from_networkx_graph(cls, G, vartype=None, node_attribute_name='bias', edge_attribute_name='bias'): """Create a binary quadratic model from a NetworkX graph. Args: G (:obj:`networkx.Graph`): A NetworkX graph with biases stored as node/edge attributes. vartype (:class:`.Vartype`/str/set, optional): Variable type for the binary quadratic model. Accepted input values: * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}`` * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}`` If not provided, the `G` should have a vartype attribute. If `vartype` is provided and `G.vartype` exists then the argument overrides the property. node_attribute_name (hashable, optional, default='bias'): Attribute name for linear biases. If the node does not have a matching attribute then the bias defaults to 0. edge_attribute_name (hashable, optional, default='bias'): Attribute name for quadratic biases. If the edge does not have a matching attribute then the bias defaults to 0. Returns: :obj:`.BinaryQuadraticModel` Examples: >>> import networkx as nx ... >>> G = nx.Graph() >>> G.add_node('a', bias=.5) >>> G.add_edge('a', 'b', bias=-1) >>> bqm = dimod.BinaryQuadraticModel.from_networkx_graph(G, 'SPIN') >>> bqm.adj['a']['b'] -1 """ if vartype is None: if not hasattr(G, 'vartype'): msg = ("either 'vartype' argument must be provided or " "the given graph should have a vartype attribute.") raise ValueError(msg) vartype = G.vartype linear = G.nodes(data=node_attribute_name, default=0) quadratic = G.edges(data=edge_attribute_name, default=0) offset = getattr(G, 'offset', 0) return cls(linear, quadratic, offset, vartype)
[ "def", "from_networkx_graph", "(", "cls", ",", "G", ",", "vartype", "=", "None", ",", "node_attribute_name", "=", "'bias'", ",", "edge_attribute_name", "=", "'bias'", ")", ":", "if", "vartype", "is", "None", ":", "if", "not", "hasattr", "(", "G", ",", "'vartype'", ")", ":", "msg", "=", "(", "\"either 'vartype' argument must be provided or \"", "\"the given graph should have a vartype attribute.\"", ")", "raise", "ValueError", "(", "msg", ")", "vartype", "=", "G", ".", "vartype", "linear", "=", "G", ".", "nodes", "(", "data", "=", "node_attribute_name", ",", "default", "=", "0", ")", "quadratic", "=", "G", ".", "edges", "(", "data", "=", "edge_attribute_name", ",", "default", "=", "0", ")", "offset", "=", "getattr", "(", "G", ",", "'offset'", ",", "0", ")", "return", "cls", "(", "linear", ",", "quadratic", ",", "offset", ",", "vartype", ")" ]
Create a binary quadratic model from a NetworkX graph. Args: G (:obj:`networkx.Graph`): A NetworkX graph with biases stored as node/edge attributes. vartype (:class:`.Vartype`/str/set, optional): Variable type for the binary quadratic model. Accepted input values: * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}`` * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}`` If not provided, the `G` should have a vartype attribute. If `vartype` is provided and `G.vartype` exists then the argument overrides the property. node_attribute_name (hashable, optional, default='bias'): Attribute name for linear biases. If the node does not have a matching attribute then the bias defaults to 0. edge_attribute_name (hashable, optional, default='bias'): Attribute name for quadratic biases. If the edge does not have a matching attribute then the bias defaults to 0. Returns: :obj:`.BinaryQuadraticModel` Examples: >>> import networkx as nx ... >>> G = nx.Graph() >>> G.add_node('a', bias=.5) >>> G.add_edge('a', 'b', bias=-1) >>> bqm = dimod.BinaryQuadraticModel.from_networkx_graph(G, 'SPIN') >>> bqm.adj['a']['b'] -1
[ "Create", "a", "binary", "quadratic", "model", "from", "a", "NetworkX", "graph", "." ]
python
train
38.925926
pyparsing/pyparsing
examples/pymicko.py
https://github.com/pyparsing/pyparsing/blob/f0264bd8d1a548a50b3e5f7d99cfefd577942d14/examples/pymicko.py#L409-L422
def insert_id(self, sname, skind, skinds, stype): """Inserts a new identifier at the end of the symbol table, if possible. Returns symbol index, or raises an exception if the symbol alredy exists sname - symbol name skind - symbol kind skinds - symbol kinds to check for stype - symbol type """ index = self.lookup_symbol(sname, skinds) if index == None: index = self.insert_symbol(sname, skind, stype) return index else: raise SemanticException("Redefinition of '%s'" % sname)
[ "def", "insert_id", "(", "self", ",", "sname", ",", "skind", ",", "skinds", ",", "stype", ")", ":", "index", "=", "self", ".", "lookup_symbol", "(", "sname", ",", "skinds", ")", "if", "index", "==", "None", ":", "index", "=", "self", ".", "insert_symbol", "(", "sname", ",", "skind", ",", "stype", ")", "return", "index", "else", ":", "raise", "SemanticException", "(", "\"Redefinition of '%s'\"", "%", "sname", ")" ]
Inserts a new identifier at the end of the symbol table, if possible. Returns symbol index, or raises an exception if the symbol alredy exists sname - symbol name skind - symbol kind skinds - symbol kinds to check for stype - symbol type
[ "Inserts", "a", "new", "identifier", "at", "the", "end", "of", "the", "symbol", "table", "if", "possible", ".", "Returns", "symbol", "index", "or", "raises", "an", "exception", "if", "the", "symbol", "alredy", "exists", "sname", "-", "symbol", "name", "skind", "-", "symbol", "kind", "skinds", "-", "symbol", "kinds", "to", "check", "for", "stype", "-", "symbol", "type" ]
python
train
43.928571
tanghaibao/jcvi
jcvi/formats/fasta.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fasta.py#L2038-L2093
def trim(args): """ %prog trim fasta.screen newfasta take the screen output from `cross_match` (against a vector db, for example), then trim the sequences to remove X's. Will also perform quality trim if fasta.screen.qual is found. The trimming algorithm is based on finding the subarray that maximize the sum """ from jcvi.algorithms.maxsum import max_sum p = OptionParser(trim.__doc__) p.add_option("-c", dest="min_length", type="int", default=64, help="minimum sequence length after trimming") p.add_option("-s", dest="score", default=QUAL, help="quality trimming cutoff [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) fastafile, newfastafile = args qualfile = get_qual(fastafile) newqualfile = get_qual(newfastafile, check=False) logging.debug("Trim bad sequence from fasta file `%s` to `%s`" % \ (fastafile, newfastafile)) fw = must_open(newfastafile, "w") fw_qual = open(newqualfile, "w") dropped = trimmed = 0 for rec in iter_fasta_qual(fastafile, qualfile, modify=True): qv = [x - opts.score for x in \ rec.letter_annotations["phred_quality"]] msum, trim_start, trim_end = max_sum(qv) score = trim_end - trim_start + 1 if score < opts.min_length: dropped += 1 continue if score < len(rec): trimmed += 1 rec = rec[trim_start:trim_end + 1] write_fasta_qual(rec, fw, fw_qual) print("A total of %d sequences modified." % trimmed, file=sys.stderr) print("A total of %d sequences dropped (length < %d)." % \ (dropped, opts.min_length), file=sys.stderr) fw.close() fw_qual.close()
[ "def", "trim", "(", "args", ")", ":", "from", "jcvi", ".", "algorithms", ".", "maxsum", "import", "max_sum", "p", "=", "OptionParser", "(", "trim", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"-c\"", ",", "dest", "=", "\"min_length\"", ",", "type", "=", "\"int\"", ",", "default", "=", "64", ",", "help", "=", "\"minimum sequence length after trimming\"", ")", "p", ".", "add_option", "(", "\"-s\"", ",", "dest", "=", "\"score\"", ",", "default", "=", "QUAL", ",", "help", "=", "\"quality trimming cutoff [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "p", ".", "print_help", "(", ")", ")", "fastafile", ",", "newfastafile", "=", "args", "qualfile", "=", "get_qual", "(", "fastafile", ")", "newqualfile", "=", "get_qual", "(", "newfastafile", ",", "check", "=", "False", ")", "logging", ".", "debug", "(", "\"Trim bad sequence from fasta file `%s` to `%s`\"", "%", "(", "fastafile", ",", "newfastafile", ")", ")", "fw", "=", "must_open", "(", "newfastafile", ",", "\"w\"", ")", "fw_qual", "=", "open", "(", "newqualfile", ",", "\"w\"", ")", "dropped", "=", "trimmed", "=", "0", "for", "rec", "in", "iter_fasta_qual", "(", "fastafile", ",", "qualfile", ",", "modify", "=", "True", ")", ":", "qv", "=", "[", "x", "-", "opts", ".", "score", "for", "x", "in", "rec", ".", "letter_annotations", "[", "\"phred_quality\"", "]", "]", "msum", ",", "trim_start", ",", "trim_end", "=", "max_sum", "(", "qv", ")", "score", "=", "trim_end", "-", "trim_start", "+", "1", "if", "score", "<", "opts", ".", "min_length", ":", "dropped", "+=", "1", "continue", "if", "score", "<", "len", "(", "rec", ")", ":", "trimmed", "+=", "1", "rec", "=", "rec", "[", "trim_start", ":", "trim_end", "+", "1", "]", "write_fasta_qual", "(", "rec", ",", "fw", ",", "fw_qual", ")", "print", "(", "\"A total of %d sequences modified.\"", "%", "trimmed", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "\"A total of %d sequences dropped (length < %d).\"", "%", "(", "dropped", ",", "opts", ".", "min_length", ")", ",", "file", "=", "sys", ".", "stderr", ")", "fw", ".", "close", "(", ")", "fw_qual", ".", "close", "(", ")" ]
%prog trim fasta.screen newfasta take the screen output from `cross_match` (against a vector db, for example), then trim the sequences to remove X's. Will also perform quality trim if fasta.screen.qual is found. The trimming algorithm is based on finding the subarray that maximize the sum
[ "%prog", "trim", "fasta", ".", "screen", "newfasta" ]
python
train
31.321429
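The docstring says trimming keeps the quality subarray that maximises the sum. The exact behaviour of `jcvi.algorithms.maxsum.max_sum` is not shown in this record, but a classic Kadane-style sketch matching the `(msum, trim_start, trim_end)` unpacking in `trim` would look like this:

def max_sum(values):
    # Maximum-sum contiguous subarray, returning (sum, start, end) with
    # inclusive indices; tie-breaking may differ from the real jcvi helper.
    best_sum, best_start, best_end = values[0], 0, 0
    cur_sum, cur_start = values[0], 0
    for i in range(1, len(values)):
        if cur_sum < 0:
            cur_sum, cur_start = values[i], i   # start a fresh subarray
        else:
            cur_sum += values[i]
        if cur_sum > best_sum:
            best_sum, best_start, best_end = cur_sum, cur_start, i
    return best_sum, best_start, best_end

# Quality values minus the cutoff: positive means "good enough" base calls.
qv = [-3, 5, 4, -1, 6, -8, 2]
print(max_sum(qv))   # (14, 1, 4) -> keep bases 1..4 inclusive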
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/external/ssh/tunnel.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/external/ssh/tunnel.py#L108-L128
def _try_passwordless_paramiko(server, keyfile): """Try passwordless login with paramiko.""" if paramiko is None: msg = "Paramiko unavaliable, " if sys.platform == 'win32': msg += "Paramiko is required for ssh tunneled connections on Windows." else: msg += "use OpenSSH." raise ImportError(msg) username, server, port = _split_server(server) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.WarningPolicy()) try: client.connect(server, port, username=username, key_filename=keyfile, look_for_keys=True) except paramiko.AuthenticationException: return False else: client.close() return True
[ "def", "_try_passwordless_paramiko", "(", "server", ",", "keyfile", ")", ":", "if", "paramiko", "is", "None", ":", "msg", "=", "\"Paramiko unavaliable, \"", "if", "sys", ".", "platform", "==", "'win32'", ":", "msg", "+=", "\"Paramiko is required for ssh tunneled connections on Windows.\"", "else", ":", "msg", "+=", "\"use OpenSSH.\"", "raise", "ImportError", "(", "msg", ")", "username", ",", "server", ",", "port", "=", "_split_server", "(", "server", ")", "client", "=", "paramiko", ".", "SSHClient", "(", ")", "client", ".", "load_system_host_keys", "(", ")", "client", ".", "set_missing_host_key_policy", "(", "paramiko", ".", "WarningPolicy", "(", ")", ")", "try", ":", "client", ".", "connect", "(", "server", ",", "port", ",", "username", "=", "username", ",", "key_filename", "=", "keyfile", ",", "look_for_keys", "=", "True", ")", "except", "paramiko", ".", "AuthenticationException", ":", "return", "False", "else", ":", "client", ".", "close", "(", ")", "return", "True" ]
Try passwordless login with paramiko.
[ "Try", "passwordless", "login", "with", "paramiko", "." ]
python
test
36.380952
DMSC-Instrument-Data/lewis
src/lewis/core/statemachine.py
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/core/statemachine.py#L56-L61
def set_context(self, new_context): """Assigns the new context to the member variable ``_context``.""" self._context = new_context if hasattr(self, '_set_logging_context'): self._set_logging_context(self._context)
[ "def", "set_context", "(", "self", ",", "new_context", ")", ":", "self", ".", "_context", "=", "new_context", "if", "hasattr", "(", "self", ",", "'_set_logging_context'", ")", ":", "self", ".", "_set_logging_context", "(", "self", ".", "_context", ")" ]
Assigns the new context to the member variable ``_context``.
[ "Assigns", "the", "new", "context", "to", "the", "member", "variable", "_context", "." ]
python
train
40.833333
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L5044-L5056
def get_stp_mst_detail_output_last_instance_instance_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") last_instance = ET.SubElement(output, "last-instance") instance_id = ET.SubElement(last_instance, "instance-id") instance_id.text = kwargs.pop('instance_id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_stp_mst_detail_output_last_instance_instance_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "=", "get_stp_mst_detail", "output", "=", "ET", ".", "SubElement", "(", "get_stp_mst_detail", ",", "\"output\"", ")", "last_instance", "=", "ET", ".", "SubElement", "(", "output", ",", "\"last-instance\"", ")", "instance_id", "=", "ET", ".", "SubElement", "(", "last_instance", ",", "\"instance-id\"", ")", "instance_id", ".", "text", "=", "kwargs", ".", "pop", "(", "'instance_id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
44.076923
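Auto-generated builders like this one just assemble a small XML tree and hand it to the callback; note that `config` is rebound to the `get_stp_mst_detail` element before being returned. The same tree built with the standard library, with a made-up instance id:

import xml.etree.ElementTree as ET

get_stp_mst_detail = ET.Element("get_stp_mst_detail")
output = ET.SubElement(get_stp_mst_detail, "output")
last_instance = ET.SubElement(output, "last-instance")
instance_id = ET.SubElement(last_instance, "instance-id")
instance_id.text = "1"     # value normally comes from kwargs.pop('instance_id')

print(ET.tostring(get_stp_mst_detail).decode())
# <get_stp_mst_detail><output><last-instance><instance-id>1</instance-id></last-instance></output></get_stp_mst_detail>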
reingart/gui2py
gui/controls/gridview.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/controls/gridview.py#L560-L570
def EndEdit(self, row, col, grid, val=None): "Complete the editing of the current cell. Returns True if changed" changed = False val = self._tc.GetStringSelection() print "val", val, row, col, self.startValue if val != self.startValue: changed = True grid.GetTable().SetValue(row, col, val) # update the table self.startValue = '' self._tc.SetStringSelection('') return changed
[ "def", "EndEdit", "(", "self", ",", "row", ",", "col", ",", "grid", ",", "val", "=", "None", ")", ":", "changed", "=", "False", "val", "=", "self", ".", "_tc", ".", "GetStringSelection", "(", ")", "print", "\"val\"", ",", "val", ",", "row", ",", "col", ",", "self", ".", "startValue", "if", "val", "!=", "self", ".", "startValue", ":", "changed", "=", "True", "grid", ".", "GetTable", "(", ")", ".", "SetValue", "(", "row", ",", "col", ",", "val", ")", "# update the table\r", "self", ".", "startValue", "=", "''", "self", ".", "_tc", ".", "SetStringSelection", "(", "''", ")", "return", "changed" ]
Complete the editing of the current cell. Returns True if changed
[ "Complete", "the", "editing", "of", "the", "current", "cell", ".", "Returns", "True", "if", "changed" ]
python
test
42.272727
ralphbean/bugwarrior
bugwarrior/services/trello.py
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/services/trello.py#L125-L139
def get_boards(self): """ Get the list of boards to pull cards from. If the user gave a value to trello.include_boards use that, otherwise ask the Trello API for the user's boards. """ if 'include_boards' in self.config: for boardid in self.config.get('include_boards', to_type=aslist): # Get the board name yield self.api_request( "/1/boards/{id}".format(id=boardid), fields='name') else: boards = self.api_request("/1/members/me/boards", fields='name') for board in boards: yield board
[ "def", "get_boards", "(", "self", ")", ":", "if", "'include_boards'", "in", "self", ".", "config", ":", "for", "boardid", "in", "self", ".", "config", ".", "get", "(", "'include_boards'", ",", "to_type", "=", "aslist", ")", ":", "# Get the board name", "yield", "self", ".", "api_request", "(", "\"/1/boards/{id}\"", ".", "format", "(", "id", "=", "boardid", ")", ",", "fields", "=", "'name'", ")", "else", ":", "boards", "=", "self", ".", "api_request", "(", "\"/1/members/me/boards\"", ",", "fields", "=", "'name'", ")", "for", "board", "in", "boards", ":", "yield", "board" ]
Get the list of boards to pull cards from. If the user gave a value to trello.include_boards use that, otherwise ask the Trello API for the user's boards.
[ "Get", "the", "list", "of", "boards", "to", "pull", "cards", "from", ".", "If", "the", "user", "gave", "a", "value", "to", "trello", ".", "include_boards", "use", "that", "otherwise", "ask", "the", "Trello", "API", "for", "the", "user", "s", "boards", "." ]
python
test
42.266667
Alignak-monitoring/alignak
alignak/modules/inner_metrics.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/modules/inner_metrics.py#L690-L740
def manage_host_check_result_brok(self, b): # pylint: disable=too-many-branches """An host check result brok has just arrived...""" host_name = b.data.get('host_name', None) if not host_name: return logger.debug("host check result: %s", host_name) # If host initial status brok has not been received, ignore ... if host_name not in self.hosts_cache and not self.ignore_unknown: logger.warning("received host check result for an unknown host: %s", host_name) return # Decode received metrics metrics = self.get_metrics_from_perfdata('host_check', b.data['perf_data']) if not metrics: logger.debug("no metrics to send ...") return # If checks latency is ignored if self.ignore_latency_limit >= b.data['latency'] > 0: check_time = int(b.data['last_chk']) - int(b.data['latency']) else: check_time = int(b.data['last_chk']) # Custom hosts variables hname = sanitize_name(host_name) if host_name in self.hosts_cache: if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None): hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname)) if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None): hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname)) # Graphite data source if self.graphite_data_source: path = '.'.join((hname, self.graphite_data_source)) if self.hostcheck: path = '.'.join((hname, self.graphite_data_source, self.hostcheck)) else: path = '.'.join((hname, self.hostcheck)) # Realm as a prefix if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None): path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path)) realm_name = None if host_name in self.hosts_cache: realm_name = self.hosts_cache[host_name].get('realm_name', None) # Send metrics self.send_to_tsdb(realm_name, host_name, self.hostcheck, metrics, check_time, path)
[ "def", "manage_host_check_result_brok", "(", "self", ",", "b", ")", ":", "# pylint: disable=too-many-branches", "host_name", "=", "b", ".", "data", ".", "get", "(", "'host_name'", ",", "None", ")", "if", "not", "host_name", ":", "return", "logger", ".", "debug", "(", "\"host check result: %s\"", ",", "host_name", ")", "# If host initial status brok has not been received, ignore ...", "if", "host_name", "not", "in", "self", ".", "hosts_cache", "and", "not", "self", ".", "ignore_unknown", ":", "logger", ".", "warning", "(", "\"received host check result for an unknown host: %s\"", ",", "host_name", ")", "return", "# Decode received metrics", "metrics", "=", "self", ".", "get_metrics_from_perfdata", "(", "'host_check'", ",", "b", ".", "data", "[", "'perf_data'", "]", ")", "if", "not", "metrics", ":", "logger", ".", "debug", "(", "\"no metrics to send ...\"", ")", "return", "# If checks latency is ignored", "if", "self", ".", "ignore_latency_limit", ">=", "b", ".", "data", "[", "'latency'", "]", ">", "0", ":", "check_time", "=", "int", "(", "b", ".", "data", "[", "'last_chk'", "]", ")", "-", "int", "(", "b", ".", "data", "[", "'latency'", "]", ")", "else", ":", "check_time", "=", "int", "(", "b", ".", "data", "[", "'last_chk'", "]", ")", "# Custom hosts variables", "hname", "=", "sanitize_name", "(", "host_name", ")", "if", "host_name", "in", "self", ".", "hosts_cache", ":", "if", "self", ".", "hosts_cache", "[", "host_name", "]", ".", "get", "(", "'_GRAPHITE_GROUP'", ",", "None", ")", ":", "hname", "=", "\".\"", ".", "join", "(", "(", "self", ".", "hosts_cache", "[", "host_name", "]", ".", "get", "(", "'_GRAPHITE_GROUP'", ")", ",", "hname", ")", ")", "if", "self", ".", "hosts_cache", "[", "host_name", "]", ".", "get", "(", "'_GRAPHITE_PRE'", ",", "None", ")", ":", "hname", "=", "\".\"", ".", "join", "(", "(", "self", ".", "hosts_cache", "[", "host_name", "]", ".", "get", "(", "'_GRAPHITE_PRE'", ")", ",", "hname", ")", ")", "# Graphite data source", "if", "self", ".", "graphite_data_source", ":", "path", "=", "'.'", ".", "join", "(", "(", "hname", ",", "self", ".", "graphite_data_source", ")", ")", "if", "self", ".", "hostcheck", ":", "path", "=", "'.'", ".", "join", "(", "(", "hname", ",", "self", ".", "graphite_data_source", ",", "self", ".", "hostcheck", ")", ")", "else", ":", "path", "=", "'.'", ".", "join", "(", "(", "hname", ",", "self", ".", "hostcheck", ")", ")", "# Realm as a prefix", "if", "self", ".", "realms_prefix", "and", "self", ".", "hosts_cache", "[", "host_name", "]", ".", "get", "(", "'realm_name'", ",", "None", ")", ":", "path", "=", "'.'", ".", "join", "(", "(", "self", ".", "hosts_cache", "[", "host_name", "]", ".", "get", "(", "'realm_name'", ")", ",", "path", ")", ")", "realm_name", "=", "None", "if", "host_name", "in", "self", ".", "hosts_cache", ":", "realm_name", "=", "self", ".", "hosts_cache", "[", "host_name", "]", ".", "get", "(", "'realm_name'", ",", "None", ")", "# Send metrics", "self", ".", "send_to_tsdb", "(", "realm_name", ",", "host_name", ",", "self", ".", "hostcheck", ",", "metrics", ",", "check_time", ",", "path", ")" ]
An host check result brok has just arrived...
[ "An", "host", "check", "result", "brok", "has", "just", "arrived", "..." ]
python
train
42.705882
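The path assembly in the middle of this handler is plain string joining over optional prefixes; a reduced sketch (host name, custom variables and realm below are invented, and `sanitize_name` is skipped):

def build_metric_path(hname, graphite_group=None, graphite_pre=None,
                      data_source=None, hostcheck='hostcheck', realm=None):
    # Optional _GRAPHITE_GROUP / _GRAPHITE_PRE custom variables prefix the host name.
    if graphite_group:
        hname = ".".join((graphite_group, hname))
    if graphite_pre:
        hname = ".".join((graphite_pre, hname))
    # Graphite data source sits between host name and the host-check suffix.
    if data_source:
        path = ".".join((hname, data_source, hostcheck))
    else:
        path = ".".join((hname, hostcheck))
    # Realm name, when configured, goes in front of everything.
    if realm:
        path = ".".join((realm, path))
    return path

print(build_metric_path('srv01', graphite_group='web', data_source='alignak', realm='All'))
# All.web.srv01.alignak.hostcheck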
open511/open511
open511/converter/o5json.py
https://github.com/open511/open511/blob/3d573f59d7efa06ff1b5419ea5ff4d90a90b3cf8/open511/converter/o5json.py#L9-L63
def xml_to_json(root): """Convert an Open511 XML document or document fragment to JSON. Takes an lxml Element object. Returns a dict ready to be JSON-serialized.""" j = {} if len(root) == 0: # Tag with no children, return str/int return _maybe_intify(root.text) if len(root) == 1 and root[0].tag.startswith('{' + NS_GML): # GML return gml_to_geojson(root[0]) if root.tag == 'open511': j['meta'] = {'version': root.get('version')} for elem in root: name = elem.tag if name == 'link' and elem.get('rel'): name = elem.get('rel') + '_url' if name == 'self_url': name = 'url' if root.tag == 'open511': j['meta'][name] = elem.get('href') continue elif name.startswith('{' + NS_PROTECTED): name = '!' + name[name.index('}') + 1:] elif name[0] == '{': # Namespace! name = '+' + name[name.index('}') + 1:] if name in j: continue # duplicate elif elem.tag == 'link' and not elem.text: j[name] = elem.get('href') elif len(elem): if name == 'grouped_events': # An array of URLs j[name] = [xml_link_to_json(child, to_dict=False) for child in elem] elif name in ('attachments', 'media_files'): # An array of JSON objects j[name] = [xml_link_to_json(child, to_dict=True) for child in elem] elif all((name == pluralize(child.tag) for child in elem)): # <something><somethings> serializes to a JSON array j[name] = [xml_to_json(child) for child in elem] else: j[name] = xml_to_json(elem) else: if root.tag == 'open511' and name.endswith('s') and not elem.text: # Special case: an empty e.g. <events /> container at the root level # should be serialized to [], not null j[name] = [] else: j[name] = _maybe_intify(elem.text) return j
[ "def", "xml_to_json", "(", "root", ")", ":", "j", "=", "{", "}", "if", "len", "(", "root", ")", "==", "0", ":", "# Tag with no children, return str/int", "return", "_maybe_intify", "(", "root", ".", "text", ")", "if", "len", "(", "root", ")", "==", "1", "and", "root", "[", "0", "]", ".", "tag", ".", "startswith", "(", "'{'", "+", "NS_GML", ")", ":", "# GML", "return", "gml_to_geojson", "(", "root", "[", "0", "]", ")", "if", "root", ".", "tag", "==", "'open511'", ":", "j", "[", "'meta'", "]", "=", "{", "'version'", ":", "root", ".", "get", "(", "'version'", ")", "}", "for", "elem", "in", "root", ":", "name", "=", "elem", ".", "tag", "if", "name", "==", "'link'", "and", "elem", ".", "get", "(", "'rel'", ")", ":", "name", "=", "elem", ".", "get", "(", "'rel'", ")", "+", "'_url'", "if", "name", "==", "'self_url'", ":", "name", "=", "'url'", "if", "root", ".", "tag", "==", "'open511'", ":", "j", "[", "'meta'", "]", "[", "name", "]", "=", "elem", ".", "get", "(", "'href'", ")", "continue", "elif", "name", ".", "startswith", "(", "'{'", "+", "NS_PROTECTED", ")", ":", "name", "=", "'!'", "+", "name", "[", "name", ".", "index", "(", "'}'", ")", "+", "1", ":", "]", "elif", "name", "[", "0", "]", "==", "'{'", ":", "# Namespace!", "name", "=", "'+'", "+", "name", "[", "name", ".", "index", "(", "'}'", ")", "+", "1", ":", "]", "if", "name", "in", "j", ":", "continue", "# duplicate", "elif", "elem", ".", "tag", "==", "'link'", "and", "not", "elem", ".", "text", ":", "j", "[", "name", "]", "=", "elem", ".", "get", "(", "'href'", ")", "elif", "len", "(", "elem", ")", ":", "if", "name", "==", "'grouped_events'", ":", "# An array of URLs", "j", "[", "name", "]", "=", "[", "xml_link_to_json", "(", "child", ",", "to_dict", "=", "False", ")", "for", "child", "in", "elem", "]", "elif", "name", "in", "(", "'attachments'", ",", "'media_files'", ")", ":", "# An array of JSON objects", "j", "[", "name", "]", "=", "[", "xml_link_to_json", "(", "child", ",", "to_dict", "=", "True", ")", "for", "child", "in", "elem", "]", "elif", "all", "(", "(", "name", "==", "pluralize", "(", "child", ".", "tag", ")", "for", "child", "in", "elem", ")", ")", ":", "# <something><somethings> serializes to a JSON array", "j", "[", "name", "]", "=", "[", "xml_to_json", "(", "child", ")", "for", "child", "in", "elem", "]", "else", ":", "j", "[", "name", "]", "=", "xml_to_json", "(", "elem", ")", "else", ":", "if", "root", ".", "tag", "==", "'open511'", "and", "name", ".", "endswith", "(", "'s'", ")", "and", "not", "elem", ".", "text", ":", "# Special case: an empty e.g. <events /> container at the root level", "# should be serialized to [], not null", "j", "[", "name", "]", "=", "[", "]", "else", ":", "j", "[", "name", "]", "=", "_maybe_intify", "(", "elem", ".", "text", ")", "return", "j" ]
Convert an Open511 XML document or document fragment to JSON. Takes an lxml Element object. Returns a dict ready to be JSON-serialized.
[ "Convert", "an", "Open511", "XML", "document", "or", "document", "fragment", "to", "JSON", "." ]
python
valid
37.872727
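One of the rules above, that a container whose children are all the singular of its tag serialises to a JSON array, can be shown in isolation; `pluralize` below is a naive stand-in for the real helper, and the converter is deliberately reduced to that single rule:

import xml.etree.ElementTree as ET

def pluralize(name):
    # Naive stand-in: the real open511 helper may handle irregular plurals.
    return name + 's'

def container_to_list(elem):
    # "<events><event>..</event>..</events>" -> a plain list of the child values.
    if len(elem) and all(elem.tag == pluralize(child.tag) for child in elem):
        return [child.text for child in elem]
    return elem.text

root = ET.fromstring('<events><event>a</event><event>b</event></events>')
print(container_to_list(root))   # ['a', 'b']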
PMBio/limix-backup
limix/deprecated/archive/varianceDecompositionOld.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/deprecated/archive/varianceDecompositionOld.py#L75-L101
def setY(self,Y,standardize=False): """ Set phenotype matrix Args: Y: phenotype matrix [N, P] standardize: if True, phenotype is standardized (zero mean, unit variance) """ assert Y.shape[0]==self.N, 'CVarianceDecomposition:: Incompatible shape' assert Y.shape[1]==self.P, 'CVarianceDecomposition:: Incompatible shape' if standardize: Y=preprocess.standardize(Y) #check that missing values match the current structure assert (~(SP.isnan(Y).any(axis=1))==self.Iok).all(), 'CVarianceDecomposition:: pattern of missing values needs to match Y given at initialization' self.Y = Y self.vd.setPheno(Y) self.optimum = None self.cache['Sigma'] = None self.cache['Hessian'] = None self.cache['Lparams'] = None self.cache['paramsST']= None
[ "def", "setY", "(", "self", ",", "Y", ",", "standardize", "=", "False", ")", ":", "assert", "Y", ".", "shape", "[", "0", "]", "==", "self", ".", "N", ",", "'CVarianceDecomposition:: Incompatible shape'", "assert", "Y", ".", "shape", "[", "1", "]", "==", "self", ".", "P", ",", "'CVarianceDecomposition:: Incompatible shape'", "if", "standardize", ":", "Y", "=", "preprocess", ".", "standardize", "(", "Y", ")", "#check that missing values match the current structure", "assert", "(", "~", "(", "SP", ".", "isnan", "(", "Y", ")", ".", "any", "(", "axis", "=", "1", ")", ")", "==", "self", ".", "Iok", ")", ".", "all", "(", ")", ",", "'CVarianceDecomposition:: pattern of missing values needs to match Y given at initialization'", "self", ".", "Y", "=", "Y", "self", ".", "vd", ".", "setPheno", "(", "Y", ")", "self", ".", "optimum", "=", "None", "self", ".", "cache", "[", "'Sigma'", "]", "=", "None", "self", ".", "cache", "[", "'Hessian'", "]", "=", "None", "self", ".", "cache", "[", "'Lparams'", "]", "=", "None", "self", ".", "cache", "[", "'paramsST'", "]", "=", "None" ]
Set phenotype matrix Args: Y: phenotype matrix [N, P] standardize: if True, phenotype is standardized (zero mean, unit variance)
[ "Set", "phenotype", "matrix", "Args", ":", "Y", ":", "phenotype", "matrix", "[", "N", "P", "]", "standardize", ":", "if", "True", "phenotype", "is", "standardized", "(", "zero", "mean", "unit", "variance", ")" ]
python
train
33.666667
mongodb/mongo-python-driver
pymongo/uri_parser.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/uri_parser.py#L211-L234
def _parse_options(opts, delim): """Helper method for split_options which creates the options dict. Also handles the creation of a list for the URI tag_sets/ readpreferencetags portion and the use of the tlsInsecure option.""" options = _CaseInsensitiveDictionary() for uriopt in opts.split(delim): key, value = uriopt.split("=") if key.lower() == 'readpreferencetags': options.setdefault(key, []).append(value) else: if key in options: warnings.warn("Duplicate URI option '%s'." % (key,)) options[key] = unquote_plus(value) if 'tlsInsecure' in options: for implicit_option in _IMPLICIT_TLSINSECURE_OPTS: if implicit_option in options: warn_msg = "URI option '%s' overrides value implied by '%s'." warnings.warn(warn_msg % (options.cased_key(implicit_option), options.cased_key('tlsInsecure'))) continue options[implicit_option] = options['tlsInsecure'] return options
[ "def", "_parse_options", "(", "opts", ",", "delim", ")", ":", "options", "=", "_CaseInsensitiveDictionary", "(", ")", "for", "uriopt", "in", "opts", ".", "split", "(", "delim", ")", ":", "key", ",", "value", "=", "uriopt", ".", "split", "(", "\"=\"", ")", "if", "key", ".", "lower", "(", ")", "==", "'readpreferencetags'", ":", "options", ".", "setdefault", "(", "key", ",", "[", "]", ")", ".", "append", "(", "value", ")", "else", ":", "if", "key", "in", "options", ":", "warnings", ".", "warn", "(", "\"Duplicate URI option '%s'.\"", "%", "(", "key", ",", ")", ")", "options", "[", "key", "]", "=", "unquote_plus", "(", "value", ")", "if", "'tlsInsecure'", "in", "options", ":", "for", "implicit_option", "in", "_IMPLICIT_TLSINSECURE_OPTS", ":", "if", "implicit_option", "in", "options", ":", "warn_msg", "=", "\"URI option '%s' overrides value implied by '%s'.\"", "warnings", ".", "warn", "(", "warn_msg", "%", "(", "options", ".", "cased_key", "(", "implicit_option", ")", ",", "options", ".", "cased_key", "(", "'tlsInsecure'", ")", ")", ")", "continue", "options", "[", "implicit_option", "]", "=", "options", "[", "'tlsInsecure'", "]", "return", "options" ]
Helper method for split_options which creates the options dict. Also handles the creation of a list for the URI tag_sets/ readpreferencetags portion and the use of the tlsInsecure option.
[ "Helper", "method", "for", "split_options", "which", "creates", "the", "options", "dict", ".", "Also", "handles", "the", "creation", "of", "a", "list", "for", "the", "URI", "tag_sets", "/", "readpreferencetags", "portion", "and", "the", "use", "of", "the", "tlsInsecure", "option", "." ]
python
train
44.708333
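A reduced, case-sensitive sketch of the same parsing loop (the real helper stores everything in a case-insensitive dictionary and additionally expands `tlsInsecure` into the options it implies):

from urllib.parse import unquote_plus
import warnings

def parse_options(opts, delim='&'):
    options = {}
    for uriopt in opts.split(delim):
        key, value = uriopt.split("=")
        if key.lower() == 'readpreferencetags':
            # Tag sets may repeat, so they accumulate into a list.
            options.setdefault(key, []).append(value)
        else:
            if key in options:
                warnings.warn("Duplicate URI option '%s'." % (key,))
            options[key] = unquote_plus(value)
    return options

print(parse_options("w=majority&readPreferenceTags=dc:ny&readPreferenceTags=rack:1"))
# {'w': 'majority', 'readPreferenceTags': ['dc:ny', 'rack:1']}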
luqasz/librouteros
librouteros/connections.py
https://github.com/luqasz/librouteros/blob/59293eb49c07a339af87b0416e4619e78ca5176d/librouteros/connections.py#L88-L114
def decodeLength(length): """ Decode length based on given bytes. :param length: Bytes string to decode. :return: Decoded length. """ bytes_length = len(length) if bytes_length < 2: offset = b'\x00\x00\x00' XOR = 0 elif bytes_length < 3: offset = b'\x00\x00' XOR = 0x8000 elif bytes_length < 4: offset = b'\x00' XOR = 0xC00000 elif bytes_length < 5: offset = b'' XOR = 0xE0000000 else: raise ConnectionError('Unable to decode length of {}'.format(length)) decoded = unpack('!I', (offset + length))[0] decoded ^= XOR return decoded
[ "def", "decodeLength", "(", "length", ")", ":", "bytes_length", "=", "len", "(", "length", ")", "if", "bytes_length", "<", "2", ":", "offset", "=", "b'\\x00\\x00\\x00'", "XOR", "=", "0", "elif", "bytes_length", "<", "3", ":", "offset", "=", "b'\\x00\\x00'", "XOR", "=", "0x8000", "elif", "bytes_length", "<", "4", ":", "offset", "=", "b'\\x00'", "XOR", "=", "0xC00000", "elif", "bytes_length", "<", "5", ":", "offset", "=", "b''", "XOR", "=", "0xE0000000", "else", ":", "raise", "ConnectionError", "(", "'Unable to decode length of {}'", ".", "format", "(", "length", ")", ")", "decoded", "=", "unpack", "(", "'!I'", ",", "(", "offset", "+", "length", ")", ")", "[", "0", "]", "decoded", "^=", "XOR", "return", "decoded" ]
Decode length based on given bytes. :param length: Bytes string to decode. :return: Decoded length.
[ "Decode", "length", "based", "on", "given", "bytes", "." ]
python
train
26.925926
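The decoder can be copied and fed hand-built RouterOS API length prefixes; the version below only swaps the librouteros ConnectionError for a generic exception so it runs standalone.

from struct import unpack

def decode_length(length):
    # Pad the raw prefix to 4 bytes, then XOR away the length-marker bits.
    n = len(length)
    if n < 2:
        offset, xor = b'\x00\x00\x00', 0
    elif n < 3:
        offset, xor = b'\x00\x00', 0x8000
    elif n < 4:
        offset, xor = b'\x00', 0xC00000
    elif n < 5:
        offset, xor = b'', 0xE0000000
    else:
        raise ValueError('Unable to decode length of %r' % length)
    return unpack('!I', offset + length)[0] ^ xor

print(decode_length(b'\x05'))        # 5   (short word, single length byte)
print(decode_length(b'\x80\x64'))    # 100 (two-byte form, high bit set)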
sirfoga/pyhal
hal/charts/correlation.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/charts/correlation.py#L11-L31
def create_correlation_matrix_plot(correlation_matrix, title, feature_list): """Creates plot for correlation matrix :param correlation_matrix: Correlation matrix of features :param title: Title of plot :param feature_list: List of names of features :return: Shows the given correlation matrix as image """ chart = SimpleChart(title) ax1 = chart.get_ax() ax1.set_xticks(list(range(len(feature_list)))) ax1.set_xticklabels([feature_list[i] for i in range(len(feature_list))], rotation=90) ax1.set_yticks(list(range(len(feature_list)))) ax1.set_yticklabels([feature_list[i] for i in range(len(feature_list))]) cax = ax1.imshow(correlation_matrix, interpolation="nearest", cmap=cm.get_cmap("jet", 30)) chart.get_fig().colorbar(cax, ticks=np.linspace(-1, 1, 21)) plt.gcf().subplots_adjust(bottom=0.25)
[ "def", "create_correlation_matrix_plot", "(", "correlation_matrix", ",", "title", ",", "feature_list", ")", ":", "chart", "=", "SimpleChart", "(", "title", ")", "ax1", "=", "chart", ".", "get_ax", "(", ")", "ax1", ".", "set_xticks", "(", "list", "(", "range", "(", "len", "(", "feature_list", ")", ")", ")", ")", "ax1", ".", "set_xticklabels", "(", "[", "feature_list", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "feature_list", ")", ")", "]", ",", "rotation", "=", "90", ")", "ax1", ".", "set_yticks", "(", "list", "(", "range", "(", "len", "(", "feature_list", ")", ")", ")", ")", "ax1", ".", "set_yticklabels", "(", "[", "feature_list", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "feature_list", ")", ")", "]", ")", "cax", "=", "ax1", ".", "imshow", "(", "correlation_matrix", ",", "interpolation", "=", "\"nearest\"", ",", "cmap", "=", "cm", ".", "get_cmap", "(", "\"jet\"", ",", "30", ")", ")", "chart", ".", "get_fig", "(", ")", ".", "colorbar", "(", "cax", ",", "ticks", "=", "np", ".", "linspace", "(", "-", "1", ",", "1", ",", "21", ")", ")", "plt", ".", "gcf", "(", ")", ".", "subplots_adjust", "(", "bottom", "=", "0.25", ")" ]
Creates plot for correlation matrix :param correlation_matrix: Correlation matrix of features :param title: Title of plot :param feature_list: List of names of features :return: Shows the given correlation matrix as image
[ "Creates", "plot", "for", "correlation", "matrix" ]
python
train
42.095238
marcomusy/vtkplotter
vtkplotter/vtkio.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/vtkio.py#L313-L354
def loadOFF(filename, c="gold", alpha=1, wire=False, bc=None): """Read OFF file format.""" if not os.path.exists(filename): colors.printc("~noentry Error in loadOFF: Cannot find", filename, c=1) return None f = open(filename, "r") lines = f.readlines() f.close() vertices = [] faces = [] NumberOfVertices = None i = -1 for text in lines: if len(text) == 0: continue if text == '\n': continue if "#" in text: continue if "OFF" in text: continue ts = text.split() n = len(ts) if not NumberOfVertices and n > 1: NumberOfVertices, NumberOfFaces = int(ts[0]), int(ts[1]) continue i += 1 if i < NumberOfVertices and n == 3: x, y, z = float(ts[0]), float(ts[1]), float(ts[2]) vertices.append([x, y, z]) ids = [] if NumberOfVertices <= i < (NumberOfVertices + NumberOfFaces + 1) and n > 2: ids += [int(x) for x in ts[1:]] faces.append(ids) return Actor(buildPolyData(vertices, faces), c, alpha, wire, bc)
[ "def", "loadOFF", "(", "filename", ",", "c", "=", "\"gold\"", ",", "alpha", "=", "1", ",", "wire", "=", "False", ",", "bc", "=", "None", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "colors", ".", "printc", "(", "\"~noentry Error in loadOFF: Cannot find\"", ",", "filename", ",", "c", "=", "1", ")", "return", "None", "f", "=", "open", "(", "filename", ",", "\"r\"", ")", "lines", "=", "f", ".", "readlines", "(", ")", "f", ".", "close", "(", ")", "vertices", "=", "[", "]", "faces", "=", "[", "]", "NumberOfVertices", "=", "None", "i", "=", "-", "1", "for", "text", "in", "lines", ":", "if", "len", "(", "text", ")", "==", "0", ":", "continue", "if", "text", "==", "'\\n'", ":", "continue", "if", "\"#\"", "in", "text", ":", "continue", "if", "\"OFF\"", "in", "text", ":", "continue", "ts", "=", "text", ".", "split", "(", ")", "n", "=", "len", "(", "ts", ")", "if", "not", "NumberOfVertices", "and", "n", ">", "1", ":", "NumberOfVertices", ",", "NumberOfFaces", "=", "int", "(", "ts", "[", "0", "]", ")", ",", "int", "(", "ts", "[", "1", "]", ")", "continue", "i", "+=", "1", "if", "i", "<", "NumberOfVertices", "and", "n", "==", "3", ":", "x", ",", "y", ",", "z", "=", "float", "(", "ts", "[", "0", "]", ")", ",", "float", "(", "ts", "[", "1", "]", ")", ",", "float", "(", "ts", "[", "2", "]", ")", "vertices", ".", "append", "(", "[", "x", ",", "y", ",", "z", "]", ")", "ids", "=", "[", "]", "if", "NumberOfVertices", "<=", "i", "<", "(", "NumberOfVertices", "+", "NumberOfFaces", "+", "1", ")", "and", "n", ">", "2", ":", "ids", "+=", "[", "int", "(", "x", ")", "for", "x", "in", "ts", "[", "1", ":", "]", "]", "faces", ".", "append", "(", "ids", ")", "return", "Actor", "(", "buildPolyData", "(", "vertices", ",", "faces", ")", ",", "c", ",", "alpha", ",", "wire", ",", "bc", ")" ]
Read OFF file format.
[ "Read", "OFF", "file", "format", "." ]
python
train
26.904762
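Stripped of the VTK Actor construction, the parsing rules above (skip comments and the OFF marker, read the vertex/face counts, then 3-column vertex lines followed by face lines whose first token is a count) can be walked through on a tiny made-up mesh:

off_text = """OFF
3 1 0
0.0 0.0 0.0
1.0 0.0 0.0
0.0 1.0 0.0
3 0 1 2
"""

vertices, faces = [], []
n_vertices = n_faces = None
i = -1
for text in off_text.splitlines():
    if not text or text.startswith('#') or 'OFF' in text:
        continue
    ts = text.split()
    if n_vertices is None and len(ts) > 1:
        n_vertices, n_faces = int(ts[0]), int(ts[1])   # header: vertex and face counts
        continue
    i += 1
    if i < n_vertices and len(ts) == 3:
        vertices.append([float(v) for v in ts])
    elif len(ts) > 2:
        faces.append([int(v) for v in ts[1:]])         # drop the leading vertex count

print(vertices)   # [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
print(faces)      # [[0, 1, 2]]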
TUT-ARG/sed_eval
sed_eval/sound_event.py
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/sound_event.py#L1633-L1668
def validate_offset(reference_event, estimated_event, t_collar=0.200, percentage_of_length=0.5): """Validate estimated event based on event offset Parameters ---------- reference_event : dict Reference event. estimated_event : dict Estimated event. t_collar : float > 0, seconds First condition, Time collar with which the estimated offset has to be in order to be consider valid estimation. Default value 0.2 percentage_of_length : float in [0, 1] Second condition, percentage of the length within which the estimated offset has to be in order to be consider valid estimation. Default value 0.5 Returns ------- bool """ # Detect field naming style used and validate onset if 'event_offset' in reference_event and 'event_offset' in estimated_event: annotated_length = reference_event['event_offset'] - reference_event['event_onset'] return math.fabs(reference_event['event_offset'] - estimated_event['event_offset']) <= max(t_collar, percentage_of_length * annotated_length) elif 'offset' in reference_event and 'offset' in estimated_event: annotated_length = reference_event['offset'] - reference_event['onset'] return math.fabs(reference_event['offset'] - estimated_event['offset']) <= max(t_collar, percentage_of_length * annotated_length)
[ "def", "validate_offset", "(", "reference_event", ",", "estimated_event", ",", "t_collar", "=", "0.200", ",", "percentage_of_length", "=", "0.5", ")", ":", "# Detect field naming style used and validate onset", "if", "'event_offset'", "in", "reference_event", "and", "'event_offset'", "in", "estimated_event", ":", "annotated_length", "=", "reference_event", "[", "'event_offset'", "]", "-", "reference_event", "[", "'event_onset'", "]", "return", "math", ".", "fabs", "(", "reference_event", "[", "'event_offset'", "]", "-", "estimated_event", "[", "'event_offset'", "]", ")", "<=", "max", "(", "t_collar", ",", "percentage_of_length", "*", "annotated_length", ")", "elif", "'offset'", "in", "reference_event", "and", "'offset'", "in", "estimated_event", ":", "annotated_length", "=", "reference_event", "[", "'offset'", "]", "-", "reference_event", "[", "'onset'", "]", "return", "math", ".", "fabs", "(", "reference_event", "[", "'offset'", "]", "-", "estimated_event", "[", "'offset'", "]", ")", "<=", "max", "(", "t_collar", ",", "percentage_of_length", "*", "annotated_length", ")" ]
Validate estimated event based on event offset Parameters ---------- reference_event : dict Reference event. estimated_event : dict Estimated event. t_collar : float > 0, seconds First condition, Time collar with which the estimated offset has to be in order to be consider valid estimation. Default value 0.2 percentage_of_length : float in [0, 1] Second condition, percentage of the length within which the estimated offset has to be in order to be consider valid estimation. Default value 0.5 Returns ------- bool
[ "Validate", "estimated", "event", "based", "on", "event", "offset" ]
python
train
40.583333
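validate_offset above accepts an estimated offset when it falls within the larger of a fixed time collar and a fraction of the annotated event length. A small self-contained sketch of that test with hypothetical event dictionaries, using the 'event_onset'/'event_offset' naming style the function checks for first:

import math

reference = {'event_onset': 1.0, 'event_offset': 3.0}   # hypothetical annotated event
estimated = {'event_onset': 1.1, 'event_offset': 3.3}   # hypothetical system output

t_collar = 0.200
percentage_of_length = 0.5
annotated_length = reference['event_offset'] - reference['event_onset']
# Accept the offset if it lies within the larger of the two tolerances.
valid = math.fabs(reference['event_offset'] - estimated['event_offset']) <= max(
    t_collar, percentage_of_length * annotated_length)
print(valid)  # True: |3.0 - 3.3| = 0.3 <= max(0.2, 0.5 * 2.0) = 1.0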
eqcorrscan/EQcorrscan
eqcorrscan/utils/clustering.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/clustering.py#L574-L793
def extract_detections(detections, templates, archive, arc_type, extract_len=90.0, outdir=None, extract_Z=True, additional_stations=[]): """ Extract waveforms associated with detections Takes a list of detections for the template, template. Waveforms will be returned as a list of :class:`obspy.core.stream.Stream` containing segments of extract_len. They will also be saved if outdir is set. The default is unset. The default extract_len is 90 seconds per channel. :type detections: list :param detections: List of :class:`eqcorrscan.core.match_filter.Detection`. :type templates: list :param templates: A list of tuples of the template name and the template Stream used to detect detections. :type archive: str :param archive: Either name of archive or path to continuous data, see :func:`eqcorrscan.utils.archive_read` for details :type arc_type: str :param arc_type: Type of archive, either seishub, FDSN, day_vols :type extract_len: float :param extract_len: Length to extract around the detection (will be equally cut around the detection time) in seconds. Default is 90.0. :type outdir: str :param outdir: Default is None, with None set, no files will be saved, if set each detection will be saved into this directory with files named according to the detection time, NOT than the waveform start time. Detections will be saved into template subdirectories. Files written will be multiplexed miniseed files, the encoding will be chosen automatically and will likely be float. :type extract_Z: bool :param extract_Z: Set to True to also extract Z channels for detections delays will be the same as horizontal channels, only applies if only horizontal channels were used in the template. :type additional_stations: list :param additional_stations: List of tuples of (station, channel) to also extract data for using an average delay. :returns: list of :class:`obspy.core.streams.Stream` :rtype: list .. rubric: Example >>> from eqcorrscan.utils.clustering import extract_detections >>> from eqcorrscan.core.match_filter import Detection >>> from obspy import read, UTCDateTime >>> # Get the path to the test data >>> import eqcorrscan >>> import os >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> # Use some dummy detections, you would use real one >>> detections = [Detection( ... template_name='temp1', detect_time=UTCDateTime(2012, 3, 26, 9, 15), ... no_chans=2, chans=['WHYM', 'EORO'], detect_val=2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', threshold_input=8.0), ... Detection( ... template_name='temp2', detect_time=UTCDateTime(2012, 3, 26, 18, 5), ... no_chans=2, chans=['WHYM', 'EORO'], detect_val=2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', threshold_input=8.0)] >>> archive = os.path.join(TEST_PATH, 'day_vols') >>> template_files = [os.path.join(TEST_PATH, 'temp1.ms'), ... os.path.join(TEST_PATH, 'temp2.ms')] >>> templates = [('temp' + str(i), read(filename)) ... for i, filename in enumerate(template_files)] >>> extracted = extract_detections(detections, templates, ... 
archive=archive, arc_type='day_vols') Working on detections for day: 2012-03-26T00:00:00.000000Z Cutting for detections at: 2012/03/26 09:15:00 Cutting for detections at: 2012/03/26 18:05:00 >>> print(extracted[0].sort()) 2 Trace(s) in Stream: AF.EORO..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples AF.WHYM..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples >>> print(extracted[1].sort()) 2 Trace(s) in Stream: AF.EORO..SHZ | 2012-03-26T18:04:15.000000Z - 2012-03-26T18:05:45.000000Z |\ 1.0 Hz, 91 samples AF.WHYM..SHZ | 2012-03-26T18:04:15.000000Z - 2012-03-26T18:05:45.000000Z |\ 1.0 Hz, 91 samples >>> # Extract from stations not included in the detections >>> extracted = extract_detections( ... detections, templates, archive=archive, arc_type='day_vols', ... additional_stations=[('GOVA', 'SHZ')]) Adding additional stations Added station GOVA.SHZ Added station GOVA.SHZ Working on detections for day: 2012-03-26T00:00:00.000000Z Cutting for detections at: 2012/03/26 09:15:00 Cutting for detections at: 2012/03/26 18:05:00 >>> print(extracted[0].sort()) 3 Trace(s) in Stream: AF.EORO..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples AF.GOVA..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples AF.WHYM..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples >>> # The detections can be saved to a file: >>> extract_detections(detections, templates, archive=archive, ... arc_type='day_vols', ... additional_stations=[('GOVA', 'SHZ')], outdir='.') Adding additional stations Added station GOVA.SHZ Added station GOVA.SHZ Working on detections for day: 2012-03-26T00:00:00.000000Z Cutting for detections at: 2012/03/26 09:15:00 Written file: ./temp1/2012-03-26_09-15-00.ms Cutting for detections at: 2012/03/26 18:05:00 Written file: ./temp2/2012-03-26_18-05-00.ms """ # Sort the template according to start-times, needed so that stachan[i] # corresponds to delays[i] all_delays = [] # List of tuples of template name, delays all_stachans = [] for template in templates: templatestream = template[1].sort(['starttime']) stachans = [(tr.stats.station, tr.stats.channel) for tr in templatestream] mintime = templatestream[0].stats.starttime delays = [tr.stats.starttime - mintime for tr in templatestream] all_delays.append((template[0], delays)) all_stachans.append((template[0], stachans)) # Sort the detections and group by day detections.sort(key=lambda d: d.detect_time) detection_days = [detection.detect_time.date for detection in detections] detection_days = list(set(detection_days)) detection_days.sort() detection_days = [UTCDateTime(d) for d in detection_days] # Initialize output list detection_wavefiles = [] # Also include Z channels when extracting detections if extract_Z: new_all_stachans = [] new_all_delays = [] for t, template in enumerate(all_stachans): stachans = template[1] delays = all_delays[t][1] new_stachans = [] new_delays = [] j = 0 for i, stachan in enumerate(stachans): if j == 1: new_stachans.append((stachan[0], stachan[1][0] + 'Z')) new_delays.append(delays[i]) new_stachans.append(stachan) new_delays.append(delays[i]) j = 0 else: new_stachans.append(stachan) new_delays.append(delays[i]) j += 1 new_all_stachans.append((template[0], new_stachans)) new_all_delays.append((template[0], new_delays)) all_delays = new_all_delays all_stachans = new_all_stachans if not len(additional_stations) == 0: print('Adding additional stations') for 
t, template in enumerate(all_stachans): av_delay = np.mean(all_delays[t][1]) for sta in additional_stations: if sta not in template[1]: print('Added station ' + '.'.join(sta)) template[1].append(sta) all_delays[t][1].append(av_delay) del stachans # Loop through the days for detection_day in detection_days: print('Working on detections for day: ' + str(detection_day)) stachans = list(set([stachans[1] for stachans in all_stachans][0])) # List of all unique stachans - read in all data st = read_data(archive=archive, arc_type=arc_type, day=detection_day, stachans=stachans) st.merge(fill_value='interpolate') day_detections = [detection for detection in detections if UTCDateTime(detection.detect_time.date) == detection_day] del stachans, delays for detection in day_detections: print('Cutting for detections at: ' + detection.detect_time.strftime('%Y/%m/%d %H:%M:%S')) detect_wav = st.copy() for tr in detect_wav: t1 = UTCDateTime(detection.detect_time) - extract_len / 2 t2 = UTCDateTime(detection.detect_time) + extract_len / 2 tr.trim(starttime=t1, endtime=t2) if outdir: if not os.path.isdir(os.path.join(outdir, detection.template_name)): os.makedirs(os.path.join(outdir, detection.template_name)) detect_wav.write(os.path.join(outdir, detection.template_name, detection.detect_time. strftime('%Y-%m-%d_%H-%M-%S') + '.ms'), format='MSEED') print('Written file: %s' % '/'.join([outdir, detection.template_name, detection.detect_time. strftime('%Y-%m-%d_%H-%M-%S') + '.ms'])) if not outdir: detection_wavefiles.append(detect_wav) del detect_wav del st if outdir: detection_wavefiles = [] if not outdir: return detection_wavefiles else: return
[ "def", "extract_detections", "(", "detections", ",", "templates", ",", "archive", ",", "arc_type", ",", "extract_len", "=", "90.0", ",", "outdir", "=", "None", ",", "extract_Z", "=", "True", ",", "additional_stations", "=", "[", "]", ")", ":", "# Sort the template according to start-times, needed so that stachan[i]", "# corresponds to delays[i]", "all_delays", "=", "[", "]", "# List of tuples of template name, delays", "all_stachans", "=", "[", "]", "for", "template", "in", "templates", ":", "templatestream", "=", "template", "[", "1", "]", ".", "sort", "(", "[", "'starttime'", "]", ")", "stachans", "=", "[", "(", "tr", ".", "stats", ".", "station", ",", "tr", ".", "stats", ".", "channel", ")", "for", "tr", "in", "templatestream", "]", "mintime", "=", "templatestream", "[", "0", "]", ".", "stats", ".", "starttime", "delays", "=", "[", "tr", ".", "stats", ".", "starttime", "-", "mintime", "for", "tr", "in", "templatestream", "]", "all_delays", ".", "append", "(", "(", "template", "[", "0", "]", ",", "delays", ")", ")", "all_stachans", ".", "append", "(", "(", "template", "[", "0", "]", ",", "stachans", ")", ")", "# Sort the detections and group by day", "detections", ".", "sort", "(", "key", "=", "lambda", "d", ":", "d", ".", "detect_time", ")", "detection_days", "=", "[", "detection", ".", "detect_time", ".", "date", "for", "detection", "in", "detections", "]", "detection_days", "=", "list", "(", "set", "(", "detection_days", ")", ")", "detection_days", ".", "sort", "(", ")", "detection_days", "=", "[", "UTCDateTime", "(", "d", ")", "for", "d", "in", "detection_days", "]", "# Initialize output list", "detection_wavefiles", "=", "[", "]", "# Also include Z channels when extracting detections", "if", "extract_Z", ":", "new_all_stachans", "=", "[", "]", "new_all_delays", "=", "[", "]", "for", "t", ",", "template", "in", "enumerate", "(", "all_stachans", ")", ":", "stachans", "=", "template", "[", "1", "]", "delays", "=", "all_delays", "[", "t", "]", "[", "1", "]", "new_stachans", "=", "[", "]", "new_delays", "=", "[", "]", "j", "=", "0", "for", "i", ",", "stachan", "in", "enumerate", "(", "stachans", ")", ":", "if", "j", "==", "1", ":", "new_stachans", ".", "append", "(", "(", "stachan", "[", "0", "]", ",", "stachan", "[", "1", "]", "[", "0", "]", "+", "'Z'", ")", ")", "new_delays", ".", "append", "(", "delays", "[", "i", "]", ")", "new_stachans", ".", "append", "(", "stachan", ")", "new_delays", ".", "append", "(", "delays", "[", "i", "]", ")", "j", "=", "0", "else", ":", "new_stachans", ".", "append", "(", "stachan", ")", "new_delays", ".", "append", "(", "delays", "[", "i", "]", ")", "j", "+=", "1", "new_all_stachans", ".", "append", "(", "(", "template", "[", "0", "]", ",", "new_stachans", ")", ")", "new_all_delays", ".", "append", "(", "(", "template", "[", "0", "]", ",", "new_delays", ")", ")", "all_delays", "=", "new_all_delays", "all_stachans", "=", "new_all_stachans", "if", "not", "len", "(", "additional_stations", ")", "==", "0", ":", "print", "(", "'Adding additional stations'", ")", "for", "t", ",", "template", "in", "enumerate", "(", "all_stachans", ")", ":", "av_delay", "=", "np", ".", "mean", "(", "all_delays", "[", "t", "]", "[", "1", "]", ")", "for", "sta", "in", "additional_stations", ":", "if", "sta", "not", "in", "template", "[", "1", "]", ":", "print", "(", "'Added station '", "+", "'.'", ".", "join", "(", "sta", ")", ")", "template", "[", "1", "]", ".", "append", "(", "sta", ")", "all_delays", "[", "t", "]", "[", "1", "]", ".", "append", "(", "av_delay", ")", "del", 
"stachans", "# Loop through the days", "for", "detection_day", "in", "detection_days", ":", "print", "(", "'Working on detections for day: '", "+", "str", "(", "detection_day", ")", ")", "stachans", "=", "list", "(", "set", "(", "[", "stachans", "[", "1", "]", "for", "stachans", "in", "all_stachans", "]", "[", "0", "]", ")", ")", "# List of all unique stachans - read in all data", "st", "=", "read_data", "(", "archive", "=", "archive", ",", "arc_type", "=", "arc_type", ",", "day", "=", "detection_day", ",", "stachans", "=", "stachans", ")", "st", ".", "merge", "(", "fill_value", "=", "'interpolate'", ")", "day_detections", "=", "[", "detection", "for", "detection", "in", "detections", "if", "UTCDateTime", "(", "detection", ".", "detect_time", ".", "date", ")", "==", "detection_day", "]", "del", "stachans", ",", "delays", "for", "detection", "in", "day_detections", ":", "print", "(", "'Cutting for detections at: '", "+", "detection", ".", "detect_time", ".", "strftime", "(", "'%Y/%m/%d %H:%M:%S'", ")", ")", "detect_wav", "=", "st", ".", "copy", "(", ")", "for", "tr", "in", "detect_wav", ":", "t1", "=", "UTCDateTime", "(", "detection", ".", "detect_time", ")", "-", "extract_len", "/", "2", "t2", "=", "UTCDateTime", "(", "detection", ".", "detect_time", ")", "+", "extract_len", "/", "2", "tr", ".", "trim", "(", "starttime", "=", "t1", ",", "endtime", "=", "t2", ")", "if", "outdir", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "detection", ".", "template_name", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "detection", ".", "template_name", ")", ")", "detect_wav", ".", "write", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "detection", ".", "template_name", ",", "detection", ".", "detect_time", ".", "strftime", "(", "'%Y-%m-%d_%H-%M-%S'", ")", "+", "'.ms'", ")", ",", "format", "=", "'MSEED'", ")", "print", "(", "'Written file: %s'", "%", "'/'", ".", "join", "(", "[", "outdir", ",", "detection", ".", "template_name", ",", "detection", ".", "detect_time", ".", "strftime", "(", "'%Y-%m-%d_%H-%M-%S'", ")", "+", "'.ms'", "]", ")", ")", "if", "not", "outdir", ":", "detection_wavefiles", ".", "append", "(", "detect_wav", ")", "del", "detect_wav", "del", "st", "if", "outdir", ":", "detection_wavefiles", "=", "[", "]", "if", "not", "outdir", ":", "return", "detection_wavefiles", "else", ":", "return" ]
Extract waveforms associated with detections Takes a list of detections for the template, template. Waveforms will be returned as a list of :class:`obspy.core.stream.Stream` containing segments of extract_len. They will also be saved if outdir is set. The default is unset. The default extract_len is 90 seconds per channel. :type detections: list :param detections: List of :class:`eqcorrscan.core.match_filter.Detection`. :type templates: list :param templates: A list of tuples of the template name and the template Stream used to detect detections. :type archive: str :param archive: Either name of archive or path to continuous data, see :func:`eqcorrscan.utils.archive_read` for details :type arc_type: str :param arc_type: Type of archive, either seishub, FDSN, day_vols :type extract_len: float :param extract_len: Length to extract around the detection (will be equally cut around the detection time) in seconds. Default is 90.0. :type outdir: str :param outdir: Default is None, with None set, no files will be saved, if set each detection will be saved into this directory with files named according to the detection time, NOT than the waveform start time. Detections will be saved into template subdirectories. Files written will be multiplexed miniseed files, the encoding will be chosen automatically and will likely be float. :type extract_Z: bool :param extract_Z: Set to True to also extract Z channels for detections delays will be the same as horizontal channels, only applies if only horizontal channels were used in the template. :type additional_stations: list :param additional_stations: List of tuples of (station, channel) to also extract data for using an average delay. :returns: list of :class:`obspy.core.streams.Stream` :rtype: list .. rubric: Example >>> from eqcorrscan.utils.clustering import extract_detections >>> from eqcorrscan.core.match_filter import Detection >>> from obspy import read, UTCDateTime >>> # Get the path to the test data >>> import eqcorrscan >>> import os >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> # Use some dummy detections, you would use real one >>> detections = [Detection( ... template_name='temp1', detect_time=UTCDateTime(2012, 3, 26, 9, 15), ... no_chans=2, chans=['WHYM', 'EORO'], detect_val=2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', threshold_input=8.0), ... Detection( ... template_name='temp2', detect_time=UTCDateTime(2012, 3, 26, 18, 5), ... no_chans=2, chans=['WHYM', 'EORO'], detect_val=2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', threshold_input=8.0)] >>> archive = os.path.join(TEST_PATH, 'day_vols') >>> template_files = [os.path.join(TEST_PATH, 'temp1.ms'), ... os.path.join(TEST_PATH, 'temp2.ms')] >>> templates = [('temp' + str(i), read(filename)) ... for i, filename in enumerate(template_files)] >>> extracted = extract_detections(detections, templates, ... 
archive=archive, arc_type='day_vols') Working on detections for day: 2012-03-26T00:00:00.000000Z Cutting for detections at: 2012/03/26 09:15:00 Cutting for detections at: 2012/03/26 18:05:00 >>> print(extracted[0].sort()) 2 Trace(s) in Stream: AF.EORO..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples AF.WHYM..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples >>> print(extracted[1].sort()) 2 Trace(s) in Stream: AF.EORO..SHZ | 2012-03-26T18:04:15.000000Z - 2012-03-26T18:05:45.000000Z |\ 1.0 Hz, 91 samples AF.WHYM..SHZ | 2012-03-26T18:04:15.000000Z - 2012-03-26T18:05:45.000000Z |\ 1.0 Hz, 91 samples >>> # Extract from stations not included in the detections >>> extracted = extract_detections( ... detections, templates, archive=archive, arc_type='day_vols', ... additional_stations=[('GOVA', 'SHZ')]) Adding additional stations Added station GOVA.SHZ Added station GOVA.SHZ Working on detections for day: 2012-03-26T00:00:00.000000Z Cutting for detections at: 2012/03/26 09:15:00 Cutting for detections at: 2012/03/26 18:05:00 >>> print(extracted[0].sort()) 3 Trace(s) in Stream: AF.EORO..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples AF.GOVA..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples AF.WHYM..SHZ | 2012-03-26T09:14:15.000000Z - 2012-03-26T09:15:45.000000Z |\ 1.0 Hz, 91 samples >>> # The detections can be saved to a file: >>> extract_detections(detections, templates, archive=archive, ... arc_type='day_vols', ... additional_stations=[('GOVA', 'SHZ')], outdir='.') Adding additional stations Added station GOVA.SHZ Added station GOVA.SHZ Working on detections for day: 2012-03-26T00:00:00.000000Z Cutting for detections at: 2012/03/26 09:15:00 Written file: ./temp1/2012-03-26_09-15-00.ms Cutting for detections at: 2012/03/26 18:05:00 Written file: ./temp2/2012-03-26_18-05-00.ms
[ "Extract", "waveforms", "associated", "with", "detections" ]
python
train
45.736364
nicolargo/glances
glances/plugins/glances_irq.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_irq.py#L165-L179
def __sum(self, line): """Return the IRQ sum number. IRQ line samples: 1: 44487 341 44 72 IO-APIC 1-edge i8042 LOC: 33549868 22394684 32474570 21855077 Local timer interrupts FIQ: usb_fiq """ splitted_line = line.split() try: ret = sum(map(int, splitted_line[1:(self.cpu_number + 1)])) except ValueError: # Correct issue #1007 on some conf (Raspberry Pi with Raspbian) ret = 0 return ret
[ "def", "__sum", "(", "self", ",", "line", ")", ":", "splitted_line", "=", "line", ".", "split", "(", ")", "try", ":", "ret", "=", "sum", "(", "map", "(", "int", ",", "splitted_line", "[", "1", ":", "(", "self", ".", "cpu_number", "+", "1", ")", "]", ")", ")", "except", "ValueError", ":", "# Correct issue #1007 on some conf (Raspberry Pi with Raspbian)", "ret", "=", "0", "return", "ret" ]
Return the IRQ sum number. IRQ line samples: 1: 44487 341 44 72 IO-APIC 1-edge i8042 LOC: 33549868 22394684 32474570 21855077 Local timer interrupts FIQ: usb_fiq
[ "Return", "the", "IRQ", "sum", "number", "." ]
python
train
35.866667
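The __sum helper above adds the per-CPU counters that follow the IRQ label on one /proc/interrupts line, falling back to 0 for non-numeric lines. A standalone sketch with a hypothetical 4-CPU line:

def irq_line_sum(line, cpu_number):
    """Sum the first cpu_number per-CPU counters of one /proc/interrupts line."""
    fields = line.split()
    try:
        return sum(map(int, fields[1:cpu_number + 1]))
    except ValueError:
        # Lines such as "FIQ: usb_fiq" have no numeric counters.
        return 0

sample = "1:  44487  341  44  72  IO-APIC  1-edge  i8042"
print(irq_line_sum(sample, 4))  # 44944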
noxdafox/clipspy
clips/classes.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L294-L296
def watch_instances(self, flag): """Whether or not the Class Instances are being watched.""" lib.EnvSetDefclassWatchInstances(self._env, int(flag), self._cls)
[ "def", "watch_instances", "(", "self", ",", "flag", ")", ":", "lib", ".", "EnvSetDefclassWatchInstances", "(", "self", ".", "_env", ",", "int", "(", "flag", ")", ",", "self", ".", "_cls", ")" ]
Whether or not the Class Instances are being watched.
[ "Whether", "or", "not", "the", "Class", "Instances", "are", "being", "watched", "." ]
python
train
57.333333
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L949-L967
def Value(self, p): """Returns InverseCDF(p), the value that corresponds to probability p. Args: p: number in the range [0, 1] Returns: number value """ if p < 0 or p > 1: raise ValueError('Probability p must be in range [0, 1]') if p == 0: return self.xs[0] if p == 1: return self.xs[-1] index = bisect.bisect(self.ps, p) if p == self.ps[index - 1]: return self.xs[index - 1] else: return self.xs[index]
[ "def", "Value", "(", "self", ",", "p", ")", ":", "if", "p", "<", "0", "or", "p", ">", "1", ":", "raise", "ValueError", "(", "'Probability p must be in range [0, 1]'", ")", "if", "p", "==", "0", ":", "return", "self", ".", "xs", "[", "0", "]", "if", "p", "==", "1", ":", "return", "self", ".", "xs", "[", "-", "1", "]", "index", "=", "bisect", ".", "bisect", "(", "self", ".", "ps", ",", "p", ")", "if", "p", "==", "self", ".", "ps", "[", "index", "-", "1", "]", ":", "return", "self", ".", "xs", "[", "index", "-", "1", "]", "else", ":", "return", "self", ".", "xs", "[", "index", "]" ]
Returns InverseCDF(p), the value that corresponds to probability p. Args: p: number in the range [0, 1] Returns: number value
[ "Returns", "InverseCDF", "(", "p", ")", "the", "value", "that", "corresponds", "to", "probability", "p", "." ]
python
train
27.842105
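Value above is an inverse-CDF lookup over parallel xs/ps arrays. A self-contained sketch of the same lookup, with hypothetical arrays standing in for the Cdf object's attributes:

import bisect

def inverse_cdf(xs, ps, p):
    """Return the value whose cumulative probability is p (xs sorted, ps cumulative)."""
    if p < 0 or p > 1:
        raise ValueError('Probability p must be in range [0, 1]')
    if p == 0:
        return xs[0]
    if p == 1:
        return xs[-1]
    index = bisect.bisect(ps, p)
    # An exact hit on a stored cumulative probability maps back to that value.
    if p == ps[index - 1]:
        return xs[index - 1]
    return xs[index]

print(inverse_cdf([1, 2, 3, 4], [0.25, 0.5, 0.75, 1.0], 0.6))  # 3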
heikomuller/sco-datastore
scodata/subject.py
https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/subject.py#L193-L213
def upload_file(self, filename, file_type=FILE_TYPE_FREESURFER_DIRECTORY): """Create an anatomy object on local disk from the given file. Currently, only Freesurfer anatomy directories are supported. Expects a tar file. Parameters ---------- filename : string Name of the (uploaded) file file_type : string File type (currently expects FILE_TYPE_FREESURFER_DIRECTORY) Returns ------- SubjectHandle Handle for created subject in database """ # We currently only support one file type (i.e., FREESURFER_DIRECTORY). if file_type != FILE_TYPE_FREESURFER_DIRECTORY: raise ValueError('Unsupported file type: ' + file_type) return self.upload_freesurfer_archive(filename)
[ "def", "upload_file", "(", "self", ",", "filename", ",", "file_type", "=", "FILE_TYPE_FREESURFER_DIRECTORY", ")", ":", "# We currently only support one file type (i.e., FREESURFER_DIRECTORY).", "if", "file_type", "!=", "FILE_TYPE_FREESURFER_DIRECTORY", ":", "raise", "ValueError", "(", "'Unsupported file type: '", "+", "file_type", ")", "return", "self", ".", "upload_freesurfer_archive", "(", "filename", ")" ]
Create an anatomy object on local disk from the given file. Currently, only Freesurfer anatomy directories are supported. Expects a tar file. Parameters ---------- filename : string Name of the (uploaded) file file_type : string File type (currently expects FILE_TYPE_FREESURFER_DIRECTORY) Returns ------- SubjectHandle Handle for created subject in database
[ "Create", "an", "anatomy", "object", "on", "local", "disk", "from", "the", "given", "file", ".", "Currently", "only", "Freesurfer", "anatomy", "directories", "are", "supported", ".", "Expects", "a", "tar", "file", "." ]
python
train
38.380952
mcs07/CIRpy
cirpy.py
https://github.com/mcs07/CIRpy/blob/fee2bbbb08eb39bbbe003f835d64e8c0c1688904/cirpy.py#L441-L448
def download(self, filename, representation, overwrite=False): """Download the resolved structure as a file. :param string filename: File path to save to :param string representation: Desired output representation :param bool overwrite: (Optional) Whether to allow overwriting of an existing file """ download(self.input, filename, representation, overwrite, self.resolvers, self.get3d, **self.kwargs)
[ "def", "download", "(", "self", ",", "filename", ",", "representation", ",", "overwrite", "=", "False", ")", ":", "download", "(", "self", ".", "input", ",", "filename", ",", "representation", ",", "overwrite", ",", "self", ".", "resolvers", ",", "self", ".", "get3d", ",", "*", "*", "self", ".", "kwargs", ")" ]
Download the resolved structure as a file. :param string filename: File path to save to :param string representation: Desired output representation :param bool overwrite: (Optional) Whether to allow overwriting of an existing file
[ "Download", "the", "resolved", "structure", "as", "a", "file", "." ]
python
train
55.375
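The method above just forwards to CIRpy's module-level download helper with the stored resolver options. A hedged usage sketch (the identifier and output filename are illustrative, and the call needs network access to the NCI/CADD resolver):

import cirpy

# Resolve a name and save the requested representation to a file.
cirpy.download('Aspirin', 'aspirin.sdf', 'sdf', overwrite=True)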
genepattern/genepattern-python
gp/core.py
https://github.com/genepattern/genepattern-python/blob/9478ea65362b91c72a94f7300c3de8d710bebb71/gp/core.py#L212-L228
def open(self): """ Opens the URL associated with the GPFile and returns a file-like object with three extra methods: * geturl() - return the ultimate URL (can be used to determine if a redirect was followed) * info() - return the meta-information of the page, such as headers * getcode() - return the HTTP status code of the response """ request = urllib.request.Request(self.uri) if self.server_data.authorization_header() is not None: request.add_header('Authorization', self.server_data.authorization_header()) request.add_header('User-Agent', 'GenePatternRest') return urllib.request.urlopen(request)
[ "def", "open", "(", "self", ")", ":", "request", "=", "urllib", ".", "request", ".", "Request", "(", "self", ".", "uri", ")", "if", "self", ".", "server_data", ".", "authorization_header", "(", ")", "is", "not", "None", ":", "request", ".", "add_header", "(", "'Authorization'", ",", "self", ".", "server_data", ".", "authorization_header", "(", ")", ")", "request", ".", "add_header", "(", "'User-Agent'", ",", "'GenePatternRest'", ")", "return", "urllib", ".", "request", ".", "urlopen", "(", "request", ")" ]
Opens the URL associated with the GPFile and returns a file-like object with three extra methods: * geturl() - return the ultimate URL (can be used to determine if a redirect was followed) * info() - return the meta-information of the page, such as headers * getcode() - return the HTTP status code of the response
[ "Opens", "the", "URL", "associated", "with", "the", "GPFile", "and", "returns", "a", "file", "-", "like", "object", "with", "three", "extra", "methods", ":" ]
python
train
42.352941
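GPFile.open above is a thin wrapper around urllib: build a Request, attach the server's Authorization header plus a GenePatternRest User-Agent, and open it. A minimal sketch of the same pattern with a placeholder URL and credential:

import urllib.request

uri = 'https://genepattern.example.org/gp/jobResults/1/stdout.txt'  # placeholder URL
auth_header = 'Bearer <token>'                                      # placeholder credential

request = urllib.request.Request(uri)
request.add_header('Authorization', auth_header)
request.add_header('User-Agent', 'GenePatternRest')
with urllib.request.urlopen(request) as response:
    print(response.getcode(), response.geturl())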
saltstack/salt
salt/modules/boto_s3_bucket.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_s3_bucket.py#L348-L373
def list(region=None, key=None, keyid=None, profile=None): ''' List all buckets owned by the authenticated sender of the request. Returns list of buckets CLI Example: .. code-block:: yaml Owner: {...} Buckets: - {...} - {...} ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) buckets = conn.list_buckets() if not bool(buckets.get('Buckets')): log.warning('No buckets found') if 'ResponseMetadata' in buckets: del buckets['ResponseMetadata'] return buckets except ClientError as e: return {'error': __utils__['boto3.get_error'](e)}
[ "def", "list", "(", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "buckets", "=", "conn", ".", "list_buckets", "(", ")", "if", "not", "bool", "(", "buckets", ".", "get", "(", "'Buckets'", ")", ")", ":", "log", ".", "warning", "(", "'No buckets found'", ")", "if", "'ResponseMetadata'", "in", "buckets", ":", "del", "buckets", "[", "'ResponseMetadata'", "]", "return", "buckets", "except", "ClientError", "as", "e", ":", "return", "{", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
List all buckets owned by the authenticated sender of the request. Returns list of buckets CLI Example: .. code-block:: yaml Owner: {...} Buckets: - {...} - {...}
[ "List", "all", "buckets", "owned", "by", "the", "authenticated", "sender", "of", "the", "request", "." ]
python
train
26.153846
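Stripped of the Salt connection plumbing, the listing above reduces to boto3's list_buckets call with the same empty-result warning and error handling. A sketch against a plain boto3 client (credentials are assumed to come from the environment):

import logging
import boto3
from botocore.exceptions import ClientError

log = logging.getLogger(__name__)

def list_buckets():
    try:
        buckets = boto3.client('s3').list_buckets()
        if not buckets.get('Buckets'):
            log.warning('No buckets found')
        buckets.pop('ResponseMetadata', None)
        return buckets
    except ClientError as exc:
        return {'error': str(exc)}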
zyga/python-glibc
tempfile_ext.py
https://github.com/zyga/python-glibc/blob/d6fdb306b123a995471584a5201155c60a34448a/tempfile_ext.py#L279-L315
def _get_default_tempdir(): """Calculate the default directory to use for temporary files. This routine should be called exactly once. We determine whether or not a candidate temp dir is usable by trying to create and write to a file in that directory. If this is successful, the test file is deleted. To prevent denial of service, the name of the test file must be randomized.""" namer = _RandomNameSequence() dirlist = _candidate_tempdir_list() for dir in dirlist: if dir != _os.curdir: dir = _os.path.abspath(dir) # Try only a few names per directory. for seq in range(100): name = next(namer) filename = _os.path.join(dir, name) try: fd = _os.open(filename, _bin_openflags, 0o600) try: try: with _io.open(fd, 'wb', closefd=False) as fp: fp.write(b'blat') finally: _os.close(fd) finally: _os.unlink(filename) return dir except FileExistsError: pass except OSError: break # no point trying more names in this directory raise FileNotFoundError(_errno.ENOENT, "No usable temporary directory found in %s" % dirlist)
[ "def", "_get_default_tempdir", "(", ")", ":", "namer", "=", "_RandomNameSequence", "(", ")", "dirlist", "=", "_candidate_tempdir_list", "(", ")", "for", "dir", "in", "dirlist", ":", "if", "dir", "!=", "_os", ".", "curdir", ":", "dir", "=", "_os", ".", "path", ".", "abspath", "(", "dir", ")", "# Try only a few names per directory.", "for", "seq", "in", "range", "(", "100", ")", ":", "name", "=", "next", "(", "namer", ")", "filename", "=", "_os", ".", "path", ".", "join", "(", "dir", ",", "name", ")", "try", ":", "fd", "=", "_os", ".", "open", "(", "filename", ",", "_bin_openflags", ",", "0o600", ")", "try", ":", "try", ":", "with", "_io", ".", "open", "(", "fd", ",", "'wb'", ",", "closefd", "=", "False", ")", "as", "fp", ":", "fp", ".", "write", "(", "b'blat'", ")", "finally", ":", "_os", ".", "close", "(", "fd", ")", "finally", ":", "_os", ".", "unlink", "(", "filename", ")", "return", "dir", "except", "FileExistsError", ":", "pass", "except", "OSError", ":", "break", "# no point trying more names in this directory", "raise", "FileNotFoundError", "(", "_errno", ".", "ENOENT", ",", "\"No usable temporary directory found in %s\"", "%", "dirlist", ")" ]
Calculate the default directory to use for temporary files. This routine should be called exactly once. We determine whether or not a candidate temp dir is usable by trying to create and write to a file in that directory. If this is successful, the test file is deleted. To prevent denial of service, the name of the test file must be randomized.
[ "Calculate", "the", "default", "directory", "to", "use", "for", "temporary", "files", ".", "This", "routine", "should", "be", "called", "exactly", "once", "." ]
python
train
38.081081
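The routine above decides whether a candidate directory is usable by creating, writing and unlinking a randomly named probe file in it. A pared-down sketch of that single probe (uuid stands in for the _RandomNameSequence namer):

import os
import uuid

def dir_is_usable(candidate):
    """Return True if a file can be created, written and removed in candidate."""
    name = os.path.join(candidate, 'probe-' + uuid.uuid4().hex)
    try:
        fd = os.open(name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600)
        try:
            os.write(fd, b'blat')
        finally:
            os.close(fd)
            os.unlink(name)
        return True
    except OSError:
        return False

print(dir_is_usable('/tmp'))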
timothycrosley/isort
isort/isort.py
https://github.com/timothycrosley/isort/blob/493c02a1a000fe782cec56f1f43262bacb316381/isort/isort.py#L139-L143
def _get_line(self) -> str: """Returns the current line from the file while incrementing the index.""" line = self.in_lines[self.index] self.index += 1 return line
[ "def", "_get_line", "(", "self", ")", "->", "str", ":", "line", "=", "self", ".", "in_lines", "[", "self", ".", "index", "]", "self", ".", "index", "+=", "1", "return", "line" ]
Returns the current line from the file while incrementing the index.
[ "Returns", "the", "current", "line", "from", "the", "file", "while", "incrementing", "the", "index", "." ]
python
train
38.2
Kortemme-Lab/klab
klab/biblio/doi.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/biblio/doi.py#L398-L441
def to_dict(self): '''A representation of that publication data that matches the schema we use in our databases.''' if not self.record_type == 'journal': # todo: it may be worthwhile creating subclasses for each entry type (journal, conference, etc.) with a common # API e.g. to_json which creates output appropriately raise Exception('This function has only been tested on journal entries at present.') author_list = [] authors = self.article.get('authors', []) for x in range(len(authors)): author = authors[x] first_name = None middle_names = None if author.get('given_name'): names = author['given_name'].split() first_name = names[0] middle_names = (' '.join(names[1:])) or None author_list.append( dict( AuthorOrder = x + 1, FirstName = first_name, MiddleNames = middle_names, Surname = author.get('surname') ) ) return dict( Title = self.article.get('title'), PublicationName = self.issue.get('full_title'), Volume = self.issue.get('volume'), Issue = self.issue.get('issue'), StartPage = self.article.get('first_page'), EndPage = self.article.get('last_page'), PublicationYear = self.get_year(), PublicationDate = self.get_earliest_date(), RIS = None, DOI = self.doi, PubMedID = self.get_pubmed_id(), URL = 'http://dx.doi.org/%s' % self.doi, ISSN = None, # eight-digit number authors = author_list, # RecordType = DOI.record_types.get(self.record_type) )
[ "def", "to_dict", "(", "self", ")", ":", "if", "not", "self", ".", "record_type", "==", "'journal'", ":", "# todo: it may be worthwhile creating subclasses for each entry type (journal, conference, etc.) with a common", "# API e.g. to_json which creates output appropriately", "raise", "Exception", "(", "'This function has only been tested on journal entries at present.'", ")", "author_list", "=", "[", "]", "authors", "=", "self", ".", "article", ".", "get", "(", "'authors'", ",", "[", "]", ")", "for", "x", "in", "range", "(", "len", "(", "authors", ")", ")", ":", "author", "=", "authors", "[", "x", "]", "first_name", "=", "None", "middle_names", "=", "None", "if", "author", ".", "get", "(", "'given_name'", ")", ":", "names", "=", "author", "[", "'given_name'", "]", ".", "split", "(", ")", "first_name", "=", "names", "[", "0", "]", "middle_names", "=", "(", "' '", ".", "join", "(", "names", "[", "1", ":", "]", ")", ")", "or", "None", "author_list", ".", "append", "(", "dict", "(", "AuthorOrder", "=", "x", "+", "1", ",", "FirstName", "=", "first_name", ",", "MiddleNames", "=", "middle_names", ",", "Surname", "=", "author", ".", "get", "(", "'surname'", ")", ")", ")", "return", "dict", "(", "Title", "=", "self", ".", "article", ".", "get", "(", "'title'", ")", ",", "PublicationName", "=", "self", ".", "issue", ".", "get", "(", "'full_title'", ")", ",", "Volume", "=", "self", ".", "issue", ".", "get", "(", "'volume'", ")", ",", "Issue", "=", "self", ".", "issue", ".", "get", "(", "'issue'", ")", ",", "StartPage", "=", "self", ".", "article", ".", "get", "(", "'first_page'", ")", ",", "EndPage", "=", "self", ".", "article", ".", "get", "(", "'last_page'", ")", ",", "PublicationYear", "=", "self", ".", "get_year", "(", ")", ",", "PublicationDate", "=", "self", ".", "get_earliest_date", "(", ")", ",", "RIS", "=", "None", ",", "DOI", "=", "self", ".", "doi", ",", "PubMedID", "=", "self", ".", "get_pubmed_id", "(", ")", ",", "URL", "=", "'http://dx.doi.org/%s'", "%", "self", ".", "doi", ",", "ISSN", "=", "None", ",", "# eight-digit number", "authors", "=", "author_list", ",", "#", "RecordType", "=", "DOI", ".", "record_types", ".", "get", "(", "self", ".", "record_type", ")", ")" ]
A representation of that publication data that matches the schema we use in our databases.
[ "A", "representation", "of", "that", "publication", "data", "that", "matches", "the", "schema", "we", "use", "in", "our", "databases", "." ]
python
train
41.727273
nephics/mat4py
mat4py/loadmat.py
https://github.com/nephics/mat4py/blob/6c1a2ad903937437cc5f24f3c3f5aa2c5a77a1c1/mat4py/loadmat.py#L94-L100
def diff(iterable): """Diff elements of a sequence: s -> s0 - s1, s1 - s2, s2 - s3, ... """ a, b = tee(iterable) next(b, None) return (i - j for i, j in izip(a, b))
[ "def", "diff", "(", "iterable", ")", ":", "a", ",", "b", "=", "tee", "(", "iterable", ")", "next", "(", "b", ",", "None", ")", "return", "(", "i", "-", "j", "for", "i", ",", "j", "in", "izip", "(", "a", ",", "b", ")", ")" ]
Diff elements of a sequence: s -> s0 - s1, s1 - s2, s2 - s3, ...
[ "Diff", "elements", "of", "a", "sequence", ":", "s", "-", ">", "s0", "-", "s1", "s1", "-", "s2", "s2", "-", "s3", "..." ]
python
valid
26
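diff above is written for Python 2 (izip); the same pairwise difference in Python 3, plus an example call:

from itertools import tee

def diff(iterable):
    """Diff elements of a sequence: s -> s0 - s1, s1 - s2, s2 - s3, ..."""
    a, b = tee(iterable)
    next(b, None)
    return (i - j for i, j in zip(a, b))

print(list(diff([10, 7, 4, 4])))  # [3, 3, 0]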
Microsoft/ApplicationInsights-Python
applicationinsights/channel/contracts/Device.py
https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/channel/contracts/Device.py#L105-L114
def oem_name(self, value): """The oem_name property. Args: value (string). the property value. """ if value == self._defaults['ai.device.oemName'] and 'ai.device.oemName' in self._values: del self._values['ai.device.oemName'] else: self._values['ai.device.oemName'] = value
[ "def", "oem_name", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "_defaults", "[", "'ai.device.oemName'", "]", "and", "'ai.device.oemName'", "in", "self", ".", "_values", ":", "del", "self", ".", "_values", "[", "'ai.device.oemName'", "]", "else", ":", "self", ".", "_values", "[", "'ai.device.oemName'", "]", "=", "value" ]
The oem_name property. Args: value (string). the property value.
[ "The", "oem_name", "property", ".", "Args", ":", "value", "(", "string", ")", ".", "the", "property", "value", "." ]
python
train
34.9
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L735-L777
def _run1(self): """workhorse for do_run_1""" if self.check_update_J(): self.update_J() else: if self.check_Broyden_J(): self.update_Broyden_J() if self.check_update_eig_J(): self.update_eig_J() #1. Assuming that J starts updated: delta_vals = self.find_LM_updates(self.calc_grad()) #2. Increase damping until we get a good step: er1 = self.update_function(self.param_vals + delta_vals) good_step = (find_best_step([self.error, er1]) == 1) if not good_step: er0 = self.update_function(self.param_vals) if np.abs(er0 -self.error)/er0 > 1e-7: raise RuntimeError('Function updates are not exact.') CLOG.debug('Bad step, increasing damping') CLOG.debug('\t\t%f\t%f' % (self.error, er1)) grad = self.calc_grad() for _try in range(self._max_inner_loop): self.increase_damping() delta_vals = self.find_LM_updates(grad) er1 = self.update_function(self.param_vals + delta_vals) good_step = (find_best_step([self.error, er1]) == 1) if good_step: break else: er0 = self.update_function(self.param_vals) CLOG.warn('Stuck!') if np.abs(er0 -self.error)/er0 > 1e-7: raise RuntimeError('Function updates are not exact.') #state is updated, now params: if good_step: self._last_error = self.error self.error = er1 CLOG.debug('Good step\t%f\t%f' % (self._last_error, self.error)) self.update_param_vals(delta_vals, incremental=True) self.decrease_damping()
[ "def", "_run1", "(", "self", ")", ":", "if", "self", ".", "check_update_J", "(", ")", ":", "self", ".", "update_J", "(", ")", "else", ":", "if", "self", ".", "check_Broyden_J", "(", ")", ":", "self", ".", "update_Broyden_J", "(", ")", "if", "self", ".", "check_update_eig_J", "(", ")", ":", "self", ".", "update_eig_J", "(", ")", "#1. Assuming that J starts updated:", "delta_vals", "=", "self", ".", "find_LM_updates", "(", "self", ".", "calc_grad", "(", ")", ")", "#2. Increase damping until we get a good step:", "er1", "=", "self", ".", "update_function", "(", "self", ".", "param_vals", "+", "delta_vals", ")", "good_step", "=", "(", "find_best_step", "(", "[", "self", ".", "error", ",", "er1", "]", ")", "==", "1", ")", "if", "not", "good_step", ":", "er0", "=", "self", ".", "update_function", "(", "self", ".", "param_vals", ")", "if", "np", ".", "abs", "(", "er0", "-", "self", ".", "error", ")", "/", "er0", ">", "1e-7", ":", "raise", "RuntimeError", "(", "'Function updates are not exact.'", ")", "CLOG", ".", "debug", "(", "'Bad step, increasing damping'", ")", "CLOG", ".", "debug", "(", "'\\t\\t%f\\t%f'", "%", "(", "self", ".", "error", ",", "er1", ")", ")", "grad", "=", "self", ".", "calc_grad", "(", ")", "for", "_try", "in", "range", "(", "self", ".", "_max_inner_loop", ")", ":", "self", ".", "increase_damping", "(", ")", "delta_vals", "=", "self", ".", "find_LM_updates", "(", "grad", ")", "er1", "=", "self", ".", "update_function", "(", "self", ".", "param_vals", "+", "delta_vals", ")", "good_step", "=", "(", "find_best_step", "(", "[", "self", ".", "error", ",", "er1", "]", ")", "==", "1", ")", "if", "good_step", ":", "break", "else", ":", "er0", "=", "self", ".", "update_function", "(", "self", ".", "param_vals", ")", "CLOG", ".", "warn", "(", "'Stuck!'", ")", "if", "np", ".", "abs", "(", "er0", "-", "self", ".", "error", ")", "/", "er0", ">", "1e-7", ":", "raise", "RuntimeError", "(", "'Function updates are not exact.'", ")", "#state is updated, now params:", "if", "good_step", ":", "self", ".", "_last_error", "=", "self", ".", "error", "self", ".", "error", "=", "er1", "CLOG", ".", "debug", "(", "'Good step\\t%f\\t%f'", "%", "(", "self", ".", "_last_error", ",", "self", ".", "error", ")", ")", "self", ".", "update_param_vals", "(", "delta_vals", ",", "incremental", "=", "True", ")", "self", ".", "decrease_damping", "(", ")" ]
workhorse for do_run_1
[ "workhorse", "for", "do_run_1" ]
python
valid
41.465116
Azure/azure-storage-python
azure-storage-common/azure/storage/common/_http/httpclient.py
https://github.com/Azure/azure-storage-python/blob/52327354b192cbcf6b7905118ec6b5d57fa46275/azure-storage-common/azure/storage/common/_http/httpclient.py#L42-L66
def set_proxy(self, host, port, user, password): ''' Sets the proxy server host and port for the HTTP CONNECT Tunnelling. Note that we set the proxies directly on the request later on rather than using the session object as requests has a bug where session proxy is ignored in favor of environment proxy. So, auth will not work unless it is passed directly when making the request as this overrides both. :param str host: Address of the proxy. Ex: '192.168.0.100' :param int port: Port of the proxy. Ex: 6000 :param str user: User for proxy authorization. :param str password: Password for proxy authorization. ''' if user and password: proxy_string = '{}:{}@{}:{}'.format(user, password, host, port) else: proxy_string = '{}:{}'.format(host, port) self.proxies = {'http': 'http://{}'.format(proxy_string), 'https': 'https://{}'.format(proxy_string)}
[ "def", "set_proxy", "(", "self", ",", "host", ",", "port", ",", "user", ",", "password", ")", ":", "if", "user", "and", "password", ":", "proxy_string", "=", "'{}:{}@{}:{}'", ".", "format", "(", "user", ",", "password", ",", "host", ",", "port", ")", "else", ":", "proxy_string", "=", "'{}:{}'", ".", "format", "(", "host", ",", "port", ")", "self", ".", "proxies", "=", "{", "'http'", ":", "'http://{}'", ".", "format", "(", "proxy_string", ")", ",", "'https'", ":", "'https://{}'", ".", "format", "(", "proxy_string", ")", "}" ]
Sets the proxy server host and port for the HTTP CONNECT Tunnelling. Note that we set the proxies directly on the request later on rather than using the session object as requests has a bug where session proxy is ignored in favor of environment proxy. So, auth will not work unless it is passed directly when making the request as this overrides both. :param str host: Address of the proxy. Ex: '192.168.0.100' :param int port: Port of the proxy. Ex: 6000 :param str user: User for proxy authorization. :param str password: Password for proxy authorization.
[ "Sets", "the", "proxy", "server", "host", "and", "port", "for", "the", "HTTP", "CONNECT", "Tunnelling", "." ]
python
train
41.48
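set_proxy above only builds the proxies mapping; as its docstring explains, the mapping is then passed per-request so that it overrides any environment proxy. A sketch of that consumption with placeholder host, port and credentials:

import requests

host, port, user, password = '192.168.0.100', 6000, 'proxyuser', 'secret'  # placeholders
proxy_string = '{}:{}@{}:{}'.format(user, password, host, port)
proxies = {'http': 'http://{}'.format(proxy_string),
           'https': 'https://{}'.format(proxy_string)}

# Pass the proxies explicitly on the request rather than on the session.
response = requests.get('https://example.com/', proxies=proxies)
print(response.status_code)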
projectatomic/osbs-client
osbs/build/plugins_configuration.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/build/plugins_configuration.py#L175-L187
def adjust_for_isolated(self): """ Remove certain plugins in order to handle the "isolated build" scenario. """ if self.user_params.isolated.value: remove_plugins = [ ("prebuild_plugins", "check_and_set_rebuild"), ("prebuild_plugins", "stop_autorebuild_if_disabled") ] for when, which in remove_plugins: self.pt.remove_plugin(when, which, 'removed from isolated build request')
[ "def", "adjust_for_isolated", "(", "self", ")", ":", "if", "self", ".", "user_params", ".", "isolated", ".", "value", ":", "remove_plugins", "=", "[", "(", "\"prebuild_plugins\"", ",", "\"check_and_set_rebuild\"", ")", ",", "(", "\"prebuild_plugins\"", ",", "\"stop_autorebuild_if_disabled\"", ")", "]", "for", "when", ",", "which", "in", "remove_plugins", ":", "self", ".", "pt", ".", "remove_plugin", "(", "when", ",", "which", ",", "'removed from isolated build request'", ")" ]
Remove certain plugins in order to handle the "isolated build" scenario.
[ "Remove", "certain", "plugins", "in", "order", "to", "handle", "the", "isolated", "build", "scenario", "." ]
python
train
37.692308
django-fluent/django-fluent-comments
fluent_comments/moderation.py
https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/moderation.py#L66-L104
def moderate(self, comment, content_object, request): """ Determine whether a given comment on a given object should be allowed to show up immediately, or should be marked non-public and await approval. Returns ``True`` if the comment should be moderated (marked non-public), ``False`` otherwise. """ # Soft delete checks are done first, so these comments are not mistakenly "just moderated" # for expiring the `close_after` date, but correctly get marked as spam instead. # This helps staff to quickly see which comments need real moderation. if self.akismet_check: akismet_result = akismet_check(comment, content_object, request) if akismet_result: # Typically action=delete never gets here, unless the service was having problems. if akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam) and \ self.akismet_check_action in ('auto', 'soft_delete', 'delete'): comment.is_removed = True # Set extra marker # SpamStatus.Unknown or action=moderate will end up in the moderation queue return True # Parent class check if super(FluentCommentsModerator, self).moderate(comment, content_object, request): return True # Bad words check if self.moderate_bad_words: input_words = split_words(comment.comment) if self.moderate_bad_words.intersection(input_words): return True # Akismet check if self.akismet_check and self.akismet_check_action not in ('soft_delete', 'delete'): # Return True if akismet marks this comment as spam and we want to moderate it. if akismet_check(comment, content_object, request): return True return False
[ "def", "moderate", "(", "self", ",", "comment", ",", "content_object", ",", "request", ")", ":", "# Soft delete checks are done first, so these comments are not mistakenly \"just moderated\"", "# for expiring the `close_after` date, but correctly get marked as spam instead.", "# This helps staff to quickly see which comments need real moderation.", "if", "self", ".", "akismet_check", ":", "akismet_result", "=", "akismet_check", "(", "comment", ",", "content_object", ",", "request", ")", "if", "akismet_result", ":", "# Typically action=delete never gets here, unless the service was having problems.", "if", "akismet_result", "in", "(", "SpamStatus", ".", "ProbableSpam", ",", "SpamStatus", ".", "DefiniteSpam", ")", "and", "self", ".", "akismet_check_action", "in", "(", "'auto'", ",", "'soft_delete'", ",", "'delete'", ")", ":", "comment", ".", "is_removed", "=", "True", "# Set extra marker", "# SpamStatus.Unknown or action=moderate will end up in the moderation queue", "return", "True", "# Parent class check", "if", "super", "(", "FluentCommentsModerator", ",", "self", ")", ".", "moderate", "(", "comment", ",", "content_object", ",", "request", ")", ":", "return", "True", "# Bad words check", "if", "self", ".", "moderate_bad_words", ":", "input_words", "=", "split_words", "(", "comment", ".", "comment", ")", "if", "self", ".", "moderate_bad_words", ".", "intersection", "(", "input_words", ")", ":", "return", "True", "# Akismet check", "if", "self", ".", "akismet_check", "and", "self", ".", "akismet_check_action", "not", "in", "(", "'soft_delete'", ",", "'delete'", ")", ":", "# Return True if akismet marks this comment as spam and we want to moderate it.", "if", "akismet_check", "(", "comment", ",", "content_object", ",", "request", ")", ":", "return", "True", "return", "False" ]
Determine whether a given comment on a given object should be allowed to show up immediately, or should be marked non-public and await approval. Returns ``True`` if the comment should be moderated (marked non-public), ``False`` otherwise.
[ "Determine", "whether", "a", "given", "comment", "on", "a", "given", "object", "should", "be", "allowed", "to", "show", "up", "immediately", "or", "should", "be", "marked", "non", "-", "public", "and", "await", "approval", "." ]
python
train
47.666667
gwastro/pycbc-glue
pycbc_glue/ligolw/dbtables.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/dbtables.py#L164-L183
def uninstall_signal_trap(signums = None): """ Undo the effects of install_signal_trap(). Restores the original signal handlers. If signums is a sequence of signal numbers only the signal handlers for those signals will be restored (KeyError will be raised if one of them is not one that install_signal_trap() installed a handler for, in which case some undefined number of handlers will have been restored). If signums is None (the default) then all signals that have been modified by previous calls to install_signal_trap() are restored. Note: this function is called by put_connection_filename() and discard_connection_filename() whenever they remove a scratch file and there are then no more scrach files in use. """ # NOTE: this must be called with the temporary_files_lock held. if signums is None: signums = origactions.keys() for signum in signums: signal.signal(signum, origactions.pop(signum))
[ "def", "uninstall_signal_trap", "(", "signums", "=", "None", ")", ":", "# NOTE: this must be called with the temporary_files_lock held.", "if", "signums", "is", "None", ":", "signums", "=", "origactions", ".", "keys", "(", ")", "for", "signum", "in", "signums", ":", "signal", ".", "signal", "(", "signum", ",", "origactions", ".", "pop", "(", "signum", ")", ")" ]
Undo the effects of install_signal_trap(). Restores the original signal handlers. If signums is a sequence of signal numbers only the signal handlers for those signals will be restored (KeyError will be raised if one of them is not one that install_signal_trap() installed a handler for, in which case some undefined number of handlers will have been restored). If signums is None (the default) then all signals that have been modified by previous calls to install_signal_trap() are restored. Note: this function is called by put_connection_filename() and discard_connection_filename() whenever they remove a scratch file and there are then no more scratch files in use.
[ "Undo", "the", "effects", "of", "install_signal_trap", "()", ".", "Restores", "the", "original", "signal", "handlers", ".", "If", "signums", "is", "a", "sequence", "of", "signal", "numbers", "only", "the", "signal", "handlers", "for", "those", "signals", "will", "be", "restored", "(", "KeyError", "will", "be", "raised", "if", "one", "of", "them", "is", "not", "one", "that", "install_signal_trap", "()", "installed", "a", "handler", "for", "in", "which", "case", "some", "undefined", "number", "of", "handlers", "will", "have", "been", "restored", ")", ".", "If", "signums", "is", "None", "(", "the", "default", ")", "then", "all", "signals", "that", "have", "been", "modified", "by", "previous", "calls", "to", "install_signal_trap", "()", "are", "restored", "." ]
python
train
45.5
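uninstall_signal_trap above restores whatever handlers an earlier install step stashed in origactions. A self-contained sketch of that install/uninstall pairing (the handler and the chosen signal are illustrative):

import signal

origactions = {}

def install_signal_trap(signums, handler):
    for signum in signums:
        if signum not in origactions:
            origactions[signum] = signal.getsignal(signum)
            signal.signal(signum, handler)

def uninstall_signal_trap(signums=None):
    if signums is None:
        signums = list(origactions.keys())
    for signum in signums:
        signal.signal(signum, origactions.pop(signum))

install_signal_trap([signal.SIGTERM], lambda signum, frame: None)
uninstall_signal_trap()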
cakebread/yolk
yolk/cli.py
https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/cli.py#L236-L279
def show_updates(self): """ Check installed packages for available updates on PyPI @param project_name: optional package name to check; checks every installed pacakge if none specified @type project_name: string @returns: None """ dists = Distributions() if self.project_name: #Check for a single package pkg_list = [self.project_name] else: #Check for every installed package pkg_list = get_pkglist() found = None for pkg in pkg_list: for (dist, active) in dists.get_distributions("all", pkg, dists.get_highest_installed(pkg)): (project_name, versions) = \ self.pypi.query_versions_pypi(dist.project_name) if versions: #PyPI returns them in chronological order, #but who knows if its guaranteed in the API? #Make sure we grab the highest version: newest = get_highest_version(versions) if newest != dist.version: #We may have newer than what PyPI knows about if pkg_resources.parse_version(dist.version) < \ pkg_resources.parse_version(newest): found = True print(" %s %s (%s)" % (project_name, dist.version, newest)) if not found and self.project_name: self.logger.info("You have the latest version installed.") elif not found: self.logger.info("No newer packages found at The Cheese Shop") return 0
[ "def", "show_updates", "(", "self", ")", ":", "dists", "=", "Distributions", "(", ")", "if", "self", ".", "project_name", ":", "#Check for a single package", "pkg_list", "=", "[", "self", ".", "project_name", "]", "else", ":", "#Check for every installed package", "pkg_list", "=", "get_pkglist", "(", ")", "found", "=", "None", "for", "pkg", "in", "pkg_list", ":", "for", "(", "dist", ",", "active", ")", "in", "dists", ".", "get_distributions", "(", "\"all\"", ",", "pkg", ",", "dists", ".", "get_highest_installed", "(", "pkg", ")", ")", ":", "(", "project_name", ",", "versions", ")", "=", "self", ".", "pypi", ".", "query_versions_pypi", "(", "dist", ".", "project_name", ")", "if", "versions", ":", "#PyPI returns them in chronological order,", "#but who knows if its guaranteed in the API?", "#Make sure we grab the highest version:", "newest", "=", "get_highest_version", "(", "versions", ")", "if", "newest", "!=", "dist", ".", "version", ":", "#We may have newer than what PyPI knows about", "if", "pkg_resources", ".", "parse_version", "(", "dist", ".", "version", ")", "<", "pkg_resources", ".", "parse_version", "(", "newest", ")", ":", "found", "=", "True", "print", "(", "\" %s %s (%s)\"", "%", "(", "project_name", ",", "dist", ".", "version", ",", "newest", ")", ")", "if", "not", "found", "and", "self", ".", "project_name", ":", "self", ".", "logger", ".", "info", "(", "\"You have the latest version installed.\"", ")", "elif", "not", "found", ":", "self", ".", "logger", ".", "info", "(", "\"No newer packages found at The Cheese Shop\"", ")", "return", "0" ]
Check installed packages for available updates on PyPI @param project_name: optional package name to check; checks every installed package if none specified @type project_name: string @returns: None
[ "Check", "installed", "packages", "for", "available", "updates", "on", "PyPI" ]
python
train
39.386364
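The update check above relies on pkg_resources.parse_version for a real version ordering (plain string comparison would rank '1.9' above '1.10'). A small sketch of that comparison with hypothetical versions:

import pkg_resources

installed, newest = '1.9.2', '1.10.0'   # hypothetical installed / PyPI versions
if pkg_resources.parse_version(installed) < pkg_resources.parse_version(newest):
    print(' yolk %s (%s)' % (installed, newest))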
guillermo-carrasco/bcbio-nextgen-monitor
bcbio_monitor/analysis/__init__.py
https://github.com/guillermo-carrasco/bcbio-nextgen-monitor/blob/6d059154d774140e1fd03a0e3625f607cef06f5a/bcbio_monitor/analysis/__init__.py#L155-L163
def update_frontend(self, info): """Updates frontend with info from the log :param info: dict - Information from a line in the log. i.e regular line, new step. """ headers = {'Content-Type': 'text/event-stream'} if info.get('when'): info['when'] = info['when'].isoformat() requests.post(self.base_url + '/publish', data=json.dumps(info), headers=headers)
[ "def", "update_frontend", "(", "self", ",", "info", ")", ":", "headers", "=", "{", "'Content-Type'", ":", "'text/event-stream'", "}", "if", "info", ".", "get", "(", "'when'", ")", ":", "info", "[", "'when'", "]", "=", "info", "[", "'when'", "]", ".", "isoformat", "(", ")", "requests", ".", "post", "(", "self", ".", "base_url", "+", "'/publish'", ",", "data", "=", "json", ".", "dumps", "(", "info", ")", ",", "headers", "=", "headers", ")" ]
Updates frontend with info from the log

        :param info: dict - Information from a line in the log, i.e. a regular line or a new step.
[ "Updates", "frontend", "with", "info", "from", "the", "log" ]
python
train
45.222222
apache/incubator-superset
superset/models/core.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/core.py#L933-L938
def all_table_names_in_database(self, cache=False, cache_timeout=None, force=False): """Parameters need to be passed as keyword arguments.""" if not self.allow_multi_schema_metadata_fetch: return [] return self.db_engine_spec.fetch_result_sets(self, 'table')
[ "def", "all_table_names_in_database", "(", "self", ",", "cache", "=", "False", ",", "cache_timeout", "=", "None", ",", "force", "=", "False", ")", ":", "if", "not", "self", ".", "allow_multi_schema_metadata_fetch", ":", "return", "[", "]", "return", "self", ".", "db_engine_spec", ".", "fetch_result_sets", "(", "self", ",", "'table'", ")" ]
Parameters need to be passed as keyword arguments.
[ "Parameters", "need", "to", "be", "passed", "as", "keyword", "arguments", "." ]
python
train
54.166667
twilio/twilio-python
twilio/rest/preview/bulk_exports/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/bulk_exports/__init__.py#L38-L44
def export_configuration(self): """ :rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationList """ if self._export_configuration is None: self._export_configuration = ExportConfigurationList(self) return self._export_configuration
[ "def", "export_configuration", "(", "self", ")", ":", "if", "self", ".", "_export_configuration", "is", "None", ":", "self", ".", "_export_configuration", "=", "ExportConfigurationList", "(", "self", ")", "return", "self", ".", "_export_configuration" ]
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationList
[ ":", "rtype", ":", "twilio", ".", "rest", ".", "preview", ".", "bulk_exports", ".", "export_configuration", ".", "ExportConfigurationList" ]
python
train
43.285714
OCA/odoorpc
odoorpc/fields.py
https://github.com/OCA/odoorpc/blob/d90aa0b2bc4fafbab8bd8f50d50e3fb0b9ba91f0/odoorpc/fields.py#L612-L625
def _check_relation(self, relation): """Raise a `ValueError` if `relation` is not allowed among the possible values. """ selection = [val[0] for val in self.selection] if relation not in selection: raise ValueError( ("The value '{value}' supplied doesn't match with the possible" " values '{selection}' for the '{field_name}' field").format( value=relation, selection=selection, field_name=self.name, )) return relation
[ "def", "_check_relation", "(", "self", ",", "relation", ")", ":", "selection", "=", "[", "val", "[", "0", "]", "for", "val", "in", "self", ".", "selection", "]", "if", "relation", "not", "in", "selection", ":", "raise", "ValueError", "(", "(", "\"The value '{value}' supplied doesn't match with the possible\"", "\" values '{selection}' for the '{field_name}' field\"", ")", ".", "format", "(", "value", "=", "relation", ",", "selection", "=", "selection", ",", "field_name", "=", "self", ".", "name", ",", ")", ")", "return", "relation" ]
Raise a `ValueError` if `relation` is not allowed among the possible values.
[ "Raise", "a", "ValueError", "if", "relation", "is", "not", "allowed", "among", "the", "possible", "values", "." ]
python
train
41.357143
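A minimal sketch of the validation this method performs, using a hypothetical selection list rather than a real odoorpc field:

    selection = [('res.partner', 'Partner'), ('res.users', 'User')]  # hypothetical (value, label) pairs
    allowed = [val[0] for val in selection]
    relation = 'res.company'
    if relation not in allowed:
        # mirrors the error raised above, with illustrative values
        raise ValueError("The value 'res.company' doesn't match the possible values %s" % allowed)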
welbornprod/colr
colr/controls.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/controls.py#L244-L271
def print_inplace(*args, **kwargs): """ Save cursor position, write some text, and then restore the position. Arguments: Same as `print()`. Keyword Arguments: Same as `print()`, except `end` defaults to '' (empty str), and these: delay : Time in seconds between character writes. """ kwargs.setdefault('file', sys.stdout) kwargs.setdefault('end', '') pos_save(file=kwargs['file']) delay = None with suppress(KeyError): delay = kwargs.pop('delay') if delay is None: print(*args, **kwargs) else: for c in kwargs.get('sep', ' ').join(str(a) for a in args): kwargs['file'].write(c) kwargs['file'].flush() sleep(delay) if kwargs['end']: kwargs['file'].write(kwargs['end']) pos_restore(file=kwargs['file']) # Must flush to see changes. kwargs['file'].flush()
[ "def", "print_inplace", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'file'", ",", "sys", ".", "stdout", ")", "kwargs", ".", "setdefault", "(", "'end'", ",", "''", ")", "pos_save", "(", "file", "=", "kwargs", "[", "'file'", "]", ")", "delay", "=", "None", "with", "suppress", "(", "KeyError", ")", ":", "delay", "=", "kwargs", ".", "pop", "(", "'delay'", ")", "if", "delay", "is", "None", ":", "print", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "for", "c", "in", "kwargs", ".", "get", "(", "'sep'", ",", "' '", ")", ".", "join", "(", "str", "(", "a", ")", "for", "a", "in", "args", ")", ":", "kwargs", "[", "'file'", "]", ".", "write", "(", "c", ")", "kwargs", "[", "'file'", "]", ".", "flush", "(", ")", "sleep", "(", "delay", ")", "if", "kwargs", "[", "'end'", "]", ":", "kwargs", "[", "'file'", "]", ".", "write", "(", "kwargs", "[", "'end'", "]", ")", "pos_restore", "(", "file", "=", "kwargs", "[", "'file'", "]", ")", "# Must flush to see changes.", "kwargs", "[", "'file'", "]", ".", "flush", "(", ")" ]
Save cursor position, write some text, and then restore the position. Arguments: Same as `print()`. Keyword Arguments: Same as `print()`, except `end` defaults to '' (empty str), and these: delay : Time in seconds between character writes.
[ "Save", "cursor", "position", "write", "some", "text", "and", "then", "restore", "the", "position", ".", "Arguments", ":", "Same", "as", "print", "()", "." ]
python
train
32.928571
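A small usage sketch, assuming the function is importable from colr.controls as the file path suggests:

    from colr.controls import print_inplace

    print_inplace('Downloading...', delay=0.05)  # writes one character every 0.05 s, then restores the cursor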
blackecho/Deep-Learning-TensorFlow
yadlt/models/recurrent/lstm.py
https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/models/recurrent/lstm.py#L105-L113
def build_model(self): """Build the model's computational graph.""" with tf.variable_scope( "model", reuse=None, initializer=self.initializer): self._create_placeholders() self._create_rnn_cells() self._create_initstate_and_embeddings() self._create_rnn_architecture() self._create_optimizer_node()
[ "def", "build_model", "(", "self", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"model\"", ",", "reuse", "=", "None", ",", "initializer", "=", "self", ".", "initializer", ")", ":", "self", ".", "_create_placeholders", "(", ")", "self", ".", "_create_rnn_cells", "(", ")", "self", ".", "_create_initstate_and_embeddings", "(", ")", "self", ".", "_create_rnn_architecture", "(", ")", "self", ".", "_create_optimizer_node", "(", ")" ]
Build the model's computational graph.
[ "Build", "the", "model", "s", "computational", "graph", "." ]
python
train
42.444444
mickybart/python-atlasbroker
atlasbroker/service.py
https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/service.py#L95-L114
def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails): """Unbinding the instance see openbrokerapi documentation Raises: ErrBindingDoesNotExist: Binding does not exist. """ # Find the instance instance = self._backend.find(instance_id) # Find the binding binding = self._backend.find(binding_id, instance) if not binding.isProvisioned(): # The binding does not exist raise ErrBindingDoesNotExist() # Delete the binding self._backend.unbind(binding)
[ "def", "unbind", "(", "self", ",", "instance_id", ":", "str", ",", "binding_id", ":", "str", ",", "details", ":", "UnbindDetails", ")", ":", "# Find the instance", "instance", "=", "self", ".", "_backend", ".", "find", "(", "instance_id", ")", "# Find the binding", "binding", "=", "self", ".", "_backend", ".", "find", "(", "binding_id", ",", "instance", ")", "if", "not", "binding", ".", "isProvisioned", "(", ")", ":", "# The binding does not exist", "raise", "ErrBindingDoesNotExist", "(", ")", "# Delete the binding", "self", ".", "_backend", ".", "unbind", "(", "binding", ")" ]
Unbinding the instance see openbrokerapi documentation Raises: ErrBindingDoesNotExist: Binding does not exist.
[ "Unbinding", "the", "instance", "see", "openbrokerapi", "documentation", "Raises", ":", "ErrBindingDoesNotExist", ":", "Binding", "does", "not", "exist", "." ]
python
train
31
radujica/baloo
baloo/io/csv.py
https://github.com/radujica/baloo/blob/f6e05e35b73a75e8a300754c6bdc575e5f2d53b9/baloo/io/csv.py#L56-L86
def to_csv(df, filepath, sep=',', header=True, index=True): """Save DataFrame as csv. Note data is expected to be evaluated. Currently delegates to Pandas. Parameters ---------- df : DataFrame filepath : str sep : str, optional Separator used between values. header : bool, optional Whether to save the header. index : bool, optional Whether to save the index columns. Returns ------- None See Also -------- pandas.DataFrame.to_csv : https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html """ df.to_pandas().to_csv(filepath, sep=sep, header=header, index=index)
[ "def", "to_csv", "(", "df", ",", "filepath", ",", "sep", "=", "','", ",", "header", "=", "True", ",", "index", "=", "True", ")", ":", "df", ".", "to_pandas", "(", ")", ".", "to_csv", "(", "filepath", ",", "sep", "=", "sep", ",", "header", "=", "header", ",", "index", "=", "index", ")" ]
Save DataFrame as csv. Note data is expected to be evaluated. Currently delegates to Pandas. Parameters ---------- df : DataFrame filepath : str sep : str, optional Separator used between values. header : bool, optional Whether to save the header. index : bool, optional Whether to save the index columns. Returns ------- None See Also -------- pandas.DataFrame.to_csv : https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html
[ "Save", "DataFrame", "as", "csv", "." ]
python
train
23.870968
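An illustrative call, assuming `df` is an already-evaluated baloo DataFrame; the file name is arbitrary:

    to_csv(df, 'output.csv', sep='|', header=True, index=False)
    # delegates to pandas: df.to_pandas().to_csv('output.csv', sep='|', header=True, index=False)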
hardbyte/python-can
can/interfaces/usb2can/serial_selector.py
https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/usb2can/serial_selector.py#L32-L45
def find_serial_devices(serial_matcher="ED"): """ Finds a list of USB devices where the serial number (partially) matches the given string. :param str serial_matcher (optional): only device IDs starting with this string are returned :rtype: List[str] """ objWMIService = win32com.client.Dispatch("WbemScripting.SWbemLocator") objSWbemServices = objWMIService.ConnectServer(".", "root\cimv2") items = objSWbemServices.ExecQuery("SELECT * FROM Win32_USBControllerDevice") ids = (item.Dependent.strip('"')[-8:] for item in items) return [e for e in ids if e.startswith(serial_matcher)]
[ "def", "find_serial_devices", "(", "serial_matcher", "=", "\"ED\"", ")", ":", "objWMIService", "=", "win32com", ".", "client", ".", "Dispatch", "(", "\"WbemScripting.SWbemLocator\"", ")", "objSWbemServices", "=", "objWMIService", ".", "ConnectServer", "(", "\".\"", ",", "\"root\\cimv2\"", ")", "items", "=", "objSWbemServices", ".", "ExecQuery", "(", "\"SELECT * FROM Win32_USBControllerDevice\"", ")", "ids", "=", "(", "item", ".", "Dependent", ".", "strip", "(", "'\"'", ")", "[", "-", "8", ":", "]", "for", "item", "in", "items", ")", "return", "[", "e", "for", "e", "in", "ids", "if", "e", ".", "startswith", "(", "serial_matcher", ")", "]" ]
Finds a list of USB devices where the serial number (partially) matches the given string. :param str serial_matcher (optional): only device IDs starting with this string are returned :rtype: List[str]
[ "Finds", "a", "list", "of", "USB", "devices", "where", "the", "serial", "number", "(", "partially", ")", "matches", "the", "given", "string", "." ]
python
train
44.214286
tdryer/hangups
hangups/conversation_event.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/conversation_event.py#L123-L141
def serialize(self): """Serialize this segment to a ``Segment`` message. Returns: ``Segment`` message. """ segment = hangouts_pb2.Segment( type=self.type_, text=self.text, formatting=hangouts_pb2.Formatting( bold=self.is_bold, italic=self.is_italic, strikethrough=self.is_strikethrough, underline=self.is_underline, ), ) if self.link_target is not None: segment.link_data.link_target = self.link_target return segment
[ "def", "serialize", "(", "self", ")", ":", "segment", "=", "hangouts_pb2", ".", "Segment", "(", "type", "=", "self", ".", "type_", ",", "text", "=", "self", ".", "text", ",", "formatting", "=", "hangouts_pb2", ".", "Formatting", "(", "bold", "=", "self", ".", "is_bold", ",", "italic", "=", "self", ".", "is_italic", ",", "strikethrough", "=", "self", ".", "is_strikethrough", ",", "underline", "=", "self", ".", "is_underline", ",", ")", ",", ")", "if", "self", ".", "link_target", "is", "not", "None", ":", "segment", ".", "link_data", ".", "link_target", "=", "self", ".", "link_target", "return", "segment" ]
Serialize this segment to a ``Segment`` message. Returns: ``Segment`` message.
[ "Serialize", "this", "segment", "to", "a", "Segment", "message", "." ]
python
valid
31.157895
Dentosal/python-sc2
sc2/units.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/units.py#L117-L124
def closest_distance_to(self, position: Union[Unit, Point2, Point3]) -> Union[int, float]: """ Returns the distance between the closest unit from this group to the target unit """ assert self.exists if isinstance(position, Unit): position = position.position return position.distance_to_closest( [u.position for u in self] )
[ "def", "closest_distance_to", "(", "self", ",", "position", ":", "Union", "[", "Unit", ",", "Point2", ",", "Point3", "]", ")", "->", "Union", "[", "int", ",", "float", "]", ":", "assert", "self", ".", "exists", "if", "isinstance", "(", "position", ",", "Unit", ")", ":", "position", "=", "position", ".", "position", "return", "position", ".", "distance_to_closest", "(", "[", "u", ".", "position", "for", "u", "in", "self", "]", ")" ]
Returns the distance between the closest unit from this group to the target unit
[ "Returns", "the", "distance", "between", "the", "closest", "unit", "from", "this", "group", "to", "the", "target", "unit" ]
python
train
47.625
pydata/xarray
xarray/core/groupby.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/groupby.py#L448-L451
def first(self, skipna=None, keep_attrs=None): """Return the first element of each group along the group dimension """ return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)
[ "def", "first", "(", "self", ",", "skipna", "=", "None", ",", "keep_attrs", "=", "None", ")", ":", "return", "self", ".", "_first_or_last", "(", "duck_array_ops", ".", "first", ",", "skipna", ",", "keep_attrs", ")" ]
Return the first element of each group along the group dimension
[ "Return", "the", "first", "element", "of", "each", "group", "along", "the", "group", "dimension" ]
python
train
52
Yelp/kafka-utils
kafka_utils/util/validation.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/validation.py#L36-L47
def assignment_to_plan(assignment): """Convert an assignment to the format used by Kafka to describe a reassignment plan. """ return { 'version': 1, 'partitions': [{'topic': t_p[0], 'partition': t_p[1], 'replicas': replica } for t_p, replica in six.iteritems(assignment)] }
[ "def", "assignment_to_plan", "(", "assignment", ")", ":", "return", "{", "'version'", ":", "1", ",", "'partitions'", ":", "[", "{", "'topic'", ":", "t_p", "[", "0", "]", ",", "'partition'", ":", "t_p", "[", "1", "]", ",", "'replicas'", ":", "replica", "}", "for", "t_p", ",", "replica", "in", "six", ".", "iteritems", "(", "assignment", ")", "]", "}" ]
Convert an assignment to the format used by Kafka to describe a reassignment plan.
[ "Convert", "an", "assignment", "to", "the", "format", "used", "by", "Kafka", "to", "describe", "a", "reassignment", "plan", "." ]
python
train
28
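A hypothetical input/output pair showing the shape of the conversion (topic name and broker ids are made up):

    assignment = {('my-topic', 0): [1, 2], ('my-topic', 1): [2, 3]}
    plan = assignment_to_plan(assignment)
    # plan == {'version': 1,
    #          'partitions': [{'topic': 'my-topic', 'partition': 0, 'replicas': [1, 2]},
    #                         {'topic': 'my-topic', 'partition': 1, 'replicas': [2, 3]}]}
    # (partition entries follow the dict's iteration order)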
chaoss/grimoirelab-elk
grimoire_elk/raw/elastic.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/raw/elastic.py#L147-L153
def add_update_date(self, item): """ All item['updated_on'] from perceval is epoch """ updated = unixtime_to_datetime(item['updated_on']) timestamp = unixtime_to_datetime(item['timestamp']) item['metadata__updated_on'] = updated.isoformat() # Also add timestamp used in incremental enrichment item['metadata__timestamp'] = timestamp.isoformat()
[ "def", "add_update_date", "(", "self", ",", "item", ")", ":", "updated", "=", "unixtime_to_datetime", "(", "item", "[", "'updated_on'", "]", ")", "timestamp", "=", "unixtime_to_datetime", "(", "item", "[", "'timestamp'", "]", ")", "item", "[", "'metadata__updated_on'", "]", "=", "updated", ".", "isoformat", "(", ")", "# Also add timestamp used in incremental enrichment", "item", "[", "'metadata__timestamp'", "]", "=", "timestamp", ".", "isoformat", "(", ")" ]
All item['updated_on'] values from perceval are epoch timestamps
[ "All", "item", "[", "updated_on", "]", "from", "perceval", "is", "epoch" ]
python
train
55.142857
DLR-RM/RAFCON
source/rafcon/gui/utils/shell_execution.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/utils/shell_execution.py#L8-L29
def execute_command_with_path_in_process(command, path, shell=False, cwd=None, logger=None): """Executes a specific command in a separate process with a path as argument. :param command: the command to be executed :param path: the path as first argument to the shell command :param bool shell: Whether to use a shell :param str cwd: The working directory of the command :param logger: optional logger instance which can be handed from other module :return: None """ if logger is None: logger = _logger logger.debug("Opening path with command: {0} {1}".format(command, path)) # This splits the command in a matter so that the command gets called in a separate shell and thus # does not lock the window. args = shlex.split('{0} "{1}"'.format(command, path)) try: subprocess.Popen(args, shell=shell, cwd=cwd) return True except OSError as e: logger.error('The operating system raised an error: {}'.format(e)) return False
[ "def", "execute_command_with_path_in_process", "(", "command", ",", "path", ",", "shell", "=", "False", ",", "cwd", "=", "None", ",", "logger", "=", "None", ")", ":", "if", "logger", "is", "None", ":", "logger", "=", "_logger", "logger", ".", "debug", "(", "\"Opening path with command: {0} {1}\"", ".", "format", "(", "command", ",", "path", ")", ")", "# This splits the command in a matter so that the command gets called in a separate shell and thus", "# does not lock the window.", "args", "=", "shlex", ".", "split", "(", "'{0} \"{1}\"'", ".", "format", "(", "command", ",", "path", ")", ")", "try", ":", "subprocess", ".", "Popen", "(", "args", ",", "shell", "=", "shell", ",", "cwd", "=", "cwd", ")", "return", "True", "except", "OSError", "as", "e", ":", "logger", ".", "error", "(", "'The operating system raised an error: {}'", ".", "format", "(", "e", ")", ")", "return", "False" ]
Executes a specific command in a separate process with a path as argument. :param command: the command to be executed :param path: the path as first argument to the shell command :param bool shell: Whether to use a shell :param str cwd: The working directory of the command :param logger: optional logger instance which can be handed from other module :return: None
[ "Executes", "a", "specific", "command", "in", "a", "separate", "process", "with", "a", "path", "as", "argument", "." ]
python
train
45.181818
geronimp/graftM
graftm/orfm.py
https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/orfm.py#L11-L33
def command_line(self, input_path=None): '''Return a string to run OrfM with, assuming sequences are incoming on stdin and printed to stdout Parameters ---------- input_path: str path to the input path, or None for STDIN being the input ''' if self.min_orf_length: orfm_arg_l = " -m %d" % self.min_orf_length else: orfm_arg_l = '' if self.restrict_read_length: orfm_arg_l = " -l %d" % self.restrict_read_length cmd = 'orfm %s ' % orfm_arg_l if input_path: cmd += input_path logging.debug("OrfM command chunk: %s" % cmd) return cmd
[ "def", "command_line", "(", "self", ",", "input_path", "=", "None", ")", ":", "if", "self", ".", "min_orf_length", ":", "orfm_arg_l", "=", "\" -m %d\"", "%", "self", ".", "min_orf_length", "else", ":", "orfm_arg_l", "=", "''", "if", "self", ".", "restrict_read_length", ":", "orfm_arg_l", "=", "\" -l %d\"", "%", "self", ".", "restrict_read_length", "cmd", "=", "'orfm %s '", "%", "orfm_arg_l", "if", "input_path", ":", "cmd", "+=", "input_path", "logging", ".", "debug", "(", "\"OrfM command chunk: %s\"", "%", "cmd", ")", "return", "cmd" ]
Return a string to run OrfM with, assuming sequences are incoming on stdin and printed to stdout Parameters ---------- input_path: str path to the input path, or None for STDIN being the input
[ "Return", "a", "string", "to", "run", "OrfM", "with", "assuming", "sequences", "are", "incoming", "on", "stdin", "and", "printed", "to", "stdout", "Parameters", "----------", "input_path", ":", "str", "path", "to", "the", "input", "path", "or", "None", "for", "STDIN", "being", "the", "input" ]
python
train
30.956522
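A usage sketch; the OrfM constructor arguments are assumed from the attributes referenced above, not taken from the record:

    orfm = OrfM(min_orf_length=96)                  # assumed constructor signature
    cmd = orfm.command_line(input_path='reads.fa')
    # cmd is roughly 'orfm  -m 96 reads.fa', ready to be run in a shell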
angr/angr
angr/analyses/cfg/cfb.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfb.py#L181-L212
def dbg_repr(self): """ The debugging representation of this CFBlanket. :return: The debugging representation of this CFBlanket. :rtype: str """ output = [ ] for obj in self.project.loader.all_objects: for section in obj.sections: if section.memsize == 0: continue min_addr, max_addr = section.min_addr, section.max_addr output.append("### Object %s" % repr(section)) output.append("### Range %#x-%#x" % (min_addr, max_addr)) pos = min_addr while pos < max_addr: try: addr, thing = self.floor_item(pos) output.append("%#x: %s" % (addr, repr(thing))) if thing.size == 0: pos += 1 else: pos += thing.size except KeyError: pos += 1 output.append("") return "\n".join(output)
[ "def", "dbg_repr", "(", "self", ")", ":", "output", "=", "[", "]", "for", "obj", "in", "self", ".", "project", ".", "loader", ".", "all_objects", ":", "for", "section", "in", "obj", ".", "sections", ":", "if", "section", ".", "memsize", "==", "0", ":", "continue", "min_addr", ",", "max_addr", "=", "section", ".", "min_addr", ",", "section", ".", "max_addr", "output", ".", "append", "(", "\"### Object %s\"", "%", "repr", "(", "section", ")", ")", "output", ".", "append", "(", "\"### Range %#x-%#x\"", "%", "(", "min_addr", ",", "max_addr", ")", ")", "pos", "=", "min_addr", "while", "pos", "<", "max_addr", ":", "try", ":", "addr", ",", "thing", "=", "self", ".", "floor_item", "(", "pos", ")", "output", ".", "append", "(", "\"%#x: %s\"", "%", "(", "addr", ",", "repr", "(", "thing", ")", ")", ")", "if", "thing", ".", "size", "==", "0", ":", "pos", "+=", "1", "else", ":", "pos", "+=", "thing", ".", "size", "except", "KeyError", ":", "pos", "+=", "1", "output", ".", "append", "(", "\"\"", ")", "return", "\"\\n\"", ".", "join", "(", "output", ")" ]
The debugging representation of this CFBlanket. :return: The debugging representation of this CFBlanket. :rtype: str
[ "The", "debugging", "representation", "of", "this", "CFBlanket", "." ]
python
train
31.9375
limpyd/redis-limpyd-jobs
limpyd_jobs/workers.py
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L751-L800
def manage_options(self):
        """
        Create a parser given the command-line arguments.
        Return True if the programme must exit.
        """
        self.parser = self.create_parser()
        self.options, self.args = self.parser.parse_args(self.argv)

        self.do_imports()

        if self.options.callback and not callable(self.options.callback):
            self.parser.error('The callback is not callable')

        self.logger_level = None
        if self.options.logger_level:
            if self.options.logger_level.isdigit():
                self.options.logger_level = int(self.options.logger_level)
            else:
                try:
                    self.options.logger_level = getattr(logging, self.options.logger_level.upper())
                except:
                    self.parser.error('Invalid logger-level %s' % self.options.logger_level)

        if self.options.max_loops is not None and self.options.max_loops < 0:
            self.parser.error('The max-loops argument (%s) must be a positive integer' % self.options.max_loops)

        if self.options.max_duration is not None and self.options.max_duration < 0:
            self.parser.error('The max-duration argument (%s) must be a positive integer' % self.options.max_duration)

        if self.options.timeout is not None and self.options.timeout < 0:
            self.parser.error('The timeout argument (%s) must be a positive integer (including 0)' % self.options.timeout)

        if self.options.fetch_priorities_delay is not None and self.options.fetch_priorities_delay <= 0:
            self.parser.error('The fetch-priorities-delay argument (%s) must be a positive integer' % self.options.fetch_priorities_delay)

        if self.options.fetch_delayed_delay is not None and self.options.fetch_delayed_delay <= 0:
            self.parser.error('The fetch-delayed-delay argument (%s) must be a positive integer' % self.options.fetch_delayed_delay)

        if self.options.requeue_times is not None and self.options.requeue_times < 0:
            self.parser.error('The requeue-times argument (%s) must be a positive integer (including 0)' % self.options.requeue_times)

        if self.options.requeue_delay_delta is not None and self.options.requeue_delay_delta < 0:
            self.parser.error('The rrequeue-delay-delta argument (%s) must be a positive integer (including 0)' % self.options.requeue_delay_delta)

        self.database_config = None
        if self.options.database:
            host, port, db = self.options.database.split(':')
            self.database_config = dict(host=host, port=int(port), db=int(db))

        self.update_title = self.options.update_title
[ "def", "manage_options", "(", "self", ")", ":", "self", ".", "parser", "=", "self", ".", "create_parser", "(", ")", "self", ".", "options", ",", "self", ".", "args", "=", "self", ".", "parser", ".", "parse_args", "(", "self", ".", "argv", ")", "self", ".", "do_imports", "(", ")", "if", "self", ".", "options", ".", "callback", "and", "not", "callable", "(", "self", ".", "options", ".", "callback", ")", ":", "self", ".", "parser", ".", "error", "(", "'The callback is not callable'", ")", "self", ".", "logger_level", "=", "None", "if", "self", ".", "options", ".", "logger_level", ":", "if", "self", ".", "options", ".", "logger_level", ".", "isdigit", "(", ")", ":", "self", ".", "options", ".", "logger_level", "=", "int", "(", "self", ".", "options", ".", "logger_level", ")", "else", ":", "try", ":", "self", ".", "options", ".", "logger_level", "=", "getattr", "(", "logging", ",", "self", ".", "options", ".", "logger_level", ".", "upper", "(", ")", ")", "except", ":", "self", ".", "parser", ".", "error", "(", "'Invalid logger-level %s'", "%", "self", ".", "options", ".", "logger_level", ")", "if", "self", ".", "options", ".", "max_loops", "is", "not", "None", "and", "self", ".", "options", ".", "max_loops", "<", "0", ":", "self", ".", "parser", ".", "error", "(", "'The max-loops argument (%s) must be a <positive></positive> integer'", "%", "self", ".", "options", ".", "max_loops", ")", "if", "self", ".", "options", ".", "max_duration", "is", "not", "None", "and", "self", ".", "options", ".", "max_duration", "<", "0", ":", "self", ".", "parser", ".", "error", "(", "'The max-duration argument (%s) must be a positive integer'", "%", "self", ".", "options", ".", "max_duration", ")", "if", "self", ".", "options", ".", "timeout", "is", "not", "None", "and", "self", ".", "options", ".", "timeout", "<", "0", ":", "self", ".", "parser", ".", "error", "(", "'The timeout argument (%s) must be a positive integer (including 0)'", "%", "self", ".", "options", ".", "timeout", ")", "if", "self", ".", "options", ".", "fetch_priorities_delay", "is", "not", "None", "and", "self", ".", "options", ".", "fetch_priorities_delay", "<=", "0", ":", "self", ".", "parser", ".", "error", "(", "'The fetch-priorities-delay argument (%s) must be a positive integer'", "%", "self", ".", "options", ".", "fetch_priorities_delay", ")", "if", "self", ".", "options", ".", "fetch_delayed_delay", "is", "not", "None", "and", "self", ".", "options", ".", "fetch_delayed_delay", "<=", "0", ":", "self", ".", "parser", ".", "error", "(", "'The fetch-delayed-delay argument (%s) must be a positive integer'", "%", "self", ".", "options", ".", "fetch_delayed_delay", ")", "if", "self", ".", "options", ".", "requeue_times", "is", "not", "None", "and", "self", ".", "options", ".", "requeue_times", "<", "0", ":", "self", ".", "parser", ".", "error", "(", "'The requeue-times argument (%s) must be a positive integer (including 0)'", "%", "self", ".", "options", ".", "requeue_times", ")", "if", "self", ".", "options", ".", "requeue_delay_delta", "is", "not", "None", "and", "self", ".", "options", ".", "requeue_delay_delta", "<", "0", ":", "self", ".", "parser", ".", "error", "(", "'The rrequeue-delay-delta argument (%s) must be a positive integer (including 0)'", "%", "self", ".", "options", ".", "requeue_delay_delta", ")", "self", ".", "database_config", "=", "None", "if", "self", ".", "options", ".", "database", ":", "host", ",", "port", ",", "db", "=", "self", ".", "options", ".", "database", ".", "split", "(", "':'", ")", "self", ".", 
"database_config", "=", "dict", "(", "host", "=", "host", ",", "port", "=", "int", "(", "port", ")", ",", "db", "=", "int", "(", "db", ")", ")", "self", ".", "update_title", "=", "self", ".", "options", ".", "update_title" ]
Create a parser given the command-line arguments, creates a parser Return True if the programme must exit.
[ "Create", "a", "parser", "given", "the", "command", "-", "line", "arguments", "creates", "a", "parser", "Return", "True", "if", "the", "programme", "must", "exit", "." ]
python
train
53.48
NoneGG/aredis
aredis/commands/lists.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/commands/lists.py#L139-L147
async def ltrim(self, name, start, end): """ Trim the list ``name``, removing all values not within the slice between ``start`` and ``end`` ``start`` and ``end`` can be negative numbers just like Python slicing notation """ return await self.execute_command('LTRIM', name, start, end)
[ "async", "def", "ltrim", "(", "self", ",", "name", ",", "start", ",", "end", ")", ":", "return", "await", "self", ".", "execute_command", "(", "'LTRIM'", ",", "name", ",", "start", ",", "end", ")" ]
Trim the list ``name``, removing all values not within the slice between ``start`` and ``end`` ``start`` and ``end`` can be negative numbers just like Python slicing notation
[ "Trim", "the", "list", "name", "removing", "all", "values", "not", "within", "the", "slice", "between", "start", "and", "end" ]
python
train
37
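A short async sketch, assuming `client` is an aredis StrictRedis instance and the code runs inside a coroutine:

    await client.rpush('jobs', 'a', 'b', 'c', 'd')
    await client.ltrim('jobs', 0, 1)          # keep only the first two elements
    await client.lrange('jobs', 0, -1)        # -> ['a', 'b']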
jamescooke/flake8-aaa
src/flake8_aaa/helpers.py
https://github.com/jamescooke/flake8-aaa/blob/29938b96845fe32ced4358ba66af3b3be2a37794/src/flake8_aaa/helpers.py#L188-L197
def find_stringy_lines(tree: ast.AST, first_line_no: int) -> Set[int]: """ Finds all lines that contain a string in a tree, usually a function. These lines will be ignored when searching for blank lines. """ str_footprints = set() for node in ast.walk(tree): if isinstance(node, ast.Str): str_footprints.update(build_footprint(node, first_line_no)) return str_footprints
[ "def", "find_stringy_lines", "(", "tree", ":", "ast", ".", "AST", ",", "first_line_no", ":", "int", ")", "->", "Set", "[", "int", "]", ":", "str_footprints", "=", "set", "(", ")", "for", "node", "in", "ast", ".", "walk", "(", "tree", ")", ":", "if", "isinstance", "(", "node", ",", "ast", ".", "Str", ")", ":", "str_footprints", ".", "update", "(", "build_footprint", "(", "node", ",", "first_line_no", ")", ")", "return", "str_footprints" ]
Finds all lines that contain a string in a tree, usually a function. These lines will be ignored when searching for blank lines.
[ "Finds", "all", "lines", "that", "contain", "a", "string", "in", "a", "tree", "usually", "a", "function", ".", "These", "lines", "will", "be", "ignored", "when", "searching", "for", "blank", "lines", "." ]
python
train
40.9
OSSOS/MOP
src/ossos/core/ossos/util.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/util.py#L87-L98
def stream(self): """ the stream to write the log content too. @return: """ if self._stream is None: self._stream = tempfile.NamedTemporaryFile(delete=False) try: self._stream.write(self.client.open(self.filename, view='data').read()) except: pass return self._stream
[ "def", "stream", "(", "self", ")", ":", "if", "self", ".", "_stream", "is", "None", ":", "self", ".", "_stream", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "try", ":", "self", ".", "_stream", ".", "write", "(", "self", ".", "client", ".", "open", "(", "self", ".", "filename", ",", "view", "=", "'data'", ")", ".", "read", "(", ")", ")", "except", ":", "pass", "return", "self", ".", "_stream" ]
the stream to write the log content to.
        @return:
[ "the", "stream", "to", "write", "the", "log", "content", "too", "." ]
python
train
30.833333
Crunch-io/crunch-cube
src/cr/cube/crunch_cube.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L189-L198
def index(self, weighted=True, prune=False): """Return cube index measurement. This function is deprecated. Use index_table from CubeSlice. """ warnings.warn( "CrunchCube.index() is deprecated. Use CubeSlice.index_table().", DeprecationWarning, ) return Index.data(self, weighted, prune)
[ "def", "index", "(", "self", ",", "weighted", "=", "True", ",", "prune", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"CrunchCube.index() is deprecated. Use CubeSlice.index_table().\"", ",", "DeprecationWarning", ",", ")", "return", "Index", ".", "data", "(", "self", ",", "weighted", ",", "prune", ")" ]
Return cube index measurement. This function is deprecated. Use index_table from CubeSlice.
[ "Return", "cube", "index", "measurement", "." ]
python
train
35.1
pywavefront/PyWavefront
pywavefront/parser.py
https://github.com/pywavefront/PyWavefront/blob/39ee5186cb37750d4654d19ebe43f723ecd01e2f/pywavefront/parser.py#L104-L107
def next_line(self): """Read the next line from the line generator and split it""" self.line = next(self.lines) # Will raise StopIteration when there are no more lines self.values = self.line.split()
[ "def", "next_line", "(", "self", ")", ":", "self", ".", "line", "=", "next", "(", "self", ".", "lines", ")", "# Will raise StopIteration when there are no more lines", "self", ".", "values", "=", "self", ".", "line", ".", "split", "(", ")" ]
Read the next line from the line generator and split it
[ "Read", "the", "next", "line", "from", "the", "line", "generator", "and", "split", "it" ]
python
train
55.25
senaite/senaite.core
bika/lims/browser/publish/emailview.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/publish/emailview.py#L468-L506
def get_responsibles_data(self, reports): """Responsibles data to be used in the template """ if not reports: return [] recipients = [] recipient_names = [] for num, report in enumerate(reports): # get the linked AR of this ARReport ar = report.getAnalysisRequest() # recipient names of this report report_recipient_names = [] responsibles = ar.getResponsible() for manager_id in responsibles.get("ids", []): responsible = responsibles["dict"][manager_id] name = responsible.get("name") email = responsible.get("email") record = { "name": name, "email": email, "valid": True, } if record not in recipients: recipients.append(record) # remember the name of the recipient for this report report_recipient_names.append(name) recipient_names.append(report_recipient_names) # recipient names, which all of the reports have in common common_names = set(recipient_names[0]).intersection(*recipient_names) # mark recipients not in common for recipient in recipients: if recipient.get("name") not in common_names: recipient["valid"] = False return recipients
[ "def", "get_responsibles_data", "(", "self", ",", "reports", ")", ":", "if", "not", "reports", ":", "return", "[", "]", "recipients", "=", "[", "]", "recipient_names", "=", "[", "]", "for", "num", ",", "report", "in", "enumerate", "(", "reports", ")", ":", "# get the linked AR of this ARReport", "ar", "=", "report", ".", "getAnalysisRequest", "(", ")", "# recipient names of this report", "report_recipient_names", "=", "[", "]", "responsibles", "=", "ar", ".", "getResponsible", "(", ")", "for", "manager_id", "in", "responsibles", ".", "get", "(", "\"ids\"", ",", "[", "]", ")", ":", "responsible", "=", "responsibles", "[", "\"dict\"", "]", "[", "manager_id", "]", "name", "=", "responsible", ".", "get", "(", "\"name\"", ")", "email", "=", "responsible", ".", "get", "(", "\"email\"", ")", "record", "=", "{", "\"name\"", ":", "name", ",", "\"email\"", ":", "email", ",", "\"valid\"", ":", "True", ",", "}", "if", "record", "not", "in", "recipients", ":", "recipients", ".", "append", "(", "record", ")", "# remember the name of the recipient for this report", "report_recipient_names", ".", "append", "(", "name", ")", "recipient_names", ".", "append", "(", "report_recipient_names", ")", "# recipient names, which all of the reports have in common", "common_names", "=", "set", "(", "recipient_names", "[", "0", "]", ")", ".", "intersection", "(", "*", "recipient_names", ")", "# mark recipients not in common", "for", "recipient", "in", "recipients", ":", "if", "recipient", ".", "get", "(", "\"name\"", ")", "not", "in", "common_names", ":", "recipient", "[", "\"valid\"", "]", "=", "False", "return", "recipients" ]
Responsibles data to be used in the template
[ "Responsibles", "data", "to", "be", "used", "in", "the", "template" ]
python
train
36.820513
singnet/snet-cli
snet_cli/utils_config.py
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/utils_config.py#L48-L62
def get_field_from_args_or_session(config, args, field_name): """ We try to get field_name from diffent sources: The order of priorioty is following: - command line argument (--<field_name>) - current session configuration (default_<filed_name>) """ rez = getattr(args, field_name, None) #type(rez) can be int in case of wallet-index, so we cannot make simply if(rez) if (rez != None): return rez rez = config.get_session_field("default_%s"%field_name, exception_if_not_found=False) if (rez): return rez raise Exception("Fail to get default_%s from config, should specify %s via --%s parameter"%(field_name, field_name, field_name.replace("_","-")))
[ "def", "get_field_from_args_or_session", "(", "config", ",", "args", ",", "field_name", ")", ":", "rez", "=", "getattr", "(", "args", ",", "field_name", ",", "None", ")", "#type(rez) can be int in case of wallet-index, so we cannot make simply if(rez)", "if", "(", "rez", "!=", "None", ")", ":", "return", "rez", "rez", "=", "config", ".", "get_session_field", "(", "\"default_%s\"", "%", "field_name", ",", "exception_if_not_found", "=", "False", ")", "if", "(", "rez", ")", ":", "return", "rez", "raise", "Exception", "(", "\"Fail to get default_%s from config, should specify %s via --%s parameter\"", "%", "(", "field_name", ",", "field_name", ",", "field_name", ".", "replace", "(", "\"_\"", ",", "\"-\"", ")", ")", ")" ]
We try to get field_name from different sources:
        The order of priority is the following:
        - command line argument (--<field_name>)
        - current session configuration (default_<field_name>)
[ "We", "try", "to", "get", "field_name", "from", "diffent", "sources", ":", "The", "order", "of", "priorioty", "is", "following", ":", "-", "command", "line", "argument", "(", "--", "<field_name", ">", ")", "-", "current", "session", "configuration", "(", "default_<filed_name", ">", ")" ]
python
train
46.6
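An illustrative call showing the resolution order; the surrounding config and args objects are assumed, not shown in the record:

    # 1. a --wallet-index command-line argument wins,
    # 2. otherwise default_wallet_index from the current session configuration is used,
    # 3. otherwise an exception asks the user to pass --wallet-index explicitly.
    wallet_index = get_field_from_args_or_session(config, args, 'wallet_index')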
secynic/ipwhois
ipwhois/utils.py
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/utils.py#L609-L631
def ipv6_generate_random(total=100): """ The generator to produce random, unique IPv6 addresses that are not defined (can be looked up using ipwhois). Args: total (:obj:`int`): The total number of IPv6 addresses to generate. Yields: str: The next IPv6 address. """ count = 0 yielded = set() while count < total: address = str(IPv6Address(random.randint(0, 2**128-1))) if not ipv6_is_defined(address)[0] and address not in yielded: count += 1 yielded.add(address) yield address
[ "def", "ipv6_generate_random", "(", "total", "=", "100", ")", ":", "count", "=", "0", "yielded", "=", "set", "(", ")", "while", "count", "<", "total", ":", "address", "=", "str", "(", "IPv6Address", "(", "random", ".", "randint", "(", "0", ",", "2", "**", "128", "-", "1", ")", ")", ")", "if", "not", "ipv6_is_defined", "(", "address", ")", "[", "0", "]", "and", "address", "not", "in", "yielded", ":", "count", "+=", "1", "yielded", ".", "add", "(", "address", ")", "yield", "address" ]
The generator to produce random, unique IPv6 addresses that are not defined (can be looked up using ipwhois). Args: total (:obj:`int`): The total number of IPv6 addresses to generate. Yields: str: The next IPv6 address.
[ "The", "generator", "to", "produce", "random", "unique", "IPv6", "addresses", "that", "are", "not", "defined", "(", "can", "be", "looked", "up", "using", "ipwhois", ")", "." ]
python
train
24.521739
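A small usage sketch, with the import path assumed from the file path above:

    from ipwhois.utils import ipv6_generate_random

    for address in ipv6_generate_random(total=3):
        print(address)   # three unique IPv6 addresses outside the reserved/defined ranges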
mitsei/dlkit
dlkit/json_/commenting/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/managers.py#L405-L420
def get_comment_book_assignment_session(self): """Gets the session for assigning comment to book mappings. return: (osid.commenting.CommentBookAssignmentSession) - a ``CommentBookAssignmentSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_book_assignment()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_book_assignment()`` is ``true``.* """ if not self.supports_comment_book_assignment(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.CommentBookAssignmentSession(runtime=self._runtime)
[ "def", "get_comment_book_assignment_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_comment_book_assignment", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "CommentBookAssignmentSession", "(", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the session for assigning comment to book mappings. return: (osid.commenting.CommentBookAssignmentSession) - a ``CommentBookAssignmentSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_book_assignment()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_book_assignment()`` is ``true``.*
[ "Gets", "the", "session", "for", "assigning", "comment", "to", "book", "mappings", "." ]
python
train
45.5
brechtm/rinohtype
src/rinoh/text.py
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/text.py#L329-L340
def font(self, container): """The :class:`Font` described by this single-styled text's style. If the exact font style as described by the `font_weight`, `font_slant` and `font_width` style attributes is not present in the `typeface`, the closest font available is returned instead, and a warning is printed.""" typeface = self.get_style('typeface', container) weight = self.get_style('font_weight', container) slant = self.get_style('font_slant', container) width = self.get_style('font_width', container) return typeface.get_font(weight=weight, slant=slant, width=width)
[ "def", "font", "(", "self", ",", "container", ")", ":", "typeface", "=", "self", ".", "get_style", "(", "'typeface'", ",", "container", ")", "weight", "=", "self", ".", "get_style", "(", "'font_weight'", ",", "container", ")", "slant", "=", "self", ".", "get_style", "(", "'font_slant'", ",", "container", ")", "width", "=", "self", ".", "get_style", "(", "'font_width'", ",", "container", ")", "return", "typeface", ".", "get_font", "(", "weight", "=", "weight", ",", "slant", "=", "slant", ",", "width", "=", "width", ")" ]
The :class:`Font` described by this single-styled text's style. If the exact font style as described by the `font_weight`, `font_slant` and `font_width` style attributes is not present in the `typeface`, the closest font available is returned instead, and a warning is printed.
[ "The", ":", "class", ":", "Font", "described", "by", "this", "single", "-", "styled", "text", "s", "style", "." ]
python
train
53.416667
cgrok/cr-async
crasync/models.py
https://github.com/cgrok/cr-async/blob/f65a968e54704168706d137d1ba662f55f8ab852/crasync/models.py#L344-L351
def clan_badge_url(self): '''Returns clan badge url''' if self.clan_tag is None: return None url = self.raw_data.get('clan').get('badge').get('url') if not url: return None return "http://api.cr-api.com" + url
[ "def", "clan_badge_url", "(", "self", ")", ":", "if", "self", ".", "clan_tag", "is", "None", ":", "return", "None", "url", "=", "self", ".", "raw_data", ".", "get", "(", "'clan'", ")", ".", "get", "(", "'badge'", ")", ".", "get", "(", "'url'", ")", "if", "not", "url", ":", "return", "None", "return", "\"http://api.cr-api.com\"", "+", "url" ]
Returns clan badge url
[ "Returns", "clan", "badge", "url" ]
python
train
33.25
DataONEorg/d1_python
lib_common/src/d1_common/type_conversions.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/type_conversions.py#L190-L210
def str_to_v1_str(xml_str): """Convert a API v2 XML doc to v1 XML doc. Removes elements that are only valid for v2 and changes namespace to v1. If doc is already v1, it is returned unchanged. Args: xml_str : str API v2 XML doc. E.g.: ``SystemMetadata v2``. Returns: str : API v1 XML doc. E.g.: ``SystemMetadata v1``. """ if str_is_v1(xml_str): return xml_str etree_obj = str_to_etree(xml_str) strip_v2_elements(etree_obj) etree_replace_namespace(etree_obj, d1_common.types.dataoneTypes_v1.Namespace) return etree_to_str(etree_obj)
[ "def", "str_to_v1_str", "(", "xml_str", ")", ":", "if", "str_is_v1", "(", "xml_str", ")", ":", "return", "xml_str", "etree_obj", "=", "str_to_etree", "(", "xml_str", ")", "strip_v2_elements", "(", "etree_obj", ")", "etree_replace_namespace", "(", "etree_obj", ",", "d1_common", ".", "types", ".", "dataoneTypes_v1", ".", "Namespace", ")", "return", "etree_to_str", "(", "etree_obj", ")" ]
Convert a API v2 XML doc to v1 XML doc. Removes elements that are only valid for v2 and changes namespace to v1. If doc is already v1, it is returned unchanged. Args: xml_str : str API v2 XML doc. E.g.: ``SystemMetadata v2``. Returns: str : API v1 XML doc. E.g.: ``SystemMetadata v1``.
[ "Convert", "a", "API", "v2", "XML", "doc", "to", "v1", "XML", "doc", "." ]
python
train
27.952381
meejah/txtorcon
txtorcon/circuit.py
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/circuit.py#L346-L383
def close(self, **kw): """ This asks Tor to close the underlying circuit object. See :meth:`txtorcon.torstate.TorState.close_circuit` for details. You may pass keyword arguments to take care of any Flags Tor accepts for the CLOSECIRCUIT command. Currently, this is only "IfUnused". So for example: circ.close(IfUnused=True) :return: Deferred which callbacks with this Circuit instance ONLY after Tor has confirmed it is gone (not simply that the CLOSECIRCUIT command has been queued). This could be a while if you included IfUnused. """ # we're already closed; nothing to do if self.state == 'CLOSED': return defer.succeed(None) # someone already called close() but we're not closed yet if self._closing_deferred: d = defer.Deferred() def closed(arg): d.callback(arg) return arg self._closing_deferred.addBoth(closed) return d # actually-close the circuit self._closing_deferred = defer.Deferred() def close_command_is_queued(*args): return self._closing_deferred d = self._torstate.close_circuit(self.id, **kw) d.addCallback(close_command_is_queued) return d
[ "def", "close", "(", "self", ",", "*", "*", "kw", ")", ":", "# we're already closed; nothing to do", "if", "self", ".", "state", "==", "'CLOSED'", ":", "return", "defer", ".", "succeed", "(", "None", ")", "# someone already called close() but we're not closed yet", "if", "self", ".", "_closing_deferred", ":", "d", "=", "defer", ".", "Deferred", "(", ")", "def", "closed", "(", "arg", ")", ":", "d", ".", "callback", "(", "arg", ")", "return", "arg", "self", ".", "_closing_deferred", ".", "addBoth", "(", "closed", ")", "return", "d", "# actually-close the circuit", "self", ".", "_closing_deferred", "=", "defer", ".", "Deferred", "(", ")", "def", "close_command_is_queued", "(", "*", "args", ")", ":", "return", "self", ".", "_closing_deferred", "d", "=", "self", ".", "_torstate", ".", "close_circuit", "(", "self", ".", "id", ",", "*", "*", "kw", ")", "d", ".", "addCallback", "(", "close_command_is_queued", ")", "return", "d" ]
This asks Tor to close the underlying circuit object. See :meth:`txtorcon.torstate.TorState.close_circuit` for details. You may pass keyword arguments to take care of any Flags Tor accepts for the CLOSECIRCUIT command. Currently, this is only "IfUnused". So for example: circ.close(IfUnused=True) :return: Deferred which callbacks with this Circuit instance ONLY after Tor has confirmed it is gone (not simply that the CLOSECIRCUIT command has been queued). This could be a while if you included IfUnused.
[ "This", "asks", "Tor", "to", "close", "the", "underlying", "circuit", "object", ".", "See", ":", "meth", ":", "txtorcon", ".", "torstate", ".", "TorState", ".", "close_circuit", "for", "details", "." ]
python
train
34.763158
QInfer/python-qinfer
src/qinfer/abstract_model.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/abstract_model.py#L218-L239
def are_expparam_dtypes_consistent(self, expparams): """ Returns ``True`` iff all of the given expparams correspond to outcome domains with the same dtype. For efficiency, concrete subclasses should override this method if the result is always ``True``. :param np.ndarray expparams: Array of expparamms of type ``expparams_dtype`` :rtype: ``bool`` """ if self.is_n_outcomes_constant: # This implies that all domains are equal, so this must be true return True # otherwise we have to actually check all the dtypes if expparams.size > 0: domains = self.domain(expparams) first_dtype = domains[0].dtype return all(domain.dtype == first_dtype for domain in domains[1:]) else: return True
[ "def", "are_expparam_dtypes_consistent", "(", "self", ",", "expparams", ")", ":", "if", "self", ".", "is_n_outcomes_constant", ":", "# This implies that all domains are equal, so this must be true", "return", "True", "# otherwise we have to actually check all the dtypes", "if", "expparams", ".", "size", ">", "0", ":", "domains", "=", "self", ".", "domain", "(", "expparams", ")", "first_dtype", "=", "domains", "[", "0", "]", ".", "dtype", "return", "all", "(", "domain", ".", "dtype", "==", "first_dtype", "for", "domain", "in", "domains", "[", "1", ":", "]", ")", "else", ":", "return", "True" ]
Returns ``True`` iff all of the given expparams correspond to outcome domains with the same dtype. For efficiency, concrete subclasses should override this method if the result is always ``True``. :param np.ndarray expparams: Array of expparams of type ``expparams_dtype`` :rtype: ``bool``
[ "Returns", "True", "iff", "all", "of", "the", "given", "expparams", "correspond", "to", "outcome", "domains", "with", "the", "same", "dtype", ".", "For", "efficiency", "concrete", "subclasses", "should", "override", "this", "method", "if", "the", "result", "is", "always", "True", "." ]
python
train
38.545455
soasme/rio-client
rio_client/contrib/flask.py
https://github.com/soasme/rio-client/blob/c6d684c6f9deea5b43f2b05bcaf40714c48b5619/rio_client/contrib/flask.py#L111-L123
def current(self): """A namedtuple contains `uuid`, `project`, `action`. Example:: @app.route('/webhook/broadcast-news') def broadcast_news(): if rio.current.action.startswith('news-'): broadcast(request.get_json()) """ event = request.headers.get('X-RIO-EVENT') data = dict([elem.split('=') for elem in event.split(',')]) return Current(**data)
[ "def", "current", "(", "self", ")", ":", "event", "=", "request", ".", "headers", ".", "get", "(", "'X-RIO-EVENT'", ")", "data", "=", "dict", "(", "[", "elem", ".", "split", "(", "'='", ")", "for", "elem", "in", "event", ".", "split", "(", "','", ")", "]", ")", "return", "Current", "(", "*", "*", "data", ")" ]
A namedtuple contains `uuid`, `project`, `action`. Example:: @app.route('/webhook/broadcast-news') def broadcast_news(): if rio.current.action.startswith('news-'): broadcast(request.get_json())
[ "A", "namedtuple", "contains", "uuid", "project", "action", "." ]
python
train
34.076923
3DLIRIOUS/MeshLabXML
meshlabxml/mlx.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/mlx.py#L270-L316
def handle_error(program_name, cmd, log=None): """Subprocess program error handling Args: program_name (str): name of the subprocess program Returns: break_now (bool): indicate whether calling program should break out of loop """ print('\nHouston, we have a problem.', '\n%s did not finish successfully. Review the log' % program_name, 'file and the input file(s) to see what went wrong.') print('%s command: "%s"' % (program_name, cmd)) if log is not None: print('log: "%s"' % log) print('Where do we go from here?') print(' r - retry running %s (probably after' % program_name, 'you\'ve fixed any problems with the input files)') print(' c - continue on with the script (probably after', 'you\'ve manually re-run and generated the desired', 'output file(s)') print(' x - exit, keeping the TEMP3D files and log') print(' xd - exit, deleting the TEMP3D files and log') while True: choice = input('Select r, c, x (default), or xd: ') if choice not in ('r', 'c', 'x', 'xd'): #print('Please enter a valid option.') choice = 'x' #else: break if choice == 'x': print('Exiting ...') sys.exit(1) elif choice == 'xd': print('Deleting TEMP3D* and log files and exiting ...') util.delete_all('TEMP3D*') if log is not None: os.remove(log) sys.exit(1) elif choice == 'c': print('Continuing on ...') break_now = True elif choice == 'r': print('Retrying %s cmd ...' % program_name) break_now = False return break_now
[ "def", "handle_error", "(", "program_name", ",", "cmd", ",", "log", "=", "None", ")", ":", "print", "(", "'\\nHouston, we have a problem.'", ",", "'\\n%s did not finish successfully. Review the log'", "%", "program_name", ",", "'file and the input file(s) to see what went wrong.'", ")", "print", "(", "'%s command: \"%s\"'", "%", "(", "program_name", ",", "cmd", ")", ")", "if", "log", "is", "not", "None", ":", "print", "(", "'log: \"%s\"'", "%", "log", ")", "print", "(", "'Where do we go from here?'", ")", "print", "(", "' r - retry running %s (probably after'", "%", "program_name", ",", "'you\\'ve fixed any problems with the input files)'", ")", "print", "(", "' c - continue on with the script (probably after'", ",", "'you\\'ve manually re-run and generated the desired'", ",", "'output file(s)'", ")", "print", "(", "' x - exit, keeping the TEMP3D files and log'", ")", "print", "(", "' xd - exit, deleting the TEMP3D files and log'", ")", "while", "True", ":", "choice", "=", "input", "(", "'Select r, c, x (default), or xd: '", ")", "if", "choice", "not", "in", "(", "'r'", ",", "'c'", ",", "'x'", ",", "'xd'", ")", ":", "#print('Please enter a valid option.')", "choice", "=", "'x'", "#else:", "break", "if", "choice", "==", "'x'", ":", "print", "(", "'Exiting ...'", ")", "sys", ".", "exit", "(", "1", ")", "elif", "choice", "==", "'xd'", ":", "print", "(", "'Deleting TEMP3D* and log files and exiting ...'", ")", "util", ".", "delete_all", "(", "'TEMP3D*'", ")", "if", "log", "is", "not", "None", ":", "os", ".", "remove", "(", "log", ")", "sys", ".", "exit", "(", "1", ")", "elif", "choice", "==", "'c'", ":", "print", "(", "'Continuing on ...'", ")", "break_now", "=", "True", "elif", "choice", "==", "'r'", ":", "print", "(", "'Retrying %s cmd ...'", "%", "program_name", ")", "break_now", "=", "False", "return", "break_now" ]
Subprocess program error handling Args: program_name (str): name of the subprocess program Returns: break_now (bool): indicate whether calling program should break out of loop
[ "Subprocess", "program", "error", "handling" ]
python
test
35.234043
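To show how a calling script might consume the True/False result of handle_error, here is a hedged usage sketch; the meshlabserver command line, the file names, and the `from meshlabxml import mlx` import path are illustrative assumptions rather than the project's documented API. For example::

    import subprocess
    from meshlabxml import mlx  # assumed import path for the module above

    cmd = 'meshlabserver -i TEMP3D_in.ply -o TEMP3D_out.ply'  # made-up command
    log = 'TEMP3D_log.txt'
    while True:
        if subprocess.call(cmd, shell=True) == 0:
            break  # the external program finished cleanly
        # handle_error returns True for "continue on" and False for "retry";
        # the exit choices terminate the process inside handle_error itself.
        if mlx.handle_error('meshlabserver', cmd, log=log):
            break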
pushyzheng/flask-rabbitmq
example/consumer/flask_rabbitmq/RabbitMQ.py
https://github.com/pushyzheng/flask-rabbitmq/blob/beecefdf7bb6ff0892388e2bc303aa96931588bd/example/consumer/flask_rabbitmq/RabbitMQ.py#L147-L180
def send_sync(self, body, exchange, key): """ 发送并同步接受回复消息 :return: """ callback_queue = self.declare_queue(exclusive=True, auto_delete=True) # 得到随机回调队列名 self._channel.basic_consume(self.on_response, # 客户端消费回调队列 no_ack=True, queue=callback_queue) corr_id = str(uuid.uuid4()) # 生成客户端请求id self.data[corr_id] = { 'isAccept': False, 'result': None, 'callbackQueue': callback_queue } self._channel.basic_publish( # 发送数据给服务端 exchange=exchange, routing_key=key, body=body, properties=pika.BasicProperties( reply_to=callback_queue, correlation_id=corr_id, ) ) while not self.data[corr_id]['isAccept']: # 判断是否接收到服务端返回的消息 self._connection.process_data_events() time.sleep(0.3) continue logger.info("Got the RPC server response => {}".format(self.data[corr_id]['result'])) return self.data[corr_id]['result']
[ "def", "send_sync", "(", "self", ",", "body", ",", "exchange", ",", "key", ")", ":", "callback_queue", "=", "self", ".", "declare_queue", "(", "exclusive", "=", "True", ",", "auto_delete", "=", "True", ")", "# 得到随机回调队列名", "self", ".", "_channel", ".", "basic_consume", "(", "self", ".", "on_response", ",", "# 客户端消费回调队列", "no_ack", "=", "True", ",", "queue", "=", "callback_queue", ")", "corr_id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "# 生成客户端请求id", "self", ".", "data", "[", "corr_id", "]", "=", "{", "'isAccept'", ":", "False", ",", "'result'", ":", "None", ",", "'callbackQueue'", ":", "callback_queue", "}", "self", ".", "_channel", ".", "basic_publish", "(", "# 发送数据给服务端", "exchange", "=", "exchange", ",", "routing_key", "=", "key", ",", "body", "=", "body", ",", "properties", "=", "pika", ".", "BasicProperties", "(", "reply_to", "=", "callback_queue", ",", "correlation_id", "=", "corr_id", ",", ")", ")", "while", "not", "self", ".", "data", "[", "corr_id", "]", "[", "'isAccept'", "]", ":", "# 判断是否接收到服务端返回的消息", "self", ".", "_connection", ".", "process_data_events", "(", ")", "time", ".", "sleep", "(", "0.3", ")", "continue", "logger", ".", "info", "(", "\"Got the RPC server response => {}\"", ".", "format", "(", "self", ".", "data", "[", "corr_id", "]", "[", "'result'", "]", ")", ")", "return", "self", ".", "data", "[", "corr_id", "]", "[", "'result'", "]" ]
Send a message and synchronously receive the reply :return:
[ "发送并同步接受回复消息", ":", "return", ":" ]
python
train
34.205882
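The send_sync method above implements a classic RabbitMQ RPC round trip (exclusive callback queue, correlation id, blocking wait for the response). A minimal usage sketch, assuming `rpc` is an already-connected instance of the RabbitMQ helper shown here; the exchange and routing key names are made up for illustration. For example::

    # `rpc` is assumed to be a connected RabbitMQ instance from the example app.
    reply = rpc.send_sync(
        body='{"n": 7}',           # request payload handed to the RPC server
        exchange='rpc_exchange',   # hypothetical exchange name
        key='rpc.fib',             # hypothetical routing key
    )
    print('server replied:', reply)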
rbarrois/xworkflows
src/xworkflows/base.py
https://github.com/rbarrois/xworkflows/blob/4a94b04ba83cb43f61d4b0f7db6964a667c86b5b/src/xworkflows/base.py#L988-L990
def _add_workflow(mcs, field_name, state_field, attrs): """Attach a workflow to the attribute list (create a StateProperty).""" attrs[field_name] = StateProperty(state_field.workflow, field_name)
[ "def", "_add_workflow", "(", "mcs", ",", "field_name", ",", "state_field", ",", "attrs", ")", ":", "attrs", "[", "field_name", "]", "=", "StateProperty", "(", "state_field", ".", "workflow", ",", "field_name", ")" ]
Attach a workflow to the attribute list (create a StateProperty).
[ "Attach", "a", "workflow", "to", "the", "attribute", "list", "(", "create", "a", "StateProperty", ")", "." ]
python
train
69.666667
blockstack/blockstack-core
blockstack/lib/operations/transfer.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/transfer.py#L87-L146
def find_transfer_consensus_hash( name_rec, block_id, vtxindex, nameop_consensus_hash ): """ Given a name record, find the last consensus hash set by a non-NAME_TRANSFER operation. @name_rec is the current name record, before this NAME_TRANSFER. @block_id is the current block height. @vtxindex is the relative index of this transaction in this block. @nameop_consensus_hash is the consensus hash given in the NAME_TRANSFER. This preserves compatibility from a bug prior to 0.14.x where the consensus hash from a NAME_TRANSFER is ignored in favor of the last consensus hash (if any) supplied by an operation to the affected name. This method finds that consensus hash (if present). The behavior emulated comes from the fact that in the original release of this software, the fields from a name operation fed into the block's consensus hash included the consensus hashes given in each of the a name operations' transactions. However, a quirk in the behavior of the NAME_TRANSFER-handling code prevented this from happening consistently for NAME_TRANSFERs. Specifically, the only time a NAME_TRANSFER's consensus hash was used to calculate the block's new consensus hash was if the name it affected had never been affected by a prior state transition other than a NAME_TRANSFER. If the name was affected by a prior state transition that set a consensus hash, then that prior state transition's consensus hash (not the NAME_TRANSFER's) would be used in the block consensus hash calculation. If the name was NOT affected by a prior state transition that set a consensus hash (back to the point of its last NAME_REGISTRATION), then the consensus hash fed into the block would be that from the NAME_TRANSFER itself. In practice, the only name operation that consistently sets a consensus hash is NAME_UPDATE. As for the others: * NAME_REGISTRATION sets it to None * NAME_IMPORT sets it to None * NAME_RENEWAL doesn't set it at all; it just takes what was already there * NAME_TRANSFER only sets it if there were no prior NAME_UPDATEs between now and the last NAME_REGISTRATION or NAME_IMPORT. 
Here are some example name histories, and the consensus hash that should be used to calculate this block's consensus hash: NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_TRANSFER: nameop_consensus_hash NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_RENEWAL, NAME_TRANSFER: whatever it was from the last NAME_UPDATE NAME_PREORDER, NAME_REGISTRATION, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash NAME_IMPORT, NAME_TRANSFER: nameop_consensus_hash NAME_IMPORT, NAME_UPDATE, NAME_TRANSFER whatever it was from the last NAME_UPDATE NAME_IMPORT, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash NAME_IMPORT, NAME_TRANSFER, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash """ # work backwards from the last block for historic_block_number in reversed(sorted(name_rec['history'].keys())): for historic_state in reversed(name_rec['history'][historic_block_number]): if historic_state['block_number'] > block_id or (historic_state['block_number'] == block_id and historic_state['vtxindex'] > vtxindex): # from the future continue if historic_state['op'] in [NAME_REGISTRATION, NAME_IMPORT]: # out of history without finding a NAME_UPDATE return nameop_consensus_hash if historic_state['op'] == NAME_UPDATE: # reuse this consensus hash assert historic_state['consensus_hash'] is not None, 'BUG: NAME_UPDATE did not set "consensus_hash": {}'.format(historic_state) return historic_state['consensus_hash'] return nameop_consensus_hash
[ "def", "find_transfer_consensus_hash", "(", "name_rec", ",", "block_id", ",", "vtxindex", ",", "nameop_consensus_hash", ")", ":", "# work backwards from the last block", "for", "historic_block_number", "in", "reversed", "(", "sorted", "(", "name_rec", "[", "'history'", "]", ".", "keys", "(", ")", ")", ")", ":", "for", "historic_state", "in", "reversed", "(", "name_rec", "[", "'history'", "]", "[", "historic_block_number", "]", ")", ":", "if", "historic_state", "[", "'block_number'", "]", ">", "block_id", "or", "(", "historic_state", "[", "'block_number'", "]", "==", "block_id", "and", "historic_state", "[", "'vtxindex'", "]", ">", "vtxindex", ")", ":", "# from the future", "continue", "if", "historic_state", "[", "'op'", "]", "in", "[", "NAME_REGISTRATION", ",", "NAME_IMPORT", "]", ":", "# out of history without finding a NAME_UPDATE", "return", "nameop_consensus_hash", "if", "historic_state", "[", "'op'", "]", "==", "NAME_UPDATE", ":", "# reuse this consensus hash ", "assert", "historic_state", "[", "'consensus_hash'", "]", "is", "not", "None", ",", "'BUG: NAME_UPDATE did not set \"consensus_hash\": {}'", ".", "format", "(", "historic_state", ")", "return", "historic_state", "[", "'consensus_hash'", "]", "return", "nameop_consensus_hash" ]
Given a name record, find the last consensus hash set by a non-NAME_TRANSFER operation. @name_rec is the current name record, before this NAME_TRANSFER. @block_id is the current block height. @vtxindex is the relative index of this transaction in this block. @nameop_consensus_hash is the consensus hash given in the NAME_TRANSFER. This preserves compatibility from a bug prior to 0.14.x where the consensus hash from a NAME_TRANSFER is ignored in favor of the last consensus hash (if any) supplied by an operation to the affected name. This method finds that consensus hash (if present). The behavior emulated comes from the fact that in the original release of this software, the fields from a name operation fed into the block's consensus hash included the consensus hashes given in each of the a name operations' transactions. However, a quirk in the behavior of the NAME_TRANSFER-handling code prevented this from happening consistently for NAME_TRANSFERs. Specifically, the only time a NAME_TRANSFER's consensus hash was used to calculate the block's new consensus hash was if the name it affected had never been affected by a prior state transition other than a NAME_TRANSFER. If the name was affected by a prior state transition that set a consensus hash, then that prior state transition's consensus hash (not the NAME_TRANSFER's) would be used in the block consensus hash calculation. If the name was NOT affected by a prior state transition that set a consensus hash (back to the point of its last NAME_REGISTRATION), then the consensus hash fed into the block would be that from the NAME_TRANSFER itself. In practice, the only name operation that consistently sets a consensus hash is NAME_UPDATE. As for the others: * NAME_REGISTRATION sets it to None * NAME_IMPORT sets it to None * NAME_RENEWAL doesn't set it at all; it just takes what was already there * NAME_TRANSFER only sets it if there were no prior NAME_UPDATEs between now and the last NAME_REGISTRATION or NAME_IMPORT. Here are some example name histories, and the consensus hash that should be used to calculate this block's consensus hash: NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_TRANSFER: nameop_consensus_hash NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_TRANSFER, NAME_UPDATE, NAME_TRANSFER: whatever it was from the last NAME_UPDATE NAME_PREORDER, NAME_REGISTRATION, NAME_UPDATE, NAME_RENEWAL, NAME_TRANSFER: whatever it was from the last NAME_UPDATE NAME_PREORDER, NAME_REGISTRATION, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER, NAME_RENEWAL, NAME_TRANSFER: nameop_consensus_hash NAME_IMPORT, NAME_TRANSFER: nameop_consensus_hash NAME_IMPORT, NAME_UPDATE, NAME_TRANSFER whatever it was from the last NAME_UPDATE NAME_IMPORT, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash NAME_IMPORT, NAME_TRANSFER, NAME_PREORDER, NAME_REGISTRATION, NAME_TRANSFER: nameop_consensus_hash
[ "Given", "a", "name", "record", "find", "the", "last", "consensus", "hash", "set", "by", "a", "non", "-", "NAME_TRANSFER", "operation", "." ]
python
train
76.4
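Because the docstring's rule is easiest to grasp by running it, here is a simplified, self-contained sketch of the same backwards walk over a toy history; it is not code from the repository, it drops the block/vtxindex filtering, and the opcode constants are stand-ins. For example::

    NAME_REGISTRATION, NAME_IMPORT, NAME_UPDATE = 'reg', 'import', 'update'

    def pick_consensus_hash(history, nameop_consensus_hash):
        # Walk blocks newest-first: a NAME_UPDATE supplies its consensus hash,
        # while a NAME_REGISTRATION/NAME_IMPORT ends the search and the
        # NAME_TRANSFER's own hash is used instead.
        for block in sorted(history, reverse=True):
            for state in reversed(history[block]):
                if state['op'] in (NAME_REGISTRATION, NAME_IMPORT):
                    return nameop_consensus_hash
                if state['op'] == NAME_UPDATE:
                    return state['consensus_hash']
        return nameop_consensus_hash

    toy_history = {
        100: [{'op': NAME_REGISTRATION, 'consensus_hash': None}],
        110: [{'op': NAME_UPDATE, 'consensus_hash': 'abcd1234'}],
    }
    # The NAME_UPDATE at block 110 wins over the NAME_TRANSFER's own hash.
    assert pick_consensus_hash(toy_history, 'ffff0000') == 'abcd1234'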
NaPs/Kolekto
kolekto/printer.py
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/printer.py#L263-L274
def ask(self, question, default=False): """ Ask a y/n question to the user. """ choices = '[%s/%s]' % ('Y' if default else 'y', 'n' if default else 'N') while True: response = raw_input('%s %s' % (question, choices)).strip() if not response: return default elif response in 'yYoO': return True elif response in 'nN': return False
[ "def", "ask", "(", "self", ",", "question", ",", "default", "=", "False", ")", ":", "choices", "=", "'[%s/%s]'", "%", "(", "'Y'", "if", "default", "else", "'y'", ",", "'n'", "if", "default", "else", "'N'", ")", "while", "True", ":", "response", "=", "raw_input", "(", "'%s %s'", "%", "(", "question", ",", "choices", ")", ")", ".", "strip", "(", ")", "if", "not", "response", ":", "return", "default", "elif", "response", "in", "'yYoO'", ":", "return", "True", "elif", "response", "in", "'nN'", ":", "return", "False" ]
Ask a y/n question to the user.
[ "Ask", "a", "y", "/", "n", "question", "to", "the", "user", "." ]
python
train
37.166667
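A small usage sketch for the y/n prompt above; the import path and the ready-made printer instance are assumptions about how Kolekto wires this up, not documented API. For example::

    # Assumes the module exposes a ready-to-use printer object with ask().
    from kolekto.printer import printer  # hypothetical import

    if printer.ask('Delete the duplicate movie?', default=False):
        print('Deleting...')
    else:
        print('Keeping it.')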
JustinLovinger/optimal
optimal/algorithms/genalg.py
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/algorithms/genalg.py#L109-L144
def _new_population_genalg(population, fitnesses, mutation_chance=0.02, crossover_chance=0.7, selection_function=gaoperators.tournament_selection, crossover_function=gaoperators.one_point_crossover): """Perform all genetic algorithm operations on a population, and return a new population. population must have an even number of chromosomes. Args: population: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]] fitness: A list of fitnesses that correspond with chromosomes in the population, ex. [1.2, 10.8] mutation_chance: the chance that a bit will be flipped during mutation crossover_chance: the chance that two parents will be crossed during crossover selection_function: A function that will select parents for crossover and mutation crossover_function: A function that will cross two parents Returns: list; A new population of chromosomes, that should be more fit. """ # Selection # Create the population of parents that will be crossed and mutated. intermediate_population = selection_function(population, fitnesses) # Crossover new_population = _crossover(intermediate_population, crossover_chance, crossover_function) # Mutation # Mutates chromosomes in place gaoperators.random_flip_mutate(new_population, mutation_chance) # Return new population return new_population
[ "def", "_new_population_genalg", "(", "population", ",", "fitnesses", ",", "mutation_chance", "=", "0.02", ",", "crossover_chance", "=", "0.7", ",", "selection_function", "=", "gaoperators", ".", "tournament_selection", ",", "crossover_function", "=", "gaoperators", ".", "one_point_crossover", ")", ":", "# Selection", "# Create the population of parents that will be crossed and mutated.", "intermediate_population", "=", "selection_function", "(", "population", ",", "fitnesses", ")", "# Crossover", "new_population", "=", "_crossover", "(", "intermediate_population", ",", "crossover_chance", ",", "crossover_function", ")", "# Mutation", "# Mutates chromosomes in place", "gaoperators", ".", "random_flip_mutate", "(", "new_population", ",", "mutation_chance", ")", "# Return new population", "return", "new_population" ]
Perform all genetic algorithm operations on a population, and return a new population. population must have an even number of chromosomes. Args: population: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]] fitness: A list of fitnesses that correspond with chromosomes in the population, ex. [1.2, 10.8] mutation_chance: the chance that a bit will be flipped during mutation crossover_chance: the chance that two parents will be crossed during crossover selection_function: A function that will select parents for crossover and mutation crossover_function: A function that will cross two parents Returns: list; A new population of chromosomes, that should be more fit.
[ "Perform", "all", "genetic", "algorithm", "operations", "on", "a", "population", "and", "return", "a", "new", "population", "." ]
python
train
43.083333
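Since the docstring spells out the selection/crossover/mutation pipeline, a toy invocation makes the contract concrete; the population and fitness values below are arbitrary, and the call relies on the module-level defaults for the operator functions. For example::

    # Four 4-bit chromosomes (an even count, as the docstring requires),
    # with made-up fitness values.
    population = [[0, 1, 1, 0], [1, 0, 1, 0], [1, 1, 1, 1], [0, 0, 0, 1]]
    fitnesses = [1.2, 10.8, 3.5, 0.4]

    new_population = _new_population_genalg(population, fitnesses,
                                            mutation_chance=0.05,
                                            crossover_chance=0.7)
    # The population size should be preserved across one generation.
    assert len(new_population) == len(population)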
Iotic-Labs/py-IoticAgent
src/IoticAgent/IOT/Thing.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/Thing.py#L283-L299
def set_meta_rdf(self, rdf, fmt='n3'): """Set the metadata for this Thing in RDF fmt Advanced users who want to manipulate the RDF for this Thing directly without the [ThingMeta](ThingMeta.m.html#IoticAgent.IOT.ThingMeta.ThingMeta) helper object Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `fmt` (optional) (string) The format of RDF you have sent. Valid formats are: "xml", "n3", "turtle" """ evt = self._client._request_entity_meta_set(self.__lid, rdf, fmt=fmt) self._client._wait_and_except_if_failed(evt)
[ "def", "set_meta_rdf", "(", "self", ",", "rdf", ",", "fmt", "=", "'n3'", ")", ":", "evt", "=", "self", ".", "_client", ".", "_request_entity_meta_set", "(", "self", ".", "__lid", ",", "rdf", ",", "fmt", "=", "fmt", ")", "self", ".", "_client", ".", "_wait_and_except_if_failed", "(", "evt", ")" ]
Set the metadata for this Thing in RDF fmt Advanced users who want to manipulate the RDF for this Thing directly without the [ThingMeta](ThingMeta.m.html#IoticAgent.IOT.ThingMeta.ThingMeta) helper object Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `fmt` (optional) (string) The format of RDF you have sent. Valid formats are: "xml", "n3", "turtle"
[ "Set", "the", "metadata", "for", "this", "Thing", "in", "RDF", "fmt" ]
python
train
50
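A hedged sketch of calling set_meta_rdf with a tiny Turtle document; the `thing` variable is assumed to be a Thing obtained from the agent elsewhere, and the RDF subject and labels are purely illustrative. For example::

    rdf = '''
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    <urn:example:weather-station> rdfs:label "Weather station" ;
        rdfs:comment "Illustrative metadata only" .
    '''
    # `thing` is assumed to be an IoticAgent.IOT.Thing instance created elsewhere.
    thing.set_meta_rdf(rdf, fmt='turtle')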
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L860-L883
def path(self, startLayer, endLayer): """ Used in error checking with verifyArchitecture() and in prop_from(). """ next = {startLayer.name : startLayer} visited = {} while next != {}: for item in list(next.items()): # item[0] : name, item[1] : layer reference # add layer to visited dict and del from next visited[item[0]] = item[1] del next[item[0]] for connection in self.connections: if connection.fromLayer.name == item[0]: if connection.toLayer.name == endLayer.name: return 1 # a path! elif connection.toLayer.name in next: pass # already in the list to be traversed elif connection.toLayer.name in visited: pass # already been there else: # add to next next[connection.toLayer.name] = connection.toLayer return 0 # didn't find it and ran out of places to go
[ "def", "path", "(", "self", ",", "startLayer", ",", "endLayer", ")", ":", "next", "=", "{", "startLayer", ".", "name", ":", "startLayer", "}", "visited", "=", "{", "}", "while", "next", "!=", "{", "}", ":", "for", "item", "in", "list", "(", "next", ".", "items", "(", ")", ")", ":", "# item[0] : name, item[1] : layer reference", "# add layer to visited dict and del from next ", "visited", "[", "item", "[", "0", "]", "]", "=", "item", "[", "1", "]", "del", "next", "[", "item", "[", "0", "]", "]", "for", "connection", "in", "self", ".", "connections", ":", "if", "connection", ".", "fromLayer", ".", "name", "==", "item", "[", "0", "]", ":", "if", "connection", ".", "toLayer", ".", "name", "==", "endLayer", ".", "name", ":", "return", "1", "# a path!", "elif", "connection", ".", "toLayer", ".", "name", "in", "next", ":", "pass", "# already in the list to be traversed", "elif", "connection", ".", "toLayer", ".", "name", "in", "visited", ":", "pass", "# already been there", "else", ":", "# add to next", "next", "[", "connection", ".", "toLayer", ".", "name", "]", "=", "connection", ".", "toLayer", "return", "0", "# didn't find it and ran out of places to go" ]
Used in error checking with verifyArchitecture() and in prop_from().
[ "Used", "in", "error", "checking", "with", "verifyArchitecture", "()", "and", "in", "prop_from", "()", "." ]
python
train
47.75
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/gallery/gallery_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/gallery/gallery_client.py#L1529-L1548
def create_review(self, review, pub_name, ext_name): """CreateReview. [Preview API] Creates a new review for an extension :param :class:`<Review> <azure.devops.v5_0.gallery.models.Review>` review: Review to be created for the extension :param str pub_name: Name of the publisher who published the extension :param str ext_name: Name of the extension :rtype: :class:`<Review> <azure.devops.v5_0.gallery.models.Review>` """ route_values = {} if pub_name is not None: route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str') if ext_name is not None: route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str') content = self._serialize.body(review, 'Review') response = self._send(http_method='POST', location_id='e6e85b9d-aa70-40e6-aa28-d0fbf40b91a3', version='5.0-preview.1', route_values=route_values, content=content) return self._deserialize('Review', response)
[ "def", "create_review", "(", "self", ",", "review", ",", "pub_name", ",", "ext_name", ")", ":", "route_values", "=", "{", "}", "if", "pub_name", "is", "not", "None", ":", "route_values", "[", "'pubName'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'pub_name'", ",", "pub_name", ",", "'str'", ")", "if", "ext_name", "is", "not", "None", ":", "route_values", "[", "'extName'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'ext_name'", ",", "ext_name", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "review", ",", "'Review'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'e6e85b9d-aa70-40e6-aa28-d0fbf40b91a3'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'Review'", ",", "response", ")" ]
CreateReview. [Preview API] Creates a new review for an extension :param :class:`<Review> <azure.devops.v5_0.gallery.models.Review>` review: Review to be created for the extension :param str pub_name: Name of the publisher who published the extension :param str ext_name: Name of the extension :rtype: :class:`<Review> <azure.devops.v5_0.gallery.models.Review>`
[ "CreateReview", ".", "[", "Preview", "API", "]", "Creates", "a", "new", "review", "for", "an", "extension", ":", "param", ":", "class", ":", "<Review", ">", "<azure", ".", "devops", ".", "v5_0", ".", "gallery", ".", "models", ".", "Review", ">", "review", ":", "Review", "to", "be", "created", "for", "the", "extension", ":", "param", "str", "pub_name", ":", "Name", "of", "the", "publisher", "who", "published", "the", "extension", ":", "param", "str", "ext_name", ":", "Name", "of", "the", "extension", ":", "rtype", ":", ":", "class", ":", "<Review", ">", "<azure", ".", "devops", ".", "v5_0", ".", "gallery", ".", "models", ".", "Review", ">" ]
python
train
56.25
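An illustrative call to create_review, assuming a GalleryClient has already been obtained from an azure-devops Connection; the publisher and extension names are placeholders, and the Review fields shown are a guess at the model's common attributes rather than a documented minimum. For example::

    from azure.devops.v5_0.gallery.models import Review

    # `gallery_client` is assumed to have been obtained from the Connection's
    # client factory elsewhere in the application.
    review = Review(rating=5, title='Great extension', text='Works as advertised.')
    created = gallery_client.create_review(review,
                                           pub_name='example-publisher',
                                           ext_name='example-extension')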
yakupadakli/python-unsplash
unsplash/models.py
https://github.com/yakupadakli/python-unsplash/blob/6e43dce3225237e1b8111fd475fb98b1ea33972c/unsplash/models.py#L18-L25
def parse_list(cls, data): """Parse a list of JSON objects into a result set of model instances.""" results = ResultSet() data = data or [] for obj in data: if obj: results.append(cls.parse(obj)) return results
[ "def", "parse_list", "(", "cls", ",", "data", ")", ":", "results", "=", "ResultSet", "(", ")", "data", "=", "data", "or", "[", "]", "for", "obj", "in", "data", ":", "if", "obj", ":", "results", ".", "append", "(", "cls", ".", "parse", "(", "obj", ")", ")", "return", "results" ]
Parse a list of JSON objects into a result set of model instances.
[ "Parse", "a", "list", "of", "JSON", "objects", "into", "a", "result", "set", "of", "model", "instances", "." ]
python
train
33.875
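Finally, a sketch of how a concrete model class typically consumes parse_list; the Photo class name and the JSON payload are illustrative, and the only behaviour relied on is the skipping of falsy entries shown above. For example::

    # Assumes Photo subclasses Model and implements parse() for a single dict.
    from unsplash.models import Photo  # hypothetical concrete model

    data = [{'id': 'abc123', 'width': 400}, None, {'id': 'def456', 'width': 800}]
    photos = Photo.parse_list(data)   # the None entry is skipped
    print(len(photos))                # -> 2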