Dataset columns (name, feature type, observed value lengths):

  nwo                 stringlengths   5 .. 106
  sha                 stringlengths   40 .. 40
  path                stringlengths   4 .. 174
  language            stringclasses   1 value
  identifier          stringlengths   1 .. 140
  parameters          stringlengths   0 .. 87.7k
  argument_list       stringclasses   1 value
  return_statement    stringlengths   0 .. 426k
  docstring           stringlengths   0 .. 64.3k
  docstring_summary   stringlengths   0 .. 26.3k
  docstring_tokens    list
  function            stringlengths   18 .. 4.83M
  function_tokens     list
  url                 stringlengths   83 .. 304
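Rows follow below as flat field sequences in the column order above. A minimal loading sketch, assuming this dump comes from a Hugging Face-style dataset; the identifier "org/code-functions" is a placeholder, not the real dataset name:

# Hypothetical access sketch; "org/code-functions" is a placeholder identifier.
from datasets import load_dataset

ds = load_dataset("org/code-functions", split="train")
row = ds[0]
print(row["nwo"], row["identifier"])  # e.g. "GoSecure/pyrdp BaseEventHandler.onFastPathFragment"
print(row["url"])                     # permalink to the function in its repository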
GoSecure/pyrdp
abd8b8762b6d7fd0e49d4a927b529f892b412743
pyrdp/player/BaseEventHandler.py
python
BaseEventHandler.onFastPathFragment
(self, pdu: PlayerPDU)
[]
def onFastPathFragment(self, pdu: PlayerPDU):
    parser = BasicFastPathParser(ParserMode.CLIENT)
    pdu = parser.parse(pdu.payload)

    for event in pdu.events:
        reassembledEvent = self.reassembleEvent(event)

        if reassembledEvent is not None:
            self.onFastPathOutput(reassembledEvent)
[ "def", "onFastPathFragment", "(", "self", ",", "pdu", ":", "PlayerPDU", ")", ":", "parser", "=", "BasicFastPathParser", "(", "ParserMode", ".", "CLIENT", ")", "pdu", "=", "parser", ".", "parse", "(", "pdu", ".", "payload", ")", "for", "event", "in", "pdu", ".", "events", ":", "reassembledEvent", "=", "self", ".", "reassembleEvent", "(", "event", ")", "if", "reassembledEvent", "is", "not", "None", ":", "self", ".", "onFastPathOutput", "(", "reassembledEvent", ")" ]
https://github.com/GoSecure/pyrdp/blob/abd8b8762b6d7fd0e49d4a927b529f892b412743/pyrdp/player/BaseEventHandler.py#L136-L144
smicallef/spiderfoot
fd4bf9394c9ab3ecc90adc3115c56349fb23165b
sflib.py
python
SpiderFoot.parseIBANNumbers
(self, data: str)
return list(ibans)
Find all International Bank Account Numbers (IBANs) within the supplied content. Extracts possible IBANs using a generic regex. Checks whether possible IBANs are valid or not using country-wise length check and Mod 97 algorithm. Args: data (str): text to search for IBANs Returns: list: list of IBAN
Find all International Bank Account Numbers (IBANs) within the supplied content.
[ "Find", "all", "International", "Bank", "Account", "Numbers", "(", "IBANs", ")", "within", "the", "supplied", "content", "." ]
def parseIBANNumbers(self, data: str) -> list:
    """Find all International Bank Account Numbers (IBANs) within the supplied content.

    Extracts possible IBANs using a generic regex.

    Checks whether possible IBANs are valid or not
    using country-wise length check and Mod 97 algorithm.

    Args:
        data (str): text to search for IBANs

    Returns:
        list: list of IBAN
    """
    if not isinstance(data, str):
        return list()

    ibans = set()

    # Dictionary of country codes and their respective IBAN lengths
    ibanCountryLengths = {
        "AL": 28, "AD": 24, "AT": 20, "AZ": 28, "ME": 22, "BH": 22,
        "BY": 28, "BE": 16, "BA": 20, "BR": 29, "BG": 22, "CR": 22,
        "HR": 21, "CY": 28, "CZ": 24, "DK": 18, "DO": 28, "EG": 29,
        "SV": 28, "FO": 18, "FI": 18, "FR": 27, "GE": 22, "DE": 22,
        "GI": 23, "GR": 27, "GL": 18, "GT": 28, "VA": 22, "HU": 28,
        "IS": 26, "IQ": 23, "IE": 22, "IL": 23, "JO": 30, "KZ": 20,
        "XK": 20, "KW": 30, "LV": 21, "LB": 28, "LI": 21, "LT": 20,
        "LU": 20, "MT": 31, "MR": 27, "MU": 30, "MD": 24, "MC": 27,
        "DZ": 24, "AO": 25, "BJ": 28, "VG": 24, "BF": 27, "BI": 16,
        "CM": 27, "CV": 25, "CG": 27, "EE": 20, "GA": 27, "GG": 22,
        "IR": 26, "IM": 22, "IT": 27, "CI": 28, "JE": 22, "MK": 19,
        "MG": 27, "ML": 28, "MZ": 25, "NL": 18, "NO": 15, "PK": 24,
        "PS": 29, "PL": 28, "PT": 25, "QA": 29, "RO": 24, "LC": 32,
        "SM": 27, "ST": 25, "SA": 24, "SN": 28, "RS": 22, "SC": 31,
        "SK": 24, "SI": 19, "ES": 24, "CH": 21, "TL": 23, "TN": 24,
        "TR": 26, "UA": 29, "AE": 23, "GB": 22, "SE": 24
    }

    # Normalize input data to remove whitespace
    data = data.replace(" ", "")

    # Extract alphanumeric characters of lengths ranging from 15 to 32
    # and starting with two characters
    matches = re.findall("[A-Za-z]{2}[A-Za-z0-9]{13,30}", data)

    for match in matches:
        iban = match.upper()

        countryCode = iban[0:2]

        if countryCode not in ibanCountryLengths.keys():
            continue

        if len(iban) != ibanCountryLengths[countryCode]:
            continue

        # Convert IBAN to integer format.
        # Move the first 4 characters to the end of the string,
        # then convert all characters to integers; where A = 10, B = 11, ...., Z = 35
        iban_int = iban[4:] + iban[0:4]
        for character in iban_int:
            if character.isalpha():
                iban_int = iban_int.replace(character, str((ord(character) - 65) + 10))

        # Check IBAN integer mod 97 for remainder
        if int(iban_int) % 97 != 1:
            continue

        self.debug("Found IBAN: %s" % iban)
        ibans.add(iban)

    return list(ibans)
[ "def", "parseIBANNumbers", "(", "self", ",", "data", ":", "str", ")", "->", "list", ":", "if", "not", "isinstance", "(", "data", ",", "str", ")", ":", "return", "list", "(", ")", "ibans", "=", "set", "(", ")", "# Dictionary of country codes and their respective IBAN lengths", "ibanCountryLengths", "=", "{", "\"AL\"", ":", "28", ",", "\"AD\"", ":", "24", ",", "\"AT\"", ":", "20", ",", "\"AZ\"", ":", "28", ",", "\"ME\"", ":", "22", ",", "\"BH\"", ":", "22", ",", "\"BY\"", ":", "28", ",", "\"BE\"", ":", "16", ",", "\"BA\"", ":", "20", ",", "\"BR\"", ":", "29", ",", "\"BG\"", ":", "22", ",", "\"CR\"", ":", "22", ",", "\"HR\"", ":", "21", ",", "\"CY\"", ":", "28", ",", "\"CZ\"", ":", "24", ",", "\"DK\"", ":", "18", ",", "\"DO\"", ":", "28", ",", "\"EG\"", ":", "29", ",", "\"SV\"", ":", "28", ",", "\"FO\"", ":", "18", ",", "\"FI\"", ":", "18", ",", "\"FR\"", ":", "27", ",", "\"GE\"", ":", "22", ",", "\"DE\"", ":", "22", ",", "\"GI\"", ":", "23", ",", "\"GR\"", ":", "27", ",", "\"GL\"", ":", "18", ",", "\"GT\"", ":", "28", ",", "\"VA\"", ":", "22", ",", "\"HU\"", ":", "28", ",", "\"IS\"", ":", "26", ",", "\"IQ\"", ":", "23", ",", "\"IE\"", ":", "22", ",", "\"IL\"", ":", "23", ",", "\"JO\"", ":", "30", ",", "\"KZ\"", ":", "20", ",", "\"XK\"", ":", "20", ",", "\"KW\"", ":", "30", ",", "\"LV\"", ":", "21", ",", "\"LB\"", ":", "28", ",", "\"LI\"", ":", "21", ",", "\"LT\"", ":", "20", ",", "\"LU\"", ":", "20", ",", "\"MT\"", ":", "31", ",", "\"MR\"", ":", "27", ",", "\"MU\"", ":", "30", ",", "\"MD\"", ":", "24", ",", "\"MC\"", ":", "27", ",", "\"DZ\"", ":", "24", ",", "\"AO\"", ":", "25", ",", "\"BJ\"", ":", "28", ",", "\"VG\"", ":", "24", ",", "\"BF\"", ":", "27", ",", "\"BI\"", ":", "16", ",", "\"CM\"", ":", "27", ",", "\"CV\"", ":", "25", ",", "\"CG\"", ":", "27", ",", "\"EE\"", ":", "20", ",", "\"GA\"", ":", "27", ",", "\"GG\"", ":", "22", ",", "\"IR\"", ":", "26", ",", "\"IM\"", ":", "22", ",", "\"IT\"", ":", "27", ",", "\"CI\"", ":", "28", ",", "\"JE\"", ":", "22", ",", "\"MK\"", ":", "19", ",", "\"MG\"", ":", "27", ",", "\"ML\"", ":", "28", ",", "\"MZ\"", ":", "25", ",", "\"NL\"", ":", "18", ",", "\"NO\"", ":", "15", ",", "\"PK\"", ":", "24", ",", "\"PS\"", ":", "29", ",", "\"PL\"", ":", "28", ",", "\"PT\"", ":", "25", ",", "\"QA\"", ":", "29", ",", "\"RO\"", ":", "24", ",", "\"LC\"", ":", "32", ",", "\"SM\"", ":", "27", ",", "\"ST\"", ":", "25", ",", "\"SA\"", ":", "24", ",", "\"SN\"", ":", "28", ",", "\"RS\"", ":", "22", ",", "\"SC\"", ":", "31", ",", "\"SK\"", ":", "24", ",", "\"SI\"", ":", "19", ",", "\"ES\"", ":", "24", ",", "\"CH\"", ":", "21", ",", "\"TL\"", ":", "23", ",", "\"TN\"", ":", "24", ",", "\"TR\"", ":", "26", ",", "\"UA\"", ":", "29", ",", "\"AE\"", ":", "23", ",", "\"GB\"", ":", "22", ",", "\"SE\"", ":", "24", "}", "# Normalize input data to remove whitespace", "data", "=", "data", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "# Extract alphanumeric characters of lengths ranging from 15 to 32", "# and starting with two characters", "matches", "=", "re", ".", "findall", "(", "\"[A-Za-z]{2}[A-Za-z0-9]{13,30}\"", ",", "data", ")", "for", "match", "in", "matches", ":", "iban", "=", "match", ".", "upper", "(", ")", "countryCode", "=", "iban", "[", "0", ":", "2", "]", "if", "countryCode", "not", "in", "ibanCountryLengths", ".", "keys", "(", ")", ":", "continue", "if", "len", "(", "iban", ")", "!=", "ibanCountryLengths", "[", "countryCode", "]", ":", "continue", "# Convert IBAN to integer format.", "# Move the first 4 characters to the end of the string,", "# then convert all 
characters to integers; where A = 10, B = 11, ...., Z = 35", "iban_int", "=", "iban", "[", "4", ":", "]", "+", "iban", "[", "0", ":", "4", "]", "for", "character", "in", "iban_int", ":", "if", "character", ".", "isalpha", "(", ")", ":", "iban_int", "=", "iban_int", ".", "replace", "(", "character", ",", "str", "(", "(", "ord", "(", "character", ")", "-", "65", ")", "+", "10", ")", ")", "# Check IBAN integer mod 97 for remainder", "if", "int", "(", "iban_int", ")", "%", "97", "!=", "1", ":", "continue", "self", ".", "debug", "(", "\"Found IBAN: %s\"", "%", "iban", ")", "ibans", ".", "add", "(", "iban", ")", "return", "list", "(", "ibans", ")" ]
https://github.com/smicallef/spiderfoot/blob/fd4bf9394c9ab3ecc90adc3115c56349fb23165b/sflib.py#L1625-L1705
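As a standalone illustration of the Mod 97 step used by parseIBANNumbers above (a minimal sketch, independent of SpiderFoot; GB82WEST12345698765432 is the well-known example IBAN, not real account data):

# Minimal Mod 97 check, mirroring the validation step in parseIBANNumbers.
def iban_mod97_ok(iban: str) -> bool:
    rearranged = iban[4:] + iban[:4]                       # move first 4 chars to the end
    digits = "".join(str(int(c, 36)) for c in rearranged)  # A=10 ... Z=35, digits unchanged
    return int(digits) % 97 == 1

print(iban_mod97_ok("GB82WEST12345698765432"))  # True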
OpenCobolIDE/OpenCobolIDE
c78d0d335378e5fe0a5e74f53c19b68b55e85388
open_cobol_ide/extlibs/pyqode/core/widgets/tabs.py
python
TabWidget.close_all
(self)
return False
Closes all editors
Closes all editors
[ "Closes", "all", "editors" ]
def close_all(self):
    """
    Closes all editors
    """
    if self._try_close_dirty_tabs():
        while self.count():
            widget = self.widget(0)
            self.removeTab(0)
            self.tab_closed.emit(widget)
        return True
    return False
[ "def", "close_all", "(", "self", ")", ":", "if", "self", ".", "_try_close_dirty_tabs", "(", ")", ":", "while", "self", ".", "count", "(", ")", ":", "widget", "=", "self", ".", "widget", "(", "0", ")", "self", ".", "removeTab", "(", "0", ")", "self", ".", "tab_closed", ".", "emit", "(", "widget", ")", "return", "True", "return", "False" ]
https://github.com/OpenCobolIDE/OpenCobolIDE/blob/c78d0d335378e5fe0a5e74f53c19b68b55e85388/open_cobol_ide/extlibs/pyqode/core/widgets/tabs.py#L103-L113
kupferlauncher/kupfer
1c1e9bcbce05a82f503f68f8b3955c20b02639b3
waflib/Scripting.py
python
Dist.get_files
(self)
return files
Files to package are searched automatically by :py:func:`waflib.Node.Node.ant_glob`. Set *files* to prevent this behaviour:: def dist(ctx): ctx.files = ctx.path.find_node('wscript') Files are also searched from the directory 'base_path', to change it, set:: def dist(ctx): ctx.base_path = path :rtype: list of :py:class:`waflib.Node.Node`
Files to package are searched automatically by :py:func:`waflib.Node.Node.ant_glob`. Set *files* to prevent this behaviour::
[ "Files", "to", "package", "are", "searched", "automatically", "by", ":", "py", ":", "func", ":", "waflib", ".", "Node", ".", "Node", ".", "ant_glob", ".", "Set", "*", "files", "*", "to", "prevent", "this", "behaviour", "::" ]
def get_files(self):
    """
    Files to package are searched automatically by
    :py:func:`waflib.Node.Node.ant_glob`. Set *files* to prevent this
    behaviour::

        def dist(ctx):
            ctx.files = ctx.path.find_node('wscript')

    Files are also searched from the directory 'base_path', to change it, set::

        def dist(ctx):
            ctx.base_path = path

    :rtype: list of :py:class:`waflib.Node.Node`
    """
    try:
        files = self.files
    except AttributeError:
        files = self.base_path.ant_glob('**/*', excl=self.get_excl())
    return files
[ "def", "get_files", "(", "self", ")", ":", "try", ":", "files", "=", "self", ".", "files", "except", "AttributeError", ":", "files", "=", "self", ".", "base_path", ".", "ant_glob", "(", "'**/*'", ",", "excl", "=", "self", ".", "get_excl", "(", ")", ")", "return", "files" ]
https://github.com/kupferlauncher/kupfer/blob/1c1e9bcbce05a82f503f68f8b3955c20b02639b3/waflib/Scripting.py#L500-L519
cloudera/impyla
0c736af4cad2bade9b8e313badc08ec50e81c948
impala/_thrift_gen/TCLIService/ttypes.py
python
TDoubleValue.__eq__
(self, other)
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
[]
def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "return", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", "and", "self", ".", "__dict__", "==", "other", ".", "__dict__" ]
https://github.com/cloudera/impyla/blob/0c736af4cad2bade9b8e313badc08ec50e81c948/impala/_thrift_gen/TCLIService/ttypes.py#L1592-L1593
hzy46/Deep-Learning-21-Examples
15c2d9edccad090cd67b033f24a43c544e5cba3e
chapter_7/preprocessing/vgg_preprocessing.py
python
_crop
(image, offset_height, offset_width, crop_height, crop_width)
return tf.reshape(image, cropped_shape)
Crops the given image using the provided offsets and sizes. Note that the method doesn't assume we know the input image size but it does assume we know the input image rank. Args: image: an image of shape [height, width, channels]. offset_height: a scalar tensor indicating the height offset. offset_width: a scalar tensor indicating the width offset. crop_height: the height of the cropped image. crop_width: the width of the cropped image. Returns: the cropped (and resized) image. Raises: InvalidArgumentError: if the rank is not 3 or if the image dimensions are less than the crop size.
Crops the given image using the provided offsets and sizes.
[ "Crops", "the", "given", "image", "using", "the", "provided", "offsets", "and", "sizes", "." ]
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  # print(original_shape[0], crop_height)
  # print(original_shape[1], crop_width)
  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
[ "def", "_crop", "(", "image", ",", "offset_height", ",", "offset_width", ",", "crop_height", ",", "crop_width", ")", ":", "original_shape", "=", "tf", ".", "shape", "(", "image", ")", "rank_assertion", "=", "tf", ".", "Assert", "(", "tf", ".", "equal", "(", "tf", ".", "rank", "(", "image", ")", ",", "3", ")", ",", "[", "'Rank of image must be equal to 3.'", "]", ")", "cropped_shape", "=", "control_flow_ops", ".", "with_dependencies", "(", "[", "rank_assertion", "]", ",", "tf", ".", "stack", "(", "[", "crop_height", ",", "crop_width", ",", "original_shape", "[", "2", "]", "]", ")", ")", "# print(original_shape[0], crop_height)", "# print(original_shape[1], crop_width)", "size_assertion", "=", "tf", ".", "Assert", "(", "tf", ".", "logical_and", "(", "tf", ".", "greater_equal", "(", "original_shape", "[", "0", "]", ",", "crop_height", ")", ",", "tf", ".", "greater_equal", "(", "original_shape", "[", "1", "]", ",", "crop_width", ")", ")", ",", "[", "'Crop size greater than the image size.'", "]", ")", "offsets", "=", "tf", ".", "to_int32", "(", "tf", ".", "stack", "(", "[", "offset_height", ",", "offset_width", ",", "0", "]", ")", ")", "# Use tf.slice instead of crop_to_bounding box as it accepts tensors to", "# define the crop size.", "image", "=", "control_flow_ops", ".", "with_dependencies", "(", "[", "size_assertion", "]", ",", "tf", ".", "slice", "(", "image", ",", "offsets", ",", "cropped_shape", ")", ")", "return", "tf", ".", "reshape", "(", "image", ",", "cropped_shape", ")" ]
https://github.com/hzy46/Deep-Learning-21-Examples/blob/15c2d9edccad090cd67b033f24a43c544e5cba3e/chapter_7/preprocessing/vgg_preprocessing.py#L49-L93
raffaele-forte/climber
5530a780446e35b1ce977bae140557050fe0b47c
Exscript/stdlib/string.py
python
replace
(scope, strings, source, dest)
return [s.replace(source[0], dest[0]) for s in strings]
Returns a copy of the given string (or list of strings) in which all occurrences of the given source are replaced by the given dest. @type strings: string @param strings: A string, or a list of strings. @type source: string @param source: What to replace. @type dest: string @param dest: What to replace it with. @rtype: string @return: The resulting string, or list of strings.
Returns a copy of the given string (or list of strings) in which all occurrences of the given source are replaced by the given dest.
[ "Returns", "a", "copy", "of", "the", "given", "string", "(", "or", "list", "of", "strings", ")", "in", "which", "all", "occurrences", "of", "the", "given", "source", "are", "replaced", "by", "the", "given", "dest", "." ]
def replace(scope, strings, source, dest):
    """
    Returns a copy of the given string (or list of strings) in which all
    occurrences of the given source are replaced by the given dest.

    @type  strings: string
    @param strings: A string, or a list of strings.
    @type  source: string
    @param source: What to replace.
    @type  dest: string
    @param dest: What to replace it with.
    @rtype:  string
    @return: The resulting string, or list of strings.
    """
    return [s.replace(source[0], dest[0]) for s in strings]
[ "def", "replace", "(", "scope", ",", "strings", ",", "source", ",", "dest", ")", ":", "return", "[", "s", ".", "replace", "(", "source", "[", "0", "]", ",", "dest", "[", "0", "]", ")", "for", "s", "in", "strings", "]" ]
https://github.com/raffaele-forte/climber/blob/5530a780446e35b1ce977bae140557050fe0b47c/Exscript/stdlib/string.py#L18-L32
maas/maas
db2f89970c640758a51247c59bf1ec6f60cf4ab5
src/maasserver/api/subnets.py
python
SubnetHandler.unreserved_ip_ranges
(self, request, id)
return subnet.get_ipranges_not_in_use().render_json( include_purpose=False )
@description-title List unreserved IP ranges @description Lists IP ranges currently unreserved in the subnet. @param (int) "{id}" [required=true] A subnet ID. @success (http-status-code) "server-success" 200 @success (json) "success-json" A JSON object containing a list of unreserved IP ranges. @success-example "success-json" [exkey=subnets-unreserved-ips] placeholder text @error (http-status-code) "404" 404 @error (content) "not-found" The requested subnet is not found. @error-example "not-found" Not Found
@description-title List unreserved IP ranges @description Lists IP ranges currently unreserved in the subnet.
[ "@description", "-", "title", "List", "unreserved", "IP", "ranges", "@description", "Lists", "IP", "ranges", "currently", "unreserved", "in", "the", "subnet", "." ]
def unreserved_ip_ranges(self, request, id):
    """@description-title List unreserved IP ranges
    @description Lists IP ranges currently unreserved in the subnet.

    @param (int) "{id}" [required=true] A subnet ID.

    @success (http-status-code) "server-success" 200
    @success (json) "success-json" A JSON object containing a list of
    unreserved IP ranges.
    @success-example "success-json" [exkey=subnets-unreserved-ips]
    placeholder text

    @error (http-status-code) "404" 404
    @error (content) "not-found" The requested subnet is not found.
    @error-example "not-found"
        Not Found
    """
    subnet = Subnet.objects.get_subnet_or_404(
        id, request.user, NodePermission.view
    )
    return subnet.get_ipranges_not_in_use().render_json(
        include_purpose=False
    )
[ "def", "unreserved_ip_ranges", "(", "self", ",", "request", ",", "id", ")", ":", "subnet", "=", "Subnet", ".", "objects", ".", "get_subnet_or_404", "(", "id", ",", "request", ".", "user", ",", "NodePermission", ".", "view", ")", "return", "subnet", ".", "get_ipranges_not_in_use", "(", ")", ".", "render_json", "(", "include_purpose", "=", "False", ")" ]
https://github.com/maas/maas/blob/db2f89970c640758a51247c59bf1ec6f60cf4ab5/src/maasserver/api/subnets.py#L317-L339
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/imaplib.py
python
_Authenticator.encode
(self, inp)
return oup
[]
def encode(self, inp):
    #
    #  Invoke binascii.b2a_base64 iteratively with
    #  short even length buffers, strip the trailing
    #  line feed from the result and append.  "Even"
    #  means a number that factors to both 6 and 8,
    #  so when it gets to the end of the 8-bit input
    #  there's no partial 6-bit output.
    #
    oup = b''
    if isinstance(inp, str):
        inp = inp.encode('utf-8')
    while inp:
        if len(inp) > 48:
            t = inp[:48]
            inp = inp[48:]
        else:
            t = inp
            inp = b''
        e = binascii.b2a_base64(t)
        if e:
            oup = oup + e[:-1]
    return oup
[ "def", "encode", "(", "self", ",", "inp", ")", ":", "#", "# Invoke binascii.b2a_base64 iteratively with", "# short even length buffers, strip the trailing", "# line feed from the result and append. \"Even\"", "# means a number that factors to both 6 and 8,", "# so when it gets to the end of the 8-bit input", "# there's no partial 6-bit output.", "#", "oup", "=", "b''", "if", "isinstance", "(", "inp", ",", "str", ")", ":", "inp", "=", "inp", ".", "encode", "(", "'utf-8'", ")", "while", "inp", ":", "if", "len", "(", "inp", ")", ">", "48", ":", "t", "=", "inp", "[", ":", "48", "]", "inp", "=", "inp", "[", "48", ":", "]", "else", ":", "t", "=", "inp", "inp", "=", "b''", "e", "=", "binascii", ".", "b2a_base64", "(", "t", ")", "if", "e", ":", "oup", "=", "oup", "+", "e", "[", ":", "-", "1", "]", "return", "oup" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/imaplib.py#L1387-L1409
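A standalone check of the chunking property that encode's comment relies on: since 48 bytes is a multiple of 3, base64-encoding 48-byte chunks and concatenating them matches encoding the whole buffer at once (a sketch, not part of imaplib):

import binascii

data = bytes(range(256)) * 2  # 512 bytes: several full 48-byte chunks plus a 32-byte tail
piecewise = b"".join(
    binascii.b2a_base64(data[i:i + 48])[:-1]  # strip the trailing newline, as encode() does
    for i in range(0, len(data), 48)
)
assert piecewise == binascii.b2a_base64(data)[:-1]
print("chunked base64 matches whole-buffer base64")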
ukBaz/python-bluezero
e6b4e96342de6c66571a6660d711c018f8c6b470
bluezero/broadcaster.py
python
Beacon.add_manufacturer_data
(self, manufacturer, data)
Add manufacturer information to be used in beacon message :param manufacturer: Use numbers from Bluetooth SIG https://www.bluetooth.com/specifications/assigned-numbers/16-bit-UUIDs-for-Members :param data: Data to be sent (Limit of 23bytes) e.g. b'\\xff' * 23
Add manufacturer information to be used in beacon message :param manufacturer: Use numbers from Bluetooth SIG https://www.bluetooth.com/specifications/assigned-numbers/16-bit-UUIDs-for-Members
[ "Add", "manufacturer", "information", "to", "be", "used", "in", "beacon", "message", ":", "param", "manufacturer", ":", "Use", "numbers", "from", "Bluetooth", "SIG", "https", ":", "//", "www", ".", "bluetooth", ".", "com", "/", "specifications", "/", "assigned", "-", "numbers", "/", "16", "-", "bit", "-", "UUIDs", "-", "for", "-", "Members" ]
def add_manufacturer_data(self, manufacturer, data):
    """
    Add manufacturer information to be used in beacon message

    :param manufacturer: Use numbers from Bluetooth SIG
        https://www.bluetooth.com/specifications/assigned-numbers/16-bit-UUIDs-for-Members
    :param data: Data to be sent (Limit of 23bytes)
        e.g. b'\\xff' * 23
    """
    if isinstance(manufacturer, str):
        manufacturer = int(manufacturer, 16)
    self.broadcaster.manufacturer_data(manufacturer, data)
[ "def", "add_manufacturer_data", "(", "self", ",", "manufacturer", ",", "data", ")", ":", "if", "isinstance", "(", "manufacturer", ",", "str", ")", ":", "manufacturer", "=", "int", "(", "manufacturer", ",", "16", ")", "self", ".", "broadcaster", ".", "manufacturer_data", "(", "manufacturer", ",", "data", ")" ]
https://github.com/ukBaz/python-bluezero/blob/e6b4e96342de6c66571a6660d711c018f8c6b470/bluezero/broadcaster.py#L38-L49
llSourcell/AI_Artist
3038c06c2e389b9c919c881c9a169efe2fd7810e
lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/_base.py
python
Node.reparentChildren
(self, newParent)
Move all the children of the current node to newParent. This is needed so that trees that don't store text as nodes move the text in the correct way
Move all the children of the current node to newParent. This is needed so that trees that don't store text as nodes move the text in the correct way
[ "Move", "all", "the", "children", "of", "the", "current", "node", "to", "newParent", ".", "This", "is", "needed", "so", "that", "trees", "that", "don", "t", "store", "text", "as", "nodes", "move", "the", "text", "in", "the", "correct", "way" ]
def reparentChildren(self, newParent):
    """Move all the children of the current node to newParent.
    This is needed so that trees that don't store text as nodes move the
    text in the correct way
    """
    # XXX - should this method be made more general?
    for child in self.childNodes:
        newParent.appendChild(child)
    self.childNodes = []
[ "def", "reparentChildren", "(", "self", ",", "newParent", ")", ":", "# XXX - should this method be made more general?", "for", "child", "in", "self", ".", "childNodes", ":", "newParent", ".", "appendChild", "(", "child", ")", "self", ".", "childNodes", "=", "[", "]" ]
https://github.com/llSourcell/AI_Artist/blob/3038c06c2e389b9c919c881c9a169efe2fd7810e/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/_base.py#L76-L84
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/turtle.py
python
RawTurtle._update
(self)
Perform a Turtle-data update.
Perform a Turtle-data update.
[ "Perform", "a", "Turtle", "-", "data", "update", "." ]
def _update(self):
    """Perform a Turtle-data update.
    """
    screen = self.screen
    if screen._tracing == 0:
        return
    elif screen._tracing == 1:
        self._update_data()
        self._drawturtle()
        screen._update()                   # TurtleScreenBase
        screen._delay(screen._delayvalue)  # TurtleScreenBase
    else:
        self._update_data()
        if screen._updatecounter == 0:
            for t in screen.turtles():
                t._drawturtle()
            screen._update()
[ "def", "_update", "(", "self", ")", ":", "screen", "=", "self", ".", "screen", "if", "screen", ".", "_tracing", "==", "0", ":", "return", "elif", "screen", ".", "_tracing", "==", "1", ":", "self", ".", "_update_data", "(", ")", "self", ".", "_drawturtle", "(", ")", "screen", ".", "_update", "(", ")", "# TurtleScreenBase", "screen", ".", "_delay", "(", "screen", ".", "_delayvalue", ")", "# TurtleScreenBase", "else", ":", "self", ".", "_update_data", "(", ")", "if", "screen", ".", "_updatecounter", "==", "0", ":", "for", "t", "in", "screen", ".", "turtles", "(", ")", ":", "t", ".", "_drawturtle", "(", ")", "screen", ".", "_update", "(", ")" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/turtle.py#L2645-L2661
Source-Python-Dev-Team/Source.Python
d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb
addons/source-python/packages/site-packages/sqlalchemy/sql/selectable.py
python
SelectBase.as_scalar
(self)
return ScalarSelect(self)
return a 'scalar' representation of this selectable, which can be used as a column expression. Typically, a select statement which has only one column in its columns clause is eligible to be used as a scalar expression. The returned object is an instance of :class:`ScalarSelect`.
return a 'scalar' representation of this selectable, which can be used as a column expression.
[ "return", "a", "scalar", "representation", "of", "this", "selectable", "which", "can", "be", "used", "as", "a", "column", "expression", "." ]
def as_scalar(self):
    """return a 'scalar' representation of this selectable, which can be
    used as a column expression.

    Typically, a select statement which has only one column in its columns
    clause is eligible to be used as a scalar expression.

    The returned object is an instance of :class:`ScalarSelect`.

    """
    return ScalarSelect(self)
[ "def", "as_scalar", "(", "self", ")", ":", "return", "ScalarSelect", "(", "self", ")" ]
https://github.com/Source-Python-Dev-Team/Source.Python/blob/d0ffd8ccbd1e9923c9bc44936f20613c1c76b7fb/addons/source-python/packages/site-packages/sqlalchemy/sql/selectable.py#L1520-L1531
rowliny/DiffHelper
ab3a96f58f9579d0023aed9ebd785f4edf26f8af
Tool/SitePackages/nltk/app/chartparser_app.py
python
ChartView.update
(self, chart=None)
Draw any edges that have not been drawn. This is typically called when a after modifies the canvas that a CanvasView is displaying. ``update`` will cause any edges that have been added to the chart to be drawn. If update is given a ``chart`` argument, then it will replace the current chart with the given chart.
Draw any edges that have not been drawn. This is typically called when a after modifies the canvas that a CanvasView is displaying. ``update`` will cause any edges that have been added to the chart to be drawn.
[ "Draw", "any", "edges", "that", "have", "not", "been", "drawn", ".", "This", "is", "typically", "called", "when", "a", "after", "modifies", "the", "canvas", "that", "a", "CanvasView", "is", "displaying", ".", "update", "will", "cause", "any", "edges", "that", "have", "been", "added", "to", "the", "chart", "to", "be", "drawn", "." ]
def update(self, chart=None):
    """
    Draw any edges that have not been drawn.  This is typically
    called when a after modifies the canvas that a CanvasView is
    displaying.  ``update`` will cause any edges that have been
    added to the chart to be drawn.

    If update is given a ``chart`` argument, then it will replace
    the current chart with the given chart.
    """
    if chart is not None:
        self._chart = chart
        self._edgelevels = []
        self._marks = {}
        self._analyze()
        self._grow()
        self.draw()
        self.erase_tree()
        self._resize()
    else:
        for edge in self._chart:
            if edge not in self._edgetags:
                self._add_edge(edge)
        self._resize()
[ "def", "update", "(", "self", ",", "chart", "=", "None", ")", ":", "if", "chart", "is", "not", "None", ":", "self", ".", "_chart", "=", "chart", "self", ".", "_edgelevels", "=", "[", "]", "self", ".", "_marks", "=", "{", "}", "self", ".", "_analyze", "(", ")", "self", ".", "_grow", "(", ")", "self", ".", "draw", "(", ")", "self", ".", "erase_tree", "(", ")", "self", ".", "_resize", "(", ")", "else", ":", "for", "edge", "in", "self", ".", "_chart", ":", "if", "edge", "not", "in", "self", ".", "_edgetags", ":", "self", ".", "_add_edge", "(", "edge", ")", "self", ".", "_resize", "(", ")" ]
https://github.com/rowliny/DiffHelper/blob/ab3a96f58f9579d0023aed9ebd785f4edf26f8af/Tool/SitePackages/nltk/app/chartparser_app.py#L1168-L1191
tanghaibao/jcvi
5e720870c0928996f8b77a38208106ff0447ccb6
jcvi/assembly/hic.py
python
iter_last_tour
(tourfile, clm)
return tour, tour_o
Extract last tour from tourfile. The clm instance is also passed in to see if any contig is covered in the clm.
Extract last tour from tourfile. The clm instance is also passed in to see if any contig is covered in the clm.
[ "Extract", "last", "tour", "from", "tourfile", ".", "The", "clm", "instance", "is", "also", "passed", "in", "to", "see", "if", "any", "contig", "is", "covered", "in", "the", "clm", "." ]
def iter_last_tour(tourfile, clm):
    """
    Extract last tour from tourfile. The clm instance is also passed in to see
    if any contig is covered in the clm.
    """
    row = open(tourfile).readlines()[-1]
    _tour, _tour_o = separate_tour_and_o(row)
    tour = []
    tour_o = []
    for tc, to in zip(_tour, _tour_o):
        if tc not in clm.contigs:
            logging.debug(
                "Contig `{}` in file `{}` not found in `{}`".format(
                    tc, tourfile, clm.idsfile
                )
            )
            continue
        tour.append(tc)
        tour_o.append(to)

    return tour, tour_o
[ "def", "iter_last_tour", "(", "tourfile", ",", "clm", ")", ":", "row", "=", "open", "(", "tourfile", ")", ".", "readlines", "(", ")", "[", "-", "1", "]", "_tour", ",", "_tour_o", "=", "separate_tour_and_o", "(", "row", ")", "tour", "=", "[", "]", "tour_o", "=", "[", "]", "for", "tc", ",", "to", "in", "zip", "(", "_tour", ",", "_tour_o", ")", ":", "if", "tc", "not", "in", "clm", ".", "contigs", ":", "logging", ".", "debug", "(", "\"Contig `{}` in file `{}` not found in `{}`\"", ".", "format", "(", "tc", ",", "tourfile", ",", "clm", ".", "idsfile", ")", ")", "continue", "tour", ".", "append", "(", "tc", ")", "tour_o", ".", "append", "(", "to", ")", "return", "tour", ",", "tour_o" ]
https://github.com/tanghaibao/jcvi/blob/5e720870c0928996f8b77a38208106ff0447ccb6/jcvi/assembly/hic.py#L1373-L1392
naftaliharris/tauthon
5587ceec329b75f7caf6d65a036db61ac1bae214
Tools/pybench/CommandLine.py
python
fileopen
(name, mode='wb', encoding=None)
Open a file using mode. Default mode is 'wb' meaning to open the file for writing in binary mode. If encoding is given, I/O to and from the file is transparently encoded using the given encoding. Files opened for writing are chmod()ed to 0600.
Open a file using mode.
[ "Open", "a", "file", "using", "mode", "." ]
def fileopen(name, mode='wb', encoding=None):

    """ Open a file using mode.

        Default mode is 'wb' meaning to open the file for writing in
        binary mode. If encoding is given, I/O to and from the file is
        transparently encoded using the given encoding.

        Files opened for writing are chmod()ed to 0600.

    """
    if name == 'stdout':
        return sys.stdout
    elif name == 'stderr':
        return sys.stderr
    elif name == 'stdin':
        return sys.stdin
    else:
        if encoding is not None:
            import codecs
            f = codecs.open(name, mode, encoding)
        else:
            f = open(name, mode)
        if 'w' in mode:
            os.chmod(name, 0600)
        return f
[ "def", "fileopen", "(", "name", ",", "mode", "=", "'wb'", ",", "encoding", "=", "None", ")", ":", "if", "name", "==", "'stdout'", ":", "return", "sys", ".", "stdout", "elif", "name", "==", "'stderr'", ":", "return", "sys", ".", "stderr", "elif", "name", "==", "'stdin'", ":", "return", "sys", ".", "stdin", "else", ":", "if", "encoding", "is", "not", "None", ":", "import", "codecs", "f", "=", "codecs", ".", "open", "(", "name", ",", "mode", ",", "encoding", ")", "else", ":", "f", "=", "open", "(", "name", ",", "mode", ")", "if", "'w'", "in", "mode", ":", "os", ".", "chmod", "(", "name", ",", "0600", ")", "return", "f" ]
https://github.com/naftaliharris/tauthon/blob/5587ceec329b75f7caf6d65a036db61ac1bae214/Tools/pybench/CommandLine.py#L61-L86
mayank93/Twitter-Sentiment-Analysis
f095c6ca6bf69787582b5dabb140fefaf278eb37
front-end/web2py/gluon/tools.py
python
Auth.has_permission
( self, name='any', table_name='', record_id=0, user_id=None, group_id=None, )
return r
checks if user_id or current logged in user is member of a group that has 'name' permission on 'table_name' and 'record_id' if group_id is passed, it checks whether the group has the permission
checks if user_id or current logged in user is member of a group that has 'name' permission on 'table_name' and 'record_id' if group_id is passed, it checks whether the group has the permission
[ "checks", "if", "user_id", "or", "current", "logged", "in", "user", "is", "member", "of", "a", "group", "that", "has", "name", "permission", "on", "table_name", "and", "record_id", "if", "group_id", "is", "passed", "it", "checks", "whether", "the", "group", "has", "the", "permission" ]
def has_permission(
    self,
    name='any',
    table_name='',
    record_id=0,
    user_id=None,
    group_id=None,
    ):
    """
    checks if user_id or current logged in user is member of a group
    that has 'name' permission on 'table_name' and 'record_id'
    if group_id is passed, it checks whether the group has the permission
    """

    if not group_id and self.settings.everybody_group_id and \
            self.has_permission(
                name, table_name, record_id, user_id=None,
                group_id=self.settings.everybody_group_id):
        return True

    if not user_id and not group_id and self.user:
        user_id = self.user.id
    if user_id:
        membership = self.table_membership()
        rows = self.db(membership.user_id == user_id).select(
            membership.group_id)
        groups = set([row.group_id for row in rows])
        if group_id and not group_id in groups:
            return False
    else:
        groups = set([group_id])
    permission = self.table_permission()
    rows = self.db(permission.name == name)(
        permission.table_name == str(table_name))(
        permission.record_id == record_id).select(permission.group_id)
    groups_required = set([row.group_id for row in rows])
    if record_id:
        rows = self.db(permission.name == name)(
            permission.table_name == str(table_name))(
            permission.record_id == 0).select(permission.group_id)
        groups_required = groups_required.union(
            set([row.group_id for row in rows]))
    if groups.intersection(groups_required):
        r = True
    else:
        r = False
    if user_id:
        self.log_event(self.messages.has_permission_log,
                       dict(user_id=user_id, name=name,
                            table_name=table_name, record_id=record_id))
    return r
[ "def", "has_permission", "(", "self", ",", "name", "=", "'any'", ",", "table_name", "=", "''", ",", "record_id", "=", "0", ",", "user_id", "=", "None", ",", "group_id", "=", "None", ",", ")", ":", "if", "not", "group_id", "and", "self", ".", "settings", ".", "everybody_group_id", "and", "self", ".", "has_permission", "(", "name", ",", "table_name", ",", "record_id", ",", "user_id", "=", "None", ",", "group_id", "=", "self", ".", "settings", ".", "everybody_group_id", ")", ":", "return", "True", "if", "not", "user_id", "and", "not", "group_id", "and", "self", ".", "user", ":", "user_id", "=", "self", ".", "user", ".", "id", "if", "user_id", ":", "membership", "=", "self", ".", "table_membership", "(", ")", "rows", "=", "self", ".", "db", "(", "membership", ".", "user_id", "==", "user_id", ")", ".", "select", "(", "membership", ".", "group_id", ")", "groups", "=", "set", "(", "[", "row", ".", "group_id", "for", "row", "in", "rows", "]", ")", "if", "group_id", "and", "not", "group_id", "in", "groups", ":", "return", "False", "else", ":", "groups", "=", "set", "(", "[", "group_id", "]", ")", "permission", "=", "self", ".", "table_permission", "(", ")", "rows", "=", "self", ".", "db", "(", "permission", ".", "name", "==", "name", ")", "(", "permission", ".", "table_name", "==", "str", "(", "table_name", ")", ")", "(", "permission", ".", "record_id", "==", "record_id", ")", ".", "select", "(", "permission", ".", "group_id", ")", "groups_required", "=", "set", "(", "[", "row", ".", "group_id", "for", "row", "in", "rows", "]", ")", "if", "record_id", ":", "rows", "=", "self", ".", "db", "(", "permission", ".", "name", "==", "name", ")", "(", "permission", ".", "table_name", "==", "str", "(", "table_name", ")", ")", "(", "permission", ".", "record_id", "==", "0", ")", ".", "select", "(", "permission", ".", "group_id", ")", "groups_required", "=", "groups_required", ".", "union", "(", "set", "(", "[", "row", ".", "group_id", "for", "row", "in", "rows", "]", ")", ")", "if", "groups", ".", "intersection", "(", "groups_required", ")", ":", "r", "=", "True", "else", ":", "r", "=", "False", "if", "user_id", ":", "self", ".", "log_event", "(", "self", ".", "messages", ".", "has_permission_log", ",", "dict", "(", "user_id", "=", "user_id", ",", "name", "=", "name", ",", "table_name", "=", "table_name", ",", "record_id", "=", "record_id", ")", ")", "return", "r" ]
https://github.com/mayank93/Twitter-Sentiment-Analysis/blob/f095c6ca6bf69787582b5dabb140fefaf278eb37/front-end/web2py/gluon/tools.py#L2936-L2986
tendenci/tendenci
0f2c348cc0e7d41bc56f50b00ce05544b083bf1d
tendenci/apps/profiles/models.py
python
Profile.is_superuser
(self)
return all([self._can_login(), self.user.is_superuser])
[]
def is_superuser(self):
    return all([self._can_login(), self.user.is_superuser])
[ "def", "is_superuser", "(", "self", ")", ":", "return", "all", "(", "[", "self", ".", "_can_login", "(", ")", ",", "self", ".", "user", ".", "is_superuser", "]", ")" ]
https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/profiles/models.py#L175-L176
gentoo/portage
e5be73709b1a42b40380fd336f9381452b01a723
lib/portage/dbapi/vartree.py
python
dblink.quickpkg
( self, output_file, include_config=False, include_unmodified_config=False )
return excluded_config_files
Create a tar file appropriate for use by quickpkg. @param output_file: Write binary tar stream to file. @type output_file: file @param include_config: Include all files protected by CONFIG_PROTECT (as a security precaution, default is False). @type include_config: bool @param include_unmodified_config: Include files protected by CONFIG_PROTECT that have not been modified since installation (as a security precaution, default is False). @type include_unmodified_config: bool @rtype: list @return: Paths of protected configuration files which have been omitted.
Create a tar file appropriate for use by quickpkg.
[ "Create", "a", "tar", "file", "appropriate", "for", "use", "by", "quickpkg", "." ]
def quickpkg(
    self, output_file, include_config=False, include_unmodified_config=False
):
    """
    Create a tar file appropriate for use by quickpkg.

    @param output_file: Write binary tar stream to file.
    @type output_file: file
    @param include_config: Include all files protected by CONFIG_PROTECT
            (as a security precaution, default is False).
    @type include_config: bool
    @param include_unmodified_config: Include files protected by
            CONFIG_PROTECT that have not been modified since installation
            (as a security precaution, default is False).
    @type include_unmodified_config: bool
    @rtype: list
    @return: Paths of protected configuration files which have been omitted.
    """
    settings = self.settings
    cpv = self.mycpv
    xattrs = "xattr" in settings.features
    contents = self.getcontents()
    excluded_config_files = []
    protect = None

    if not include_config:
        confprot = ConfigProtect(
            settings["EROOT"],
            portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")),
            portage.util.shlex_split(settings.get("CONFIG_PROTECT_MASK", "")),
            case_insensitive=("case-insensitive-fs" in settings.features),
        )

        def protect(filename):
            if not confprot.isprotected(filename):
                return False
            if include_unmodified_config:
                file_data = contents[filename]
                if file_data[0] == "obj":
                    orig_md5 = file_data[2].lower()
                    cur_md5 = perform_md5(filename, calc_prelink=1)
                    if orig_md5 == cur_md5:
                        return False
            excluded_config_files.append(filename)
            return True

    # The tarfile module will write pax headers holding the
    # xattrs only if PAX_FORMAT is specified here.
    with tarfile.open(
        fileobj=output_file,
        mode="w|",
        format=tarfile.PAX_FORMAT if xattrs else tarfile.DEFAULT_FORMAT,
    ) as tar:
        tar_contents(
            contents, settings["ROOT"], tar, protect=protect, xattrs=xattrs
        )

    return excluded_config_files
[ "def", "quickpkg", "(", "self", ",", "output_file", ",", "include_config", "=", "False", ",", "include_unmodified_config", "=", "False", ")", ":", "settings", "=", "self", ".", "settings", "cpv", "=", "self", ".", "mycpv", "xattrs", "=", "\"xattr\"", "in", "settings", ".", "features", "contents", "=", "self", ".", "getcontents", "(", ")", "excluded_config_files", "=", "[", "]", "protect", "=", "None", "if", "not", "include_config", ":", "confprot", "=", "ConfigProtect", "(", "settings", "[", "\"EROOT\"", "]", ",", "portage", ".", "util", ".", "shlex_split", "(", "settings", ".", "get", "(", "\"CONFIG_PROTECT\"", ",", "\"\"", ")", ")", ",", "portage", ".", "util", ".", "shlex_split", "(", "settings", ".", "get", "(", "\"CONFIG_PROTECT_MASK\"", ",", "\"\"", ")", ")", ",", "case_insensitive", "=", "(", "\"case-insensitive-fs\"", "in", "settings", ".", "features", ")", ",", ")", "def", "protect", "(", "filename", ")", ":", "if", "not", "confprot", ".", "isprotected", "(", "filename", ")", ":", "return", "False", "if", "include_unmodified_config", ":", "file_data", "=", "contents", "[", "filename", "]", "if", "file_data", "[", "0", "]", "==", "\"obj\"", ":", "orig_md5", "=", "file_data", "[", "2", "]", ".", "lower", "(", ")", "cur_md5", "=", "perform_md5", "(", "filename", ",", "calc_prelink", "=", "1", ")", "if", "orig_md5", "==", "cur_md5", ":", "return", "False", "excluded_config_files", ".", "append", "(", "filename", ")", "return", "True", "# The tarfile module will write pax headers holding the", "# xattrs only if PAX_FORMAT is specified here.", "with", "tarfile", ".", "open", "(", "fileobj", "=", "output_file", ",", "mode", "=", "\"w|\"", ",", "format", "=", "tarfile", ".", "PAX_FORMAT", "if", "xattrs", "else", "tarfile", ".", "DEFAULT_FORMAT", ",", ")", "as", "tar", ":", "tar_contents", "(", "contents", ",", "settings", "[", "\"ROOT\"", "]", ",", "tar", ",", "protect", "=", "protect", ",", "xattrs", "=", "xattrs", ")", "return", "excluded_config_files" ]
https://github.com/gentoo/portage/blob/e5be73709b1a42b40380fd336f9381452b01a723/lib/portage/dbapi/vartree.py#L2109-L2166
bjmayor/hacker
e3ce2ad74839c2733b27dac6c0f495e0743e1866
venv/lib/python3.5/site-packages/pip/_vendor/pkg_resources/__init__.py
python
find_eggs_in_zip
(importer, path_item, only=False)
Find eggs in zip files; possibly multiple nested eggs.
Find eggs in zip files; possibly multiple nested eggs.
[ "Find", "eggs", "in", "zip", "files", ";", "possibly", "multiple", "nested", "eggs", "." ]
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir('/'):
        if _is_unpacked_egg(subitem):
            subpath = os.path.join(path_item, subitem)
            for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
                yield dist
[ "def", "find_eggs_in_zip", "(", "importer", ",", "path_item", ",", "only", "=", "False", ")", ":", "if", "importer", ".", "archive", ".", "endswith", "(", "'.whl'", ")", ":", "# wheels are not supported with this finder", "# they don't have PKG-INFO metadata, and won't ever contain eggs", "return", "metadata", "=", "EggMetadata", "(", "importer", ")", "if", "metadata", ".", "has_metadata", "(", "'PKG-INFO'", ")", ":", "yield", "Distribution", ".", "from_filename", "(", "path_item", ",", "metadata", "=", "metadata", ")", "if", "only", ":", "# don't yield nested distros", "return", "for", "subitem", "in", "metadata", ".", "resource_listdir", "(", "'/'", ")", ":", "if", "_is_unpacked_egg", "(", "subitem", ")", ":", "subpath", "=", "os", ".", "path", ".", "join", "(", "path_item", ",", "subitem", ")", "for", "dist", "in", "find_eggs_in_zip", "(", "zipimport", ".", "zipimporter", "(", "subpath", ")", ",", "subpath", ")", ":", "yield", "dist" ]
https://github.com/bjmayor/hacker/blob/e3ce2ad74839c2733b27dac6c0f495e0743e1866/venv/lib/python3.5/site-packages/pip/_vendor/pkg_resources/__init__.py#L1940-L1958
git-cola/git-cola
b48b8028e0c3baf47faf7b074b9773737358163d
cola/utils.py
python
dirname
(path, current_dir='')
return path.rsplit('/', 1)[0]
An os.path.dirname() implementation that always uses '/' Avoid os.path.dirname because git's output always uses '/' regardless of platform.
An os.path.dirname() implementation that always uses '/'
[ "An", "os", ".", "path", ".", "dirname", "()", "implementation", "that", "always", "uses", "/" ]
def dirname(path, current_dir=''):
    """
    An os.path.dirname() implementation that always uses '/'

    Avoid os.path.dirname because git's output always
    uses '/' regardless of platform.

    """
    while '//' in path:
        path = path.replace('//', '/')
    path_dirname = path.rsplit('/', 1)[0]
    if path_dirname == path:
        return current_dir
    return path.rsplit('/', 1)[0]
[ "def", "dirname", "(", "path", ",", "current_dir", "=", "''", ")", ":", "while", "'//'", "in", "path", ":", "path", "=", "path", ".", "replace", "(", "'//'", ",", "'/'", ")", "path_dirname", "=", "path", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "0", "]", "if", "path_dirname", "==", "path", ":", "return", "current_dir", "return", "path", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "0", "]" ]
https://github.com/git-cola/git-cola/blob/b48b8028e0c3baf47faf7b074b9773737358163d/cola/utils.py#L133-L146
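A brief usage sketch of dirname as defined above (assuming only the definition shown):

print(dirname('src/cola/utils.py'))  # 'src/cola'
print(dirname('utils.py'))           # ''  (no '/': falls back to current_dir)
print(dirname('utils.py', '.'))      # '.'
print(dirname('a//b/c'))             # 'a/b' (doubled slashes are collapsed first)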
huawei-noah/Pretrained-Language-Model
d4694a134bdfacbaef8ff1d99735106bd3b3372b
DynaBERT/transformers/tokenization_bert.py
python
BasicTokenizer._is_chinese_char
(self, cp)
return False
Checks whether CP is the codepoint of a CJK character.
Checks whether CP is the codepoint of a CJK character.
[ "Checks", "whether", "CP", "is", "the", "codepoint", "of", "a", "CJK", "character", "." ]
def _is_chinese_char(self, cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
            (cp >= 0x3400 and cp <= 0x4DBF) or  #
            (cp >= 0x20000 and cp <= 0x2A6DF) or  #
            (cp >= 0x2A700 and cp <= 0x2B73F) or  #
            (cp >= 0x2B740 and cp <= 0x2B81F) or  #
            (cp >= 0x2B820 and cp <= 0x2CEAF) or
            (cp >= 0xF900 and cp <= 0xFAFF) or  #
            (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
        return True

    return False
[ "def", "_is_chinese_char", "(", "self", ",", "cp", ")", ":", "# This defines a \"chinese character\" as anything in the CJK Unicode block:", "# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)", "#", "# Note that the CJK Unicode block is NOT all Japanese and Korean characters,", "# despite its name. The modern Korean Hangul alphabet is a different block,", "# as is Japanese Hiragana and Katakana. Those alphabets are used to write", "# space-separated words, so they are not treated specially and handled", "# like the all of the other languages.", "if", "(", "(", "cp", ">=", "0x4E00", "and", "cp", "<=", "0x9FFF", ")", "or", "#", "(", "cp", ">=", "0x3400", "and", "cp", "<=", "0x4DBF", ")", "or", "#", "(", "cp", ">=", "0x20000", "and", "cp", "<=", "0x2A6DF", ")", "or", "#", "(", "cp", ">=", "0x2A700", "and", "cp", "<=", "0x2B73F", ")", "or", "#", "(", "cp", ">=", "0x2B740", "and", "cp", "<=", "0x2B81F", ")", "or", "#", "(", "cp", ">=", "0x2B820", "and", "cp", "<=", "0x2CEAF", ")", "or", "(", "cp", ">=", "0xF900", "and", "cp", "<=", "0xFAFF", ")", "or", "#", "(", "cp", ">=", "0x2F800", "and", "cp", "<=", "0x2FA1F", ")", ")", ":", "#", "return", "True", "return", "False" ]
https://github.com/huawei-noah/Pretrained-Language-Model/blob/d4694a134bdfacbaef8ff1d99735106bd3b3372b/DynaBERT/transformers/tokenization_bert.py#L369-L389
google/aiyprojects-raspbian
964f07f5b4bd2ec785cfda6f318e50e1b67d4758
src/aiy/cloudspeech.py
python
CloudSpeechClient.recognize
(self, language_code='en-US', hint_phrases=None)
return None
Performs speech-to-text for a single utterance using the default ALSA soundcard driver. Once it detects the user is done speaking, it stops listening and delivers the top result as text. By default, this method calls :meth:`start_listening` and :meth:`stop_listening` as the recording begins and ends, respectively. Args: language_code: Language expected from the user, in IETF BCP 47 syntax (default is "en-US"). See the `list of Cloud's supported languages`_. hint_phrase: A list of strings containing words and phrases that may be expected from the user. These hints help the speech recognizer identify them in the dialog and improve the accuracy of your results. Returns: The text transcription of the user's dialog.
Performs speech-to-text for a single utterance using the default ALSA soundcard driver. Once it detects the user is done speaking, it stops listening and delivers the top result as text.
[ "Performs", "speech", "-", "to", "-", "text", "for", "a", "single", "utterance", "using", "the", "default", "ALSA", "soundcard", "driver", ".", "Once", "it", "detects", "the", "user", "is", "done", "speaking", "it", "stops", "listening", "and", "delivers", "the", "top", "result", "as", "text", "." ]
def recognize(self, language_code='en-US', hint_phrases=None):
    """
    Performs speech-to-text for a single utterance using the default ALSA
    soundcard driver. Once it detects the user is done speaking, it stops
    listening and delivers the top result as text.

    By default, this method calls :meth:`start_listening` and
    :meth:`stop_listening` as the recording begins and ends, respectively.

    Args:
        language_code: Language expected from the user, in IETF BCP 47 syntax
            (default is "en-US"). See the `list of Cloud's supported languages`_.
        hint_phrase: A list of strings containing words and phrases that may be
            expected from the user. These hints help the speech recognizer
            identify them in the dialog and improve the accuracy of your results.

    Returns:
        The text transcription of the user's dialog.
    """
    streaming_config = speech.types.StreamingRecognitionConfig(
        config=self._make_config(language_code, hint_phrases),
        single_utterance=True)

    with Recorder() as recorder:
        chunks = recorder.record(AUDIO_FORMAT,
                                 chunk_duration_sec=0.1,
                                 on_start=self.start_listening,
                                 on_stop=self.stop_listening)

        requests = (speech.types.StreamingRecognizeRequest(audio_content=data)
                    for data in chunks)

        responses = self._client.streaming_recognize(config=streaming_config,
                                                     requests=requests)

        for response in responses:
            if response.speech_event_type == END_OF_SINGLE_UTTERANCE:
                recorder.done()

            for result in response.results:
                if result.is_final:
                    return result.alternatives[0].transcript

    return None
[ "def", "recognize", "(", "self", ",", "language_code", "=", "'en-US'", ",", "hint_phrases", "=", "None", ")", ":", "streaming_config", "=", "speech", ".", "types", ".", "StreamingRecognitionConfig", "(", "config", "=", "self", ".", "_make_config", "(", "language_code", ",", "hint_phrases", ")", ",", "single_utterance", "=", "True", ")", "with", "Recorder", "(", ")", "as", "recorder", ":", "chunks", "=", "recorder", ".", "record", "(", "AUDIO_FORMAT", ",", "chunk_duration_sec", "=", "0.1", ",", "on_start", "=", "self", ".", "start_listening", ",", "on_stop", "=", "self", ".", "stop_listening", ")", "requests", "=", "(", "speech", ".", "types", ".", "StreamingRecognizeRequest", "(", "audio_content", "=", "data", ")", "for", "data", "in", "chunks", ")", "responses", "=", "self", ".", "_client", ".", "streaming_recognize", "(", "config", "=", "streaming_config", ",", "requests", "=", "requests", ")", "for", "response", "in", "responses", ":", "if", "response", ".", "speech_event_type", "==", "END_OF_SINGLE_UTTERANCE", ":", "recorder", ".", "done", "(", ")", "for", "result", "in", "response", ".", "results", ":", "if", "result", ".", "is_final", ":", "return", "result", ".", "alternatives", "[", "0", "]", ".", "transcript", "return", "None" ]
https://github.com/google/aiyprojects-raspbian/blob/964f07f5b4bd2ec785cfda6f318e50e1b67d4758/src/aiy/cloudspeech.py#L104-L144
ifwe/digsby
f5fe00244744aa131e07f09348d10563f3d8fa99
digsby/lib/pyxmpp/jabber/clientstream.py
python
LegacyClientStream._post_auth
(self)
Unregister legacy authentication handlers after successfull authentication.
Unregister legacy authentication handlers after successfull authentication.
[ "Unregister", "legacy", "authentication", "handlers", "after", "successfull", "authentication", "." ]
def _post_auth(self):
    """Unregister legacy authentication handlers after successfull
    authentication."""
    ClientStream._post_auth(self)
    if not self.initiator:
        self.unset_iq_get_handler("query","jabber:iq:auth")
        self.unset_iq_set_handler("query","jabber:iq:auth")
[ "def", "_post_auth", "(", "self", ")", ":", "ClientStream", ".", "_post_auth", "(", "self", ")", "if", "not", "self", ".", "initiator", ":", "self", ".", "unset_iq_get_handler", "(", "\"query\"", ",", "\"jabber:iq:auth\"", ")", "self", ".", "unset_iq_set_handler", "(", "\"query\"", ",", "\"jabber:iq:auth\"", ")" ]
https://github.com/ifwe/digsby/blob/f5fe00244744aa131e07f09348d10563f3d8fa99/digsby/lib/pyxmpp/jabber/clientstream.py#L99-L105
mchristopher/PokemonGo-DesktopMap
ec37575f2776ee7d64456e2a1f6b6b78830b4fe0
app/pylibs/osx64/Cryptodome/Hash/SHAKE256.py
python
SHAKE256_XOF.update
(self, data)
return self
Continue hashing of a message by consuming the next chunk of data. Repeated calls are equivalent to a single call with the concatenation of all the arguments. In other words: >>> m.update(a); m.update(b) is equivalent to: >>> m.update(a+b) You cannot use ``update`` anymore after the first call to ``read``. :Parameters: data : byte string The next chunk of the message being hashed.
Continue hashing of a message by consuming the next chunk of data.
[ "Continue", "hashing", "of", "a", "message", "by", "consuming", "the", "next", "chunk", "of", "data", "." ]
def update(self, data):
    """Continue hashing of a message by consuming the next chunk of data.

    Repeated calls are equivalent to a single call with the concatenation
    of all the arguments. In other words:

       >>> m.update(a); m.update(b)

    is equivalent to:

       >>> m.update(a+b)

    You cannot use ``update`` anymore after the first call to ``read``.

    :Parameters:
      data : byte string
        The next chunk of the message being hashed.
    """

    if self._is_squeezing:
        raise TypeError("You cannot call 'update' after the first 'read'")

    expect_byte_string(data)
    result = _raw_keccak_lib.keccak_absorb(self._state.get(),
                                           data,
                                           c_size_t(len(data)))
    if result:
        raise ValueError("Error %d while updating SHAKE256 state"
                         % result)
    return self
[ "def", "update", "(", "self", ",", "data", ")", ":", "if", "self", ".", "_is_squeezing", ":", "raise", "TypeError", "(", "\"You cannot call 'update' after the first 'read'\"", ")", "expect_byte_string", "(", "data", ")", "result", "=", "_raw_keccak_lib", ".", "keccak_absorb", "(", "self", ".", "_state", ".", "get", "(", ")", ",", "data", ",", "c_size_t", "(", "len", "(", "data", ")", ")", ")", "if", "result", ":", "raise", "ValueError", "(", "\"Error %d while updating SHAKE256 state\"", "%", "result", ")", "return", "self" ]
https://github.com/mchristopher/PokemonGo-DesktopMap/blob/ec37575f2776ee7d64456e2a1f6b6b78830b4fe0/app/pylibs/osx64/Cryptodome/Hash/SHAKE256.py#L85-L114
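The update/read protocol documented above is easiest to see in a short usage sketch; this assumes the pycryptodome package, which provides the Cryptodome namespace used in the record's path:

    from Cryptodome.Hash import SHAKE256

    shake = SHAKE256.new()
    shake.update(b"first chunk")
    shake.update(b"second chunk")   # equivalent to one update(b"first chunksecond chunk")
    digest = shake.read(32)         # start squeezing: 32 bytes of output
    # shake.update(b"more")         # would raise TypeError after the first read()
    print(digest.hex())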
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/idlelib/searchengine.py
python
SearchEngine.iswrap
(self)
return self.wrapvar.get()
[]
def iswrap(self): return self.wrapvar.get()
[ "def", "iswrap", "(", "self", ")", ":", "return", "self", ".", "wrapvar", ".", "get", "(", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/idlelib/searchengine.py#L52-L53
iotaledger/iota.py
f596c1ac0d9bcbceda1cf6109cd921943a6599b3
iota/adapter/__init__.py
python
HttpAdapter._interpret_response
( self, response: Response, payload: dict, expected_status: Container[int] )
Interprets the HTTP response from the node. :param response: The response object received from :py:meth:`_send_http_request`. :param payload: The request payload that was sent (used for debugging). :param expected_status: The response should match one of these status codes to be considered valid.
Interprets the HTTP response from the node.
[ "Interprets", "the", "HTTP", "response", "from", "the", "node", "." ]
def _interpret_response( self, response: Response, payload: dict, expected_status: Container[int] ) -> dict: """ Interprets the HTTP response from the node. :param response: The response object received from :py:meth:`_send_http_request`. :param payload: The request payload that was sent (used for debugging). :param expected_status: The response should match one of these status codes to be considered valid. """ raw_content = response.text if not raw_content: raise with_context( exc=BadApiResponse( 'Empty {status} response from node.'.format( status=response.status_code, ), ), context={ 'request': payload, }, ) try: decoded: dict = json.loads(raw_content) # :bc: py2k doesn't have JSONDecodeError except ValueError: raise with_context( exc=BadApiResponse( 'Non-JSON {status} response from node: ' '{raw_content}'.format( status=response.status_code, raw_content=raw_content, ) ), context={ 'request': payload, 'raw_response': raw_content, }, ) if not isinstance(decoded, dict): raise with_context( exc=BadApiResponse( 'Malformed {status} response from node: {decoded!r}'.format( status=response.status_code, decoded=decoded, ), ), context={ 'request': payload, 'response': decoded, }, ) if response.status_code in expected_status: return decoded error = None try: if response.status_code == codes['BAD_REQUEST']: error = decoded['error'] elif response.status_code == codes['INTERNAL_SERVER_ERROR']: error = decoded['exception'] except KeyError: pass raise with_context( exc=BadApiResponse( '{status} response from node: {error}'.format( error=error or decoded, status=response.status_code, ), ), context={ 'request': payload, 'response': decoded, }, )
[ "def", "_interpret_response", "(", "self", ",", "response", ":", "Response", ",", "payload", ":", "dict", ",", "expected_status", ":", "Container", "[", "int", "]", ")", "->", "dict", ":", "raw_content", "=", "response", ".", "text", "if", "not", "raw_content", ":", "raise", "with_context", "(", "exc", "=", "BadApiResponse", "(", "'Empty {status} response from node.'", ".", "format", "(", "status", "=", "response", ".", "status_code", ",", ")", ",", ")", ",", "context", "=", "{", "'request'", ":", "payload", ",", "}", ",", ")", "try", ":", "decoded", ":", "dict", "=", "json", ".", "loads", "(", "raw_content", ")", "# :bc: py2k doesn't have JSONDecodeError", "except", "ValueError", ":", "raise", "with_context", "(", "exc", "=", "BadApiResponse", "(", "'Non-JSON {status} response from node: '", "'{raw_content}'", ".", "format", "(", "status", "=", "response", ".", "status_code", ",", "raw_content", "=", "raw_content", ",", ")", ")", ",", "context", "=", "{", "'request'", ":", "payload", ",", "'raw_response'", ":", "raw_content", ",", "}", ",", ")", "if", "not", "isinstance", "(", "decoded", ",", "dict", ")", ":", "raise", "with_context", "(", "exc", "=", "BadApiResponse", "(", "'Malformed {status} response from node: {decoded!r}'", ".", "format", "(", "status", "=", "response", ".", "status_code", ",", "decoded", "=", "decoded", ",", ")", ",", ")", ",", "context", "=", "{", "'request'", ":", "payload", ",", "'response'", ":", "decoded", ",", "}", ",", ")", "if", "response", ".", "status_code", "in", "expected_status", ":", "return", "decoded", "error", "=", "None", "try", ":", "if", "response", ".", "status_code", "==", "codes", "[", "'BAD_REQUEST'", "]", ":", "error", "=", "decoded", "[", "'error'", "]", "elif", "response", ".", "status_code", "==", "codes", "[", "'INTERNAL_SERVER_ERROR'", "]", ":", "error", "=", "decoded", "[", "'exception'", "]", "except", "KeyError", ":", "pass", "raise", "with_context", "(", "exc", "=", "BadApiResponse", "(", "'{status} response from node: {error}'", ".", "format", "(", "error", "=", "error", "or", "decoded", ",", "status", "=", "response", ".", "status_code", ",", ")", ",", ")", ",", "context", "=", "{", "'request'", ":", "payload", ",", "'response'", ":", "decoded", ",", "}", ",", ")" ]
https://github.com/iotaledger/iota.py/blob/f596c1ac0d9bcbceda1cf6109cd921943a6599b3/iota/adapter/__init__.py#L405-L497
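A rough sketch of exercising the error path above, assuming PyOTA is installed; the node URI is a placeholder, and building a requests.Response by hand (setting _content) is a test-only trick, not part of the public requests API:

    import json
    import requests
    from iota.adapter import HttpAdapter

    adapter = HttpAdapter('http://localhost:14265')   # placeholder node URI

    resp = requests.models.Response()                 # hand-built response for the demo
    resp.status_code = 400
    resp._content = json.dumps({'error': 'invalid command'}).encode('utf-8')

    try:
        adapter._interpret_response(resp, payload={'command': 'getNodeInfo'},
                                    expected_status={200})
    except Exception as exc:    # BadApiResponse carrying the node's 'error' field
        print(exc)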
openedx/edx-platform
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
lms/djangoapps/verify_student/services.py
python
IDVerificationService.user_is_verified
(cls, user)
return False
Return whether or not a user has satisfactorily proved their identity. Depending on the policy, this can expire after some period of time, so a user might have to renew periodically.
Return whether or not a user has satisfactorily proved their identity. Depending on the policy, this can expire after some period of time, so a user might have to renew periodically.
[ "Return", "whether", "or", "not", "a", "user", "has", "satisfactorily", "proved", "their", "identity", ".", "Depending", "on", "the", "policy", "this", "can", "expire", "after", "some", "period", "of", "time", "so", "a", "user", "might", "have", "to", "renew", "periodically", "." ]
def user_is_verified(cls, user): """ Return whether or not a user has satisfactorily proved their identity. Depending on the policy, this can expire after some period of time, so a user might have to renew periodically. """ expiration_datetime = cls.get_expiration_datetime(user, ['approved']) if expiration_datetime: return expiration_datetime >= now() return False
[ "def", "user_is_verified", "(", "cls", ",", "user", ")", ":", "expiration_datetime", "=", "cls", ".", "get_expiration_datetime", "(", "user", ",", "[", "'approved'", "]", ")", "if", "expiration_datetime", ":", "return", "expiration_datetime", ">=", "now", "(", ")", "return", "False" ]
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/lms/djangoapps/verify_student/services.py#L60-L69
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/wled/light.py
python
WLEDSegmentLight.available
(self)
return super().available
Return True if entity is available.
Return True if entity is available.
[ "Return", "True", "if", "entity", "is", "available", "." ]
def available(self) -> bool: """Return True if entity is available.""" try: self.coordinator.data.state.segments[self._segment] except IndexError: return False return super().available
[ "def", "available", "(", "self", ")", "->", "bool", ":", "try", ":", "self", ".", "coordinator", ".", "data", ".", "state", ".", "segments", "[", "self", ".", "_segment", "]", "except", "IndexError", ":", "return", "False", "return", "super", "(", ")", ".", "available" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/wled/light.py#L139-L146
JelteF/PyLaTeX
1a73261b771ae15afbb3ca5f06d4ba61328f1c62
pylatex/headfoot.py
python
Head.__init__
(self, position=None, *, data=None)
r""" Args ---- position: str the headers position: L, C, R data: str or `~.LatexObject` The data to place inside the Head element
r""" Args ---- position: str the headers position: L, C, R data: str or `~.LatexObject` The data to place inside the Head element
[ "r", "Args", "----", "position", ":", "str", "the", "headers", "position", ":", "L", "C", "R", "data", ":", "str", "or", "~", ".", "LatexObject", "The", "data", "to", "place", "inside", "the", "Head", "element" ]
def __init__(self, position=None, *, data=None): r""" Args ---- position: str the headers position: L, C, R data: str or `~.LatexObject` The data to place inside the Head element """ self.position = position super().__init__(data=data, options=position)
[ "def", "__init__", "(", "self", ",", "position", "=", "None", ",", "*", ",", "data", "=", "None", ")", ":", "self", ".", "position", "=", "position", "super", "(", ")", ".", "__init__", "(", "data", "=", "data", ",", "options", "=", "position", ")" ]
https://github.com/JelteF/PyLaTeX/blob/1a73261b771ae15afbb3ca5f06d4ba61328f1c62/pylatex/headfoot.py#L87-L99
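A minimal sketch of the positional header above, assuming PyLaTeX exports Head and PageStyle at the top level as in its documented examples:

    from pylatex import Document, PageStyle, Head

    style = PageStyle("header")
    style.append(Head("L", data="Project report"))   # 'L' -> left header slot

    doc = Document()
    doc.preamble.append(style)
    doc.change_document_style("header")              # activate the page style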
duckduckgo/zeroclickinfo-fathead
477c5652f6576746618dbb8158b67f0960ae9f56
lib/fathead/reactjs/parse.py
python
APIDocsParser.__init__
(self, data)
Initialize APIDocsParser object with API Reference HTML
Initialize APIDocsParser object with API Reference HTML
[ "Initialize", "APIDocsParser", "object", "with", "API", "Reference", "HTML" ]
def __init__(self, data): """ Initialize APIDocsParser object with API Reference HTML """ self.data = self.get_api_reference_html(data) self.parsed_data = []
[ "def", "__init__", "(", "self", ",", "data", ")", ":", "self", ".", "data", "=", "self", ".", "get_api_reference_html", "(", "data", ")", "self", ".", "parsed_data", "=", "[", "]" ]
https://github.com/duckduckgo/zeroclickinfo-fathead/blob/477c5652f6576746618dbb8158b67f0960ae9f56/lib/fathead/reactjs/parse.py#L49-L54
yuxiaokui/Intranet-Penetration
f57678a204840c83cbf3308e3470ae56c5ff514b
proxy/XX-Net/code/default/gae_proxy/server/lib/google/appengine/api/search/search.py
python
Document.__getitem__
(self, field_name)
return self._BuildFieldMap().get(field_name, [])
Returns a list of all fields with the provided field name. Args: field_name: The name of the field to return. Returns: All fields with the given name, or an empty list if no field with that name exists.
Returns a list of all fields with the provided field name.
[ "Returns", "a", "list", "of", "all", "fields", "with", "the", "provided", "field", "name", "." ]
def __getitem__(self, field_name): """Returns a list of all fields with the provided field name. Args: field_name: The name of the field to return. Returns: All fields with the given name, or an empty list if no field with that name exists. """ return self._BuildFieldMap().get(field_name, [])
[ "def", "__getitem__", "(", "self", ",", "field_name", ")", ":", "return", "self", ".", "_BuildFieldMap", "(", ")", ".", "get", "(", "field_name", ",", "[", "]", ")" ]
https://github.com/yuxiaokui/Intranet-Penetration/blob/f57678a204840c83cbf3308e3470ae56c5ff514b/proxy/XX-Net/code/default/gae_proxy/server/lib/google/appengine/api/search/search.py#L1890-L1900
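A short sketch of the list-valued lookup above; it only runs where the App Engine search API is available:

    from google.appengine.api import search

    doc = search.Document(
        doc_id='doc1',
        fields=[search.TextField(name='author', value='Ada'),
                search.TextField(name='author', value='Grace')])

    doc['author']      # both 'author' fields, returned in a list
    doc['publisher']   # [] -- absent names give an empty list, not a KeyError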
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/networkx/algorithms/threshold.py
python
make_compact
(creation_sequence)
return ccs
Returns the creation sequence in a compact form that is the number of 'i's and 'd's alternating. Examples -------- >>> from networkx.algorithms.threshold import make_compact >>> make_compact(['d', 'i', 'i', 'd', 'd', 'i', 'i', 'i']) [1, 2, 2, 3] >>> make_compact(['d', 'd', 'd', 'i', 'd', 'd']) [3, 1, 2] Notice that the first number is the first vertex to be used for construction and so is always 'd'. Labeled creation sequences lose their labels in the compact representation. >>> make_compact([3, 1, 2]) [3, 1, 2]
Returns the creation sequence in a compact form that is the number of 'i's and 'd's alternating.
[ "Returns", "the", "creation", "sequence", "in", "a", "compact", "form", "that", "is", "the", "number", "of", "i", "s", "and", "d", "s", "alternating", "." ]
def make_compact(creation_sequence): """ Returns the creation sequence in a compact form that is the number of 'i's and 'd's alternating. Examples -------- >>> from networkx.algorithms.threshold import make_compact >>> make_compact(['d', 'i', 'i', 'd', 'd', 'i', 'i', 'i']) [1, 2, 2, 3] >>> make_compact(['d', 'd', 'd', 'i', 'd', 'd']) [3, 1, 2] Notice that the first number is the first vertex to be used for construction and so is always 'd'. Labeled creation sequences lose their labels in the compact representation. >>> make_compact([3, 1, 2]) [3, 1, 2] """ first = creation_sequence[0] if isinstance(first, str): # creation sequence cs = creation_sequence[:] elif isinstance(first, tuple): # labeled creation sequence cs = [s[1] for s in creation_sequence] elif isinstance(first, int): # compact creation sequence return creation_sequence else: raise TypeError("Not a valid creation sequence type") ccs = [] count = 1 # count the run lengths of d's or i's. for i in range(1, len(cs)): if cs[i] == cs[i - 1]: count += 1 else: ccs.append(count) count = 1 ccs.append(count) # don't forget the last one return ccs
[ "def", "make_compact", "(", "creation_sequence", ")", ":", "first", "=", "creation_sequence", "[", "0", "]", "if", "isinstance", "(", "first", ",", "str", ")", ":", "# creation sequence", "cs", "=", "creation_sequence", "[", ":", "]", "elif", "isinstance", "(", "first", ",", "tuple", ")", ":", "# labeled creation sequence", "cs", "=", "[", "s", "[", "1", "]", "for", "s", "in", "creation_sequence", "]", "elif", "isinstance", "(", "first", ",", "int", ")", ":", "# compact creation sequence", "return", "creation_sequence", "else", ":", "raise", "TypeError", "(", "\"Not a valid creation sequence type\"", ")", "ccs", "=", "[", "]", "count", "=", "1", "# count the run lengths of d's or i's.", "for", "i", "in", "range", "(", "1", ",", "len", "(", "cs", ")", ")", ":", "if", "cs", "[", "i", "]", "==", "cs", "[", "i", "-", "1", "]", ":", "count", "+=", "1", "else", ":", "ccs", ".", "append", "(", "count", ")", "count", "=", "1", "ccs", ".", "append", "(", "count", ")", "# don't forget the last one", "return", "ccs" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/networkx/algorithms/threshold.py#L111-L152
pypa/cibuildwheel
5255155bc57eb6224354356df648dc42e31a0028
noxfile.py
python
tests
(session: nox.Session)
Run the unit and regular tests.
Run the unit and regular tests.
[ "Run", "the", "unit", "and", "regular", "tests", "." ]
def tests(session: nox.Session) -> None: """ Run the unit and regular tests. """ unit_test_args = ["--run-docker"] if sys.platform.startswith("linux") else [] session.install("-e", ".[test]") if session.posargs: session.run("pytest", *session.posargs) else: session.run("pytest", "unit_test", *unit_test_args) session.run("pytest", "test", "-x", "--durations", "0", "--timeout=2400", "test")
[ "def", "tests", "(", "session", ":", "nox", ".", "Session", ")", "->", "None", ":", "unit_test_args", "=", "[", "\"--run-docker\"", "]", "if", "sys", ".", "platform", ".", "startswith", "(", "\"linux\"", ")", "else", "[", "]", "session", ".", "install", "(", "\"-e\"", ",", "\".[test]\"", ")", "if", "session", ".", "posargs", ":", "session", ".", "run", "(", "\"pytest\"", ",", "*", "session", ".", "posargs", ")", "else", ":", "session", ".", "run", "(", "\"pytest\"", ",", "\"unit_test\"", ",", "*", "unit_test_args", ")", "session", ".", "run", "(", "\"pytest\"", ",", "\"test\"", ",", "\"-x\"", ",", "\"--durations\"", ",", "\"0\"", ",", "\"--timeout=2400\"", ",", "\"test\"", ")" ]
https://github.com/pypa/cibuildwheel/blob/5255155bc57eb6224354356df648dc42e31a0028/noxfile.py#L15-L25
CiscoDevNet/netprog_basics
3fa67855ef461ccaee283dcbbdd9bf00e7a52378
network_controllers/apicem/troubleshoot_step4.py
python
host_list
(apic, ticket, ip=None, mac=None, name=None)
return response.json()["response"]
Use the REST API to retrieve the list of hosts. Optional parameters to filter by: IP address MAC address Hostname
Use the REST API to retrieve the list of hosts. Optional parameters to filter by: IP address MAC address Hostname
[ "Use", "the", "REST", "API", "to", "retrieve", "the", "list", "of", "hosts", ".", "Optional", "parameters", "to", "filter", "by", ":", "IP", "address", "MAC", "address", "Hostname" ]
def host_list(apic, ticket, ip=None, mac=None, name=None): """ Use the REST API to retrieve the list of hosts. Optional parameters to filter by: IP address MAC address Hostname """ url = "https://{}/api/v1/host".format(apic) headers["x-auth-token"] = ticket filters = [] # Add filters if provided if ip: filters.append("hostIp={}".format(ip)) if mac: filters.append("hostMac={}".format(mac)) if name: filters.append("hostName={}".format(name)) if len(filters) > 0: url += "?" + "&".join(filters) # Make API request and return the response body response = requests.request("GET", url, headers=headers, verify=False) return response.json()["response"]
[ "def", "host_list", "(", "apic", ",", "ticket", ",", "ip", "=", "None", ",", "mac", "=", "None", ",", "name", "=", "None", ")", ":", "url", "=", "\"https://{}/api/v1/host\"", ".", "format", "(", "apic", ")", "headers", "[", "\"x-auth-token\"", "]", "=", "ticket", "filters", "=", "[", "]", "# Add filters if provided", "if", "ip", ":", "filters", ".", "append", "(", "\"hostIp={}\"", ".", "format", "(", "ip", ")", ")", "if", "mac", ":", "filters", ".", "append", "(", "\"hostMac={}\"", ".", "format", "(", "mac", ")", ")", "if", "name", ":", "filters", ".", "append", "(", "\"hostName={}\"", ".", "format", "(", "name", ")", ")", "if", "len", "(", "filters", ")", ">", "0", ":", "url", "+=", "\"?\"", "+", "\"&\"", ".", "join", "(", "filters", ")", "# Make API request and return the response body", "response", "=", "requests", ".", "request", "(", "\"GET\"", ",", "url", ",", "headers", "=", "headers", ",", "verify", "=", "False", ")", "return", "response", ".", "json", "(", ")", "[", "\"response\"", "]" ]
https://github.com/CiscoDevNet/netprog_basics/blob/3fa67855ef461ccaee283dcbbdd9bf00e7a52378/network_controllers/apicem/troubleshoot_step4.py#L50-L74
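The filter-assembly pattern above can be shown in isolation; the controller hostname and IP below are placeholders:

    ip, mac, name = "10.1.1.1", None, None

    filters = []
    if ip:
        filters.append("hostIp={}".format(ip))
    if mac:
        filters.append("hostMac={}".format(mac))
    if name:
        filters.append("hostName={}".format(name))

    url = "https://apic.example.com/api/v1/host"
    if filters:
        url += "?" + "&".join(filters)
    print(url)   # https://apic.example.com/api/v1/host?hostIp=10.1.1.1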
dayorbyte/MongoAlchemy
e64ef0c87feff385637459707fe6090bd789e116
mongoalchemy/fields/fields.py
python
TupleField.__init__
(self, *item_types, **kwargs)
:param item_types: instances of :class:`Field`, in the order they \ will appear in the tuples. :param kwargs: arguments for :class:`Field`
:param item_types: instances of :class:`Field`, in the order they \ will appear in the tuples. :param kwargs: arguments for :class:`Field`
[ ":", "param", "item_types", ":", "instances", "of", ":", "class", ":", "Field", "in", "the", "order", "they", "\\", "will", "appear", "in", "the", "tuples", ".", ":", "param", "kwargs", ":", "arguments", "for", ":", "class", ":", "Field" ]
def __init__(self, *item_types, **kwargs): ''' :param item_types: instances of :class:`Field`, in the order they \ will appear in the tuples. :param kwargs: arguments for :class:`Field` ''' super(TupleField, self).__init__(**kwargs) self.size = len(item_types) self.types = item_types
[ "def", "__init__", "(", "self", ",", "*", "item_types", ",", "*", "*", "kwargs", ")", ":", "super", "(", "TupleField", ",", "self", ")", ".", "__init__", "(", "*", "*", "kwargs", ")", "self", ".", "size", "=", "len", "(", "item_types", ")", "self", ".", "types", "=", "item_types" ]
https://github.com/dayorbyte/MongoAlchemy/blob/e64ef0c87feff385637459707fe6090bd789e116/mongoalchemy/fields/fields.py#L236-L243
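A minimal sketch of declaring a fixed-shape tuple field, assuming MongoAlchemy's usual Document and field imports:

    from mongoalchemy.document import Document
    from mongoalchemy.fields import TupleField, IntField, StringField

    class Marker(Document):
        # a (label, x, y) triple; the item types fix both the size and the order
        point = TupleField(StringField(), IntField(), IntField())

    m = Marker(point=('origin', 0, 0))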
psf/black
33e3bb1e4e326713f85749705179da2e31520670
src/black/output.py
python
color_diff
(contents: str)
return "\n".join(lines)
Inject the ANSI color codes to the diff.
Inject the ANSI color codes to the diff.
[ "Inject", "the", "ANSI", "color", "codes", "to", "the", "diff", "." ]
def color_diff(contents: str) -> str: """Inject the ANSI color codes to the diff.""" lines = contents.split("\n") for i, line in enumerate(lines): if line.startswith("+++") or line.startswith("---"): line = "\033[1m" + line + "\033[0m" # bold, reset elif line.startswith("@@"): line = "\033[36m" + line + "\033[0m" # cyan, reset elif line.startswith("+"): line = "\033[32m" + line + "\033[0m" # green, reset elif line.startswith("-"): line = "\033[31m" + line + "\033[0m" # red, reset lines[i] = line return "\n".join(lines)
[ "def", "color_diff", "(", "contents", ":", "str", ")", "->", "str", ":", "lines", "=", "contents", ".", "split", "(", "\"\\n\"", ")", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "if", "line", ".", "startswith", "(", "\"+++\"", ")", "or", "line", ".", "startswith", "(", "\"---\"", ")", ":", "line", "=", "\"\\033[1m\"", "+", "line", "+", "\"\\033[0m\"", "# bold, reset", "elif", "line", ".", "startswith", "(", "\"@@\"", ")", ":", "line", "=", "\"\\033[36m\"", "+", "line", "+", "\"\\033[0m\"", "# cyan, reset", "elif", "line", ".", "startswith", "(", "\"+\"", ")", ":", "line", "=", "\"\\033[32m\"", "+", "line", "+", "\"\\033[0m\"", "# green, reset", "elif", "line", ".", "startswith", "(", "\"-\"", ")", ":", "line", "=", "\"\\033[31m\"", "+", "line", "+", "\"\\033[0m\"", "# red, reset", "lines", "[", "i", "]", "=", "line", "return", "\"\\n\"", ".", "join", "(", "lines", ")" ]
https://github.com/psf/black/blob/33e3bb1e4e326713f85749705179da2e31520670/src/black/output.py#L79-L92
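A quick sketch of the colorizer above, assuming this version of Black is importable:

    from black.output import color_diff

    diff = "\n".join([
        "--- a.py",
        "+++ b.py",
        "@@ -1 +1 @@",
        "-x = 1",
        "+x = 2",
    ])
    print(color_diff(diff))   # headers bold, hunk marker cyan, -/+ in red/green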
hyperledger/aries-cloudagent-python
2f36776e99f6053ae92eed8123b5b1b2e891c02a
demo/runners/support/agent.py
python
DemoAgent.get_new_webhook_port
(self)
return self.webhook_port
Get new webhook port for registering additional sub-wallets
Get new webhook port for registering additional sub-wallets
[ "Get", "new", "webhook", "port", "for", "registering", "additional", "sub", "-", "wallets" ]
def get_new_webhook_port(self): """Get new webhook port for registering additional sub-wallets""" self.webhook_port = self.webhook_port + 1 return self.webhook_port
[ "def", "get_new_webhook_port", "(", "self", ")", ":", "self", ".", "webhook_port", "=", "self", ".", "webhook_port", "+", "1", "return", "self", ".", "webhook_port" ]
https://github.com/hyperledger/aries-cloudagent-python/blob/2f36776e99f6053ae92eed8123b5b1b2e891c02a/demo/runners/support/agent.py#L234-L237
coherence-project/Coherence
88016204c7778bf0d3ad1ae331b4d8fd725dd2af
misc/Rhythmbox-Plugin/upnp_coherence/MediaPlayer.py
python
RhythmboxPlayer.seek
(self, location, old_state)
@param location: +nL = relative seek forward n seconds -nL = relative seek backwards n seconds
[]
def seek(self, location, old_state): """ @param location: +nL = relative seek forward n seconds -nL = relative seek backwards n seconds """ self.info("player seek %r", location) self.player.seek(location) self.server.av_transport_server.set_variable(0, 'TransportState', old_state)
[ "def", "seek", "(", "self", ",", "location", ",", "old_state", ")", ":", "self", ".", "info", "(", "\"player seek %r\"", ",", "location", ")", "self", ".", "player", ".", "seek", "(", "location", ")", "self", ".", "server", ".", "av_transport_server", ".", "set_variable", "(", "0", ",", "'TransportState'", ",", "old_state", ")" ]
https://github.com/coherence-project/Coherence/blob/88016204c7778bf0d3ad1ae331b4d8fd725dd2af/misc/Rhythmbox-Plugin/upnp_coherence/MediaPlayer.py#L376-L383
ClusterHQ/flocker
eaa586248986d7cd681c99c948546c2b507e44de
flocker/common/logging.py
python
log_info
(**kwargs)
Simple logging wrapper around Eliot messages. This fills in the message type and passes all other arguments on to ``Message.log``.
Simple logging wrapper around Eliot messages.
[ "Simple", "logging", "wrapper", "around", "Eliot", "messages", "." ]
def log_info(**kwargs): """ Simple logging wrapper around Eliot messages. This fills in the message type and passes all other arguments on to ``Message.log``. """ Message.log( message_type=_compute_message_type(stack()[1]), **kwargs )
[ "def", "log_info", "(", "*", "*", "kwargs", ")", ":", "Message", ".", "log", "(", "message_type", "=", "_compute_message_type", "(", "stack", "(", ")", "[", "1", "]", ")", ",", "*", "*", "kwargs", ")" ]
https://github.com/ClusterHQ/flocker/blob/eaa586248986d7cd681c99c948546c2b507e44de/flocker/common/logging.py#L38-L48
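The wrapper above delegates to Eliot's Message.log after deriving a message type from the caller's stack frame; the underlying Eliot call looks roughly like this (assuming the eliot package, with the message_type string as an illustrative stand-in for the computed one):

    import sys
    from eliot import Message, to_file

    to_file(sys.stdout)   # emit structured log messages as JSON lines
    Message.log(message_type="flocker:common:demo", node="a1", port=8080)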
lightkurve/lightkurve
70d1c4cd1ab30f24c83e54bdcea4dd16624bfd9c
src/lightkurve/correctors/sffcorrector.py
python
_get_window_points
( centroid_col, centroid_row, windows, arclength=None, breakindex=None )
return np.asarray(window_points, dtype=int)
Returns indices where thrusters are fired. Parameters ---------- lc : `.LightCurve` object Input light curve windows: int Number of windows to split the light curve into arc: np.ndarray Arclength for the roll motion breakindex: int Cadence where there is a natural break. Windows will be automatically put here.
Returns indices where thrusters are fired.
[ "Returns", "indices", "where", "thrusters", "are", "fired", "." ]
def _get_window_points( centroid_col, centroid_row, windows, arclength=None, breakindex=None ): """Returns indices where thrusters are fired. Parameters ---------- lc : `.LightCurve` object Input light curve windows: int Number of windows to split the light curve into arc: np.ndarray Arclength for the roll motion breakindex: int Cadence where there is a natural break. Windows will be automatically put here. """ if arclength is None: arclength = _estimate_arclength(centroid_col, centroid_row) # Validate break indices if isinstance(breakindex, int): breakindexes = [breakindex] if breakindex is None: breakindexes = [] elif (breakindex[0] == 0) & (len(breakindex) == 1): breakindexes = [] else: breakindexes = breakindex if not isinstance(breakindexes, list): raise ValueError("`breakindex` must be an int or a list") # If the user asks for break indices we should still return them, # even if there is only 1 window. if windows == 1: return breakindexes # Find evenly spaced window points dt = len(centroid_col) / windows lower_idx = np.append(0, breakindexes) upper_idx = np.append(breakindexes, len(centroid_col)) window_points = np.hstack( [np.asarray(np.arange(a, b, dt), int) for a, b in zip(lower_idx, upper_idx)] ) # Get thruster firings thrusters = _get_thruster_firings(arclength) for b in breakindexes: thrusters[b] = True thrusters = np.where(thrusters)[0] # Find the nearest point to each thruster firing, unless it's a user supplied break point if len(thrusters) > 0: window_points = [ thrusters[np.argmin(np.abs(thrusters - wp))] + 1 for wp in window_points if wp not in breakindexes ] window_points = np.unique(np.hstack([window_points, breakindexes])) # If the first or last windows are very short (<40% median window length), # then we add them to the second or penultimate window, respectively, # by removing their break points. median_length = np.median(np.diff(window_points)) if window_points[0] < 0.4 * median_length: window_points = window_points[1:] if window_points[-1] > (len(centroid_col) - 0.4 * median_length): window_points = window_points[:-1] return np.asarray(window_points, dtype=int)
[ "def", "_get_window_points", "(", "centroid_col", ",", "centroid_row", ",", "windows", ",", "arclength", "=", "None", ",", "breakindex", "=", "None", ")", ":", "if", "arclength", "is", "None", ":", "arclength", "=", "_estimate_arclength", "(", "centroid_col", ",", "centroid_row", ")", "# Validate break indices", "if", "isinstance", "(", "breakindex", ",", "int", ")", ":", "breakindexes", "=", "[", "breakindex", "]", "if", "breakindex", "is", "None", ":", "breakindexes", "=", "[", "]", "elif", "(", "breakindex", "[", "0", "]", "==", "0", ")", "&", "(", "len", "(", "breakindex", ")", "==", "1", ")", ":", "breakindexes", "=", "[", "]", "else", ":", "breakindexes", "=", "breakindex", "if", "not", "isinstance", "(", "breakindexes", ",", "list", ")", ":", "raise", "ValueError", "(", "\"`breakindex` must be an int or a list\"", ")", "# If the user asks for break indices we should still return them,", "# even if there is only 1 window.", "if", "windows", "==", "1", ":", "return", "breakindexes", "# Find evenly spaced window points", "dt", "=", "len", "(", "centroid_col", ")", "/", "windows", "lower_idx", "=", "np", ".", "append", "(", "0", ",", "breakindexes", ")", "upper_idx", "=", "np", ".", "append", "(", "breakindexes", ",", "len", "(", "centroid_col", ")", ")", "window_points", "=", "np", ".", "hstack", "(", "[", "np", ".", "asarray", "(", "np", ".", "arange", "(", "a", ",", "b", ",", "dt", ")", ",", "int", ")", "for", "a", ",", "b", "in", "zip", "(", "lower_idx", ",", "upper_idx", ")", "]", ")", "# Get thruster firings", "thrusters", "=", "_get_thruster_firings", "(", "arclength", ")", "for", "b", "in", "breakindexes", ":", "thrusters", "[", "b", "]", "=", "True", "thrusters", "=", "np", ".", "where", "(", "thrusters", ")", "[", "0", "]", "# Find the nearest point to each thruster firing, unless it's a user supplied break point", "if", "len", "(", "thrusters", ")", ">", "0", ":", "window_points", "=", "[", "thrusters", "[", "np", ".", "argmin", "(", "np", ".", "abs", "(", "thrusters", "-", "wp", ")", ")", "]", "+", "1", "for", "wp", "in", "window_points", "if", "wp", "not", "in", "breakindexes", "]", "window_points", "=", "np", ".", "unique", "(", "np", ".", "hstack", "(", "[", "window_points", ",", "breakindexes", "]", ")", ")", "# If the first or last windows are very short (<40% median window length),", "# then we add them to the second or penultimate window, respectively,", "# by removing their break points.", "median_length", "=", "np", ".", "median", "(", "np", ".", "diff", "(", "window_points", ")", ")", "if", "window_points", "[", "0", "]", "<", "0.4", "*", "median_length", ":", "window_points", "=", "window_points", "[", "1", ":", "]", "if", "window_points", "[", "-", "1", "]", ">", "(", "len", "(", "centroid_col", ")", "-", "0.4", "*", "median_length", ")", ":", "window_points", "=", "window_points", "[", ":", "-", "1", "]", "return", "np", ".", "asarray", "(", "window_points", ",", "dtype", "=", "int", ")" ]
https://github.com/lightkurve/lightkurve/blob/70d1c4cd1ab30f24c83e54bdcea4dd16624bfd9c/src/lightkurve/correctors/sffcorrector.py#L415-L485
python-diamond/Diamond
7000e16cfdf4508ed9291fc4b3800592557b2431
src/collectors/mongodb/mongodb.py
python
MongoDBCollector.get_default_config_help
(self)
return config_help
[]
def get_default_config_help(self): config_help = super(MongoDBCollector, self).get_default_config_help() config_help.update({ 'hosts': 'Array of hostname(:port) elements to get metrics from' 'Set an alias by prefixing host:port with alias@', 'host': 'A single hostname(:port) to get metrics from' ' (can be used instead of hosts and overrides it)', 'user': 'Username for authenticated login (optional)', 'passwd': 'Password for authenticated login (optional)', 'databases': 'A regex of which databases to gather metrics for.' ' Defaults to all databases.', 'ignore_collections': 'A regex of which collections to ignore.' ' MapReduce temporary collections (tmp.mr.*)' ' are ignored by default.', 'collection_sample_rate': 'Only send stats for a consistent subset ' 'of collections. This is applied after ' 'collections are ignored via ' 'ignore_collections Sampling uses crc32 ' 'so it is consistent across ' 'replicas. Value between 0 and 1. ' 'Default is 1', 'network_timeout': 'Timeout for mongodb connection (in' ' milliseconds). There is no timeout by' ' default.', 'simple': 'Only collect the same metrics as mongostat.', 'translate_collections': 'Translate dot (.) to underscores (_)' ' in collection names.', 'replace_dashes_in_metric_keys': 'Replace dashes (-) to dots (.)' ' in database object names and metrics', 'ssl': 'True to enable SSL connections to the MongoDB server.' ' Default is False', 'replica': 'True to enable replica set logging. Reports health of' ' individual nodes as well as basic aggregate stats.' ' Default is False', 'replset_node_name': 'Identifier for reporting replset metrics. ' 'Default is _id' }) return config_help
[ "def", "get_default_config_help", "(", "self", ")", ":", "config_help", "=", "super", "(", "MongoDBCollector", ",", "self", ")", ".", "get_default_config_help", "(", ")", "config_help", ".", "update", "(", "{", "'hosts'", ":", "'Array of hostname(:port) elements to get metrics from'", "'Set an alias by prefixing host:port with alias@'", ",", "'host'", ":", "'A single hostname(:port) to get metrics from'", "' (can be used instead of hosts and overrides it)'", ",", "'user'", ":", "'Username for authenticated login (optional)'", ",", "'passwd'", ":", "'Password for authenticated login (optional)'", ",", "'databases'", ":", "'A regex of which databases to gather metrics for.'", "' Defaults to all databases.'", ",", "'ignore_collections'", ":", "'A regex of which collections to ignore.'", "' MapReduce temporary collections (tmp.mr.*)'", "' are ignored by default.'", ",", "'collection_sample_rate'", ":", "'Only send stats for a consistent subset '", "'of collections. This is applied after '", "'collections are ignored via '", "'ignore_collections Sampling uses crc32 '", "'so it is consistent across '", "'replicas. Value between 0 and 1. '", "'Default is 1'", ",", "'network_timeout'", ":", "'Timeout for mongodb connection (in'", "' milliseconds). There is no timeout by'", "' default.'", ",", "'simple'", ":", "'Only collect the same metrics as mongostat.'", ",", "'translate_collections'", ":", "'Translate dot (.) to underscores (_)'", "' in collection names.'", ",", "'replace_dashes_in_metric_keys'", ":", "'Replace dashes (-) to dots (.)'", "' in database object names and metrics'", ",", "'ssl'", ":", "'True to enable SSL connections to the MongoDB server.'", "' Default is False'", ",", "'replica'", ":", "'True to enable replica set logging. Reports health of'", "' individual nodes as well as basic aggregate stats.'", "' Default is False'", ",", "'replset_node_name'", ":", "'Identifier for reporting replset metrics. '", "'Default is _id'", "}", ")", "return", "config_help" ]
https://github.com/python-diamond/Diamond/blob/7000e16cfdf4508ed9291fc4b3800592557b2431/src/collectors/mongodb/mongodb.py#L49-L86
pyodide/pyodide
dcef27fe3783fada747f257cce387ca1347dd514
src/py/pyodide/webloop.py
python
WebLoop.is_running
(self)
return True
Returns ``True`` if the event loop is running. Always returns ``True`` because WebLoop has no lifecycle management.
Returns ``True`` if the event loop is running.
[ "Returns", "True", "if", "the", "event", "loop", "is", "running", "." ]
def is_running(self) -> bool: """Returns ``True`` if the event loop is running. Always returns ``True`` because WebLoop has no lifecycle management. """ return True
[ "def", "is_running", "(", "self", ")", "->", "bool", ":", "return", "True" ]
https://github.com/pyodide/pyodide/blob/dcef27fe3783fada747f257cce387ca1347dd514/src/py/pyodide/webloop.py#L47-L52
robotlearn/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
pyrobolearn/actions/action.py
python
Action.vec_torch_data
(self)
return torch.cat([data.reshape(-1) for data in self.merged_torch_data])
Return a vectorized form of all the torch tensors. Returns: torch.Tensor([N]): all the torch tensors reshaped such that they are unidimensional.
Return a vectorized form of all the torch tensors.
[ "Return", "a", "vectorized", "form", "of", "all", "the", "torch", "tensors", "." ]
def vec_torch_data(self): """ Return a vectorized form of all the torch tensors. Returns: torch.Tensor([N]): all the torch tensors reshaped such that they are unidimensional. """ return torch.cat([data.reshape(-1) for data in self.merged_torch_data])
[ "def", "vec_torch_data", "(", "self", ")", ":", "return", "torch", ".", "cat", "(", "[", "data", ".", "reshape", "(", "-", "1", ")", "for", "data", "in", "self", ".", "merged_torch_data", "]", ")" ]
https://github.com/robotlearn/pyrobolearn/blob/9cd7c060723fda7d2779fa255ac998c2c82b8436/pyrobolearn/actions/action.py#L294-L301
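The reshape-and-concatenate idiom above, shown standalone with PyTorch:

    import torch

    tensors = [torch.zeros(2, 3), torch.ones(4)]
    vec = torch.cat([t.reshape(-1) for t in tensors])
    print(vec.shape)   # torch.Size([10]) -- 2*3 + 4 elements, flattened end to end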
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/site-packages/setuptools/_vendor/pyparsing.py
python
ParserElement.__rand__
(self, other )
return other & self
Implementation of & operator when left operand is not a C{L{ParserElement}}
Implementation of & operator when left operand is not a C{L{ParserElement}}
[ "Implementation", "of", "&", "operator", "when", "left", "operand", "is", "not", "a", "C", "{", "L", "{", "ParserElement", "}}" ]
def __rand__(self, other ): """ Implementation of & operator when left operand is not a C{L{ParserElement}} """ if isinstance( other, basestring ): other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None return other & self
[ "def", "__rand__", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "basestring", ")", ":", "other", "=", "ParserElement", ".", "_literalStringClass", "(", "other", ")", "if", "not", "isinstance", "(", "other", ",", "ParserElement", ")", ":", "warnings", ".", "warn", "(", "\"Cannot combine element of type %s with ParserElement\"", "%", "type", "(", "other", ")", ",", "SyntaxWarning", ",", "stacklevel", "=", "2", ")", "return", "None", "return", "other", "&", "self" ]
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/site-packages/setuptools/_vendor/pyparsing.py#L2008-L2018
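A small sketch of the reflected operator above: with a plain string on the left, Python falls back to ParserElement.__rand__, which wraps the string in a Literal and builds an Each (match-all-in-any-order) expression:

    from pyparsing import Word, nums

    each = "red" & Word(nums)     # str & ParserElement -> __rand__ -> Each
    each.parseString("red 42")    # matches
    each.parseString("42 red")    # also matches: operands may appear in any order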
Jenyay/outwiker
50530cf7b3f71480bb075b2829bc0669773b835b
plugins/webpage/webpage/libs/email/_parseaddr.py
python
quote
(str)
return str.replace('\\', '\\\\').replace('"', '\\"')
Prepare string to be used in a quoted string. Turns backslash and double quote characters into quoted pairs. These are the only characters that need to be quoted inside a quoted string. Does not add the surrounding double quotes.
Prepare string to be used in a quoted string.
[ "Prepare", "string", "to", "be", "used", "in", "a", "quoted", "string", "." ]
def quote(str): """Prepare string to be used in a quoted string. Turns backslash and double quote characters into quoted pairs. These are the only characters that need to be quoted inside a quoted string. Does not add the surrounding double quotes. """ return str.replace('\\', '\\\\').replace('"', '\\"')
[ "def", "quote", "(", "str", ")", ":", "return", "str", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")" ]
https://github.com/Jenyay/outwiker/blob/50530cf7b3f71480bb075b2829bc0669773b835b/plugins/webpage/webpage/libs/email/_parseaddr.py#L193-L200
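A worked example of the quoting above; the same helper is exposed publicly as email.utils.quote in the standard library:

    from email.utils import quote   # public twin of the helper above

    print(quote('back\\slash and "quote"'))
    # back\\slash and \"quote\"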
tensorflow/model-analysis
e38c23ce76eff039548ce69e3160ed4d7984f2fc
tensorflow_model_analysis/extractors/tfjs_predict_extractor_util.py
python
get_tfjs_binary
()
return path
Download and return the path to the tfjs binary.
Download and return the path to the tfjs binary.
[ "Download", "and", "return", "the", "path", "to", "the", "tfjs", "binary", "." ]
def get_tfjs_binary(): """Download and return the path to the tfjs binary.""" if sys.platform == 'darwin': url = 'http://storage.googleapis.com/tfjs-inference/tfjs-inference-macos' else: url = 'http://storage.googleapis.com/tfjs-inference/tfjs-inference-linux' base_path = tempfile.mkdtemp() path = os.path.join(base_path, 'binary') with urllib.request.urlopen(url) as response: with tf.io.gfile.GFile(path, 'w') as file: shutil.copyfileobj(response, file) subprocess.check_call(['chmod', '+x', path]) return path
[ "def", "get_tfjs_binary", "(", ")", ":", "if", "sys", ".", "platform", "==", "'darwin'", ":", "url", "=", "'http://storage.googleapis.com/tfjs-inference/tfjs-inference-macos'", "else", ":", "url", "=", "'http://storage.googleapis.com/tfjs-inference/tfjs-inference-linux'", "base_path", "=", "tempfile", ".", "mkdtemp", "(", ")", "path", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "'binary'", ")", "with", "urllib", ".", "request", ".", "urlopen", "(", "url", ")", "as", "response", ":", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "path", ",", "'w'", ")", "as", "file", ":", "shutil", ".", "copyfileobj", "(", "response", ",", "file", ")", "subprocess", ".", "check_call", "(", "[", "'chmod'", ",", "'+x'", ",", "path", "]", ")", "return", "path" ]
https://github.com/tensorflow/model-analysis/blob/e38c23ce76eff039548ce69e3160ed4d7984f2fc/tensorflow_model_analysis/extractors/tfjs_predict_extractor_util.py#L26-L39
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/urllib3/connectionpool.py
python
ConnectionPool.__str__
(self)
return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
[]
def __str__(self): return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
[ "def", "__str__", "(", "self", ")", ":", "return", "\"%s(host=%r, port=%r)\"", "%", "(", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "host", ",", "self", ".", "port", ")" ]
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/urllib3/connectionpool.py#L82-L83
openbmc/openbmc
5f4109adae05f4d6925bfe960007d52f98c61086
poky/bitbake/lib/layerindexlib/restapi.py
python
RestApiPlugin.load_index
(self, url, load)
Fetches layer information from a local or remote layer index. The return value is a LayerIndexObj. url is the url to the rest api of the layer index, such as: http://layers.openembedded.org/layerindex/api/ Or a local file...
Fetches layer information from a local or remote layer index.
[ "Fetches", "layer", "information", "from", "a", "local", "or", "remote", "layer", "index", "." ]
def load_index(self, url, load): """ Fetches layer information from a local or remote layer index. The return value is a LayerIndexObj. url is the url to the rest api of the layer index, such as: http://layers.openembedded.org/layerindex/api/ Or a local file... """ up = urlparse(url) if up.scheme == 'file': return self.load_index_file(up, url, load) if up.scheme == 'http' or up.scheme == 'https': return self.load_index_web(up, url, load) raise layerindexlib.plugin.LayerIndexPluginUrlError(self.type, url)
[ "def", "load_index", "(", "self", ",", "url", ",", "load", ")", ":", "up", "=", "urlparse", "(", "url", ")", "if", "up", ".", "scheme", "==", "'file'", ":", "return", "self", ".", "load_index_file", "(", "up", ",", "url", ",", "load", ")", "if", "up", ".", "scheme", "==", "'http'", "or", "up", ".", "scheme", "==", "'https'", ":", "return", "self", ".", "load_index_web", "(", "up", ",", "url", ",", "load", ")", "raise", "layerindexlib", ".", "plugin", ".", "LayerIndexPluginUrlError", "(", "self", ".", "type", ",", "url", ")" ]
https://github.com/openbmc/openbmc/blob/5f4109adae05f4d6925bfe960007d52f98c61086/poky/bitbake/lib/layerindexlib/restapi.py#L27-L47
CedricGuillemet/Imogen
ee417b42747ed5b46cb11b02ef0c3630000085b3
bin/Lib/asyncio/base_events.py
python
BaseEventLoop.call_later
(self, delay, callback, *args, context=None)
return timer
Arrange for a callback to be called at a given time. Return a Handle: an opaque object with a cancel() method that can be used to cancel the call. The delay can be an int or float, expressed in seconds. It is always relative to the current time. Each callback will be called exactly once. If two callbacks are scheduled for exactly the same time, it is undefined which will be called first. Any positional arguments after the callback will be passed to the callback when it is called.
Arrange for a callback to be called at a given time.
[ "Arrange", "for", "a", "callback", "to", "be", "called", "at", "a", "given", "time", "." ]
def call_later(self, delay, callback, *args, context=None): """Arrange for a callback to be called at a given time. Return a Handle: an opaque object with a cancel() method that can be used to cancel the call. The delay can be an int or float, expressed in seconds. It is always relative to the current time. Each callback will be called exactly once. If two callbacks are scheduled for exactly the same time, it is undefined which will be called first. Any positional arguments after the callback will be passed to the callback when it is called. """ timer = self.call_at(self.time() + delay, callback, *args, context=context) if timer._source_traceback: del timer._source_traceback[-1] return timer
[ "def", "call_later", "(", "self", ",", "delay", ",", "callback", ",", "*", "args", ",", "context", "=", "None", ")", ":", "timer", "=", "self", ".", "call_at", "(", "self", ".", "time", "(", ")", "+", "delay", ",", "callback", ",", "*", "args", ",", "context", "=", "context", ")", "if", "timer", ".", "_source_traceback", ":", "del", "timer", ".", "_source_traceback", "[", "-", "1", "]", "return", "timer" ]
https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/asyncio/base_events.py#L629-L649
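A minimal, runnable sketch of the scheduling contract described above:

    import asyncio

    async def main():
        loop = asyncio.get_running_loop()
        handle = loop.call_later(0.1, print, "fired after ~0.1s")
        # handle.cancel() here would prevent the callback from running
        await asyncio.sleep(0.2)

    asyncio.run(main())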
projecthamster/hamster-gtk
f51b45a77bcc343514ac1439d42ec024ef751a82
hamster_gtk/preferences/widgets/combo_file_chooser.py
python
ComboFileChooser._on_choose_clicked
(self, widget)
Open a dialog to select path and update entry widget with it.
Open a dialog to select path and update entry widget with it.
[ "Open", "a", "dialog", "to", "select", "path", "and", "update", "entry", "widget", "with", "it", "." ]
def _on_choose_clicked(self, widget): """Open a dialog to select path and update entry widget with it.""" toplevel = get_parent_window(self) dialog = Gtk.FileChooserDialog(_("Please choose a directory"), toplevel, Gtk.FileChooserAction.SAVE, (_("_Cancel"), Gtk.ResponseType.CANCEL, _("_Save"), Gtk.ResponseType.OK)) dialog.set_filename(self.get_config_value()) response = dialog.run() if response == Gtk.ResponseType.OK: self._entry.set_text(_u(dialog.get_filename())) dialog.destroy()
[ "def", "_on_choose_clicked", "(", "self", ",", "widget", ")", ":", "toplevel", "=", "get_parent_window", "(", "self", ")", "dialog", "=", "Gtk", ".", "FileChooserDialog", "(", "_", "(", "\"Please choose a directory\"", ")", ",", "toplevel", ",", "Gtk", ".", "FileChooserAction", ".", "SAVE", ",", "(", "_", "(", "\"_Cancel\"", ")", ",", "Gtk", ".", "ResponseType", ".", "CANCEL", ",", "_", "(", "\"_Save\"", ")", ",", "Gtk", ".", "ResponseType", ".", "OK", ")", ")", "dialog", ".", "set_filename", "(", "self", ".", "get_config_value", "(", ")", ")", "response", "=", "dialog", ".", "run", "(", ")", "if", "response", "==", "Gtk", ".", "ResponseType", ".", "OK", ":", "self", ".", "_entry", ".", "set_text", "(", "_u", "(", "dialog", ".", "get_filename", "(", ")", ")", ")", "dialog", ".", "destroy", "(", ")" ]
https://github.com/projecthamster/hamster-gtk/blob/f51b45a77bcc343514ac1439d42ec024ef751a82/hamster_gtk/preferences/widgets/combo_file_chooser.py#L75-L87
KalleHallden/AutoTimer
2d954216700c4930baa154e28dbddc34609af7ce
env/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py
python
DistributionNotFound.req
(self)
return self.args[0]
[]
def req(self): return self.args[0]
[ "def", "req", "(", "self", ")", ":", "return", "self", ".", "args", "[", "0", "]" ]
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L310-L311
yuxiaokui/Intranet-Penetration
f57678a204840c83cbf3308e3470ae56c5ff514b
proxy/XX-Net/code/default/python27/1.0/lib/string.py
python
capwords
(s, sep=None)
return (sep or ' ').join(x.capitalize() for x in s.split(sep))
capwords(s [,sep]) -> string Split the argument into words using split, capitalize each word using capitalize, and join the capitalized words using join. If the optional second argument sep is absent or None, runs of whitespace characters are replaced by a single space and leading and trailing whitespace are removed, otherwise sep is used to split and join the words.
capwords(s [,sep]) -> string
[ "capwords", "(", "s", "[", "sep", "]", ")", "-", ">", "string" ]
def capwords(s, sep=None): """capwords(s [,sep]) -> string Split the argument into words using split, capitalize each word using capitalize, and join the capitalized words using join. If the optional second argument sep is absent or None, runs of whitespace characters are replaced by a single space and leading and trailing whitespace are removed, otherwise sep is used to split and join the words. """ return (sep or ' ').join(x.capitalize() for x in s.split(sep))
[ "def", "capwords", "(", "s", ",", "sep", "=", "None", ")", ":", "return", "(", "sep", "or", "' '", ")", ".", "join", "(", "x", ".", "capitalize", "(", ")", "for", "x", "in", "s", ".", "split", "(", "sep", ")", ")" ]
https://github.com/yuxiaokui/Intranet-Penetration/blob/f57678a204840c83cbf3308e3470ae56c5ff514b/proxy/XX-Net/code/default/python27/1.0/lib/string.py#L45-L56
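Two worked examples of the behavior described above:

    import string

    string.capwords("  the quick  brown fox ")   # 'The Quick Brown Fox'
    string.capwords("sub-zero level", sep="-")   # 'Sub-Zero level'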
TengXiaoDai/DistributedCrawling
f5c2439e6ce68dd9b49bde084d76473ff9ed4963
Lib/heapq.py
python
_siftup_max
(heap, pos)
Maxheap variant of _siftup
Maxheap variant of _siftup
[ "Maxheap", "variant", "of", "_siftup" ]
def _siftup_max(heap, pos): 'Maxheap variant of _siftup' endpos = len(heap) startpos = pos newitem = heap[pos] # Bubble up the larger child until hitting a leaf. childpos = 2*pos + 1 # leftmost child position while childpos < endpos: # Set childpos to index of larger child. rightpos = childpos + 1 if rightpos < endpos and not heap[rightpos] < heap[childpos]: childpos = rightpos # Move the larger child up. heap[pos] = heap[childpos] pos = childpos childpos = 2*pos + 1 # The leaf at pos is empty now. Put newitem there, and bubble it up # to its final resting place (by sifting its parents down). heap[pos] = newitem _siftdown_max(heap, startpos, pos)
[ "def", "_siftup_max", "(", "heap", ",", "pos", ")", ":", "endpos", "=", "len", "(", "heap", ")", "startpos", "=", "pos", "newitem", "=", "heap", "[", "pos", "]", "# Bubble up the larger child until hitting a leaf.", "childpos", "=", "2", "*", "pos", "+", "1", "# leftmost child position", "while", "childpos", "<", "endpos", ":", "# Set childpos to index of larger child.", "rightpos", "=", "childpos", "+", "1", "if", "rightpos", "<", "endpos", "and", "not", "heap", "[", "rightpos", "]", "<", "heap", "[", "childpos", "]", ":", "childpos", "=", "rightpos", "# Move the larger child up.", "heap", "[", "pos", "]", "=", "heap", "[", "childpos", "]", "pos", "=", "childpos", "childpos", "=", "2", "*", "pos", "+", "1", "# The leaf at pos is empty now. Put newitem there, and bubble it up", "# to its final resting place (by sifting its parents down).", "heap", "[", "pos", "]", "=", "newitem", "_siftdown_max", "(", "heap", ",", "startpos", ",", "pos", ")" ]
https://github.com/TengXiaoDai/DistributedCrawling/blob/f5c2439e6ce68dd9b49bde084d76473ff9ed4963/Lib/heapq.py#L293-L312
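The max-heap variant is exercised indirectly through CPython's private _heapify_max helper; a small sketch (private API, subject to change):

    import heapq

    h = [3, 9, 1, 7, 5]
    heapq._heapify_max(h)    # builds a max-heap in place via _siftup_max
    assert h[0] == 9         # the root now holds the largest element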
DataDog/integrations-core
934674b29d94b70ccc008f76ea172d0cdae05e1e
tomcat/datadog_checks/tomcat/config_models/shared.py
python
SharedConfig._run_validations
(cls, v, field)
return getattr(validators, f'shared_{field.name}', identity)(v, field=field)
[]
def _run_validations(cls, v, field): if not v: return v return getattr(validators, f'shared_{field.name}', identity)(v, field=field)
[ "def", "_run_validations", "(", "cls", ",", "v", ",", "field", ")", ":", "if", "not", "v", ":", "return", "v", "return", "getattr", "(", "validators", ",", "f'shared_{field.name}'", ",", "identity", ")", "(", "v", ",", "field", "=", "field", ")" ]
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/tomcat/datadog_checks/tomcat/config_models/shared.py#L45-L49
ucbdrive/3d-vehicle-tracking
8ee189f6792897651bb56bb2950ce07c9629a89d
3d-tracking/lib/model/rpn/generate_anchors.py
python
generate_anchors
(base_size=16, ratios=[0.5, 1, 2], scales=2**np.arange(3, 6))
return anchors
Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, 15, 15) window.
Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, 15, 15) window.
[ "Generate", "anchor", "(", "reference", ")", "windows", "by", "enumerating", "aspect", "ratios", "X", "scales", "wrt", "a", "reference", "(", "0", "0", "15", "15", ")", "window", "." ]
def generate_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2**np.arange(3, 6)): """ Generate anchor (reference) windows by enumerating aspect ratios X scales wrt a reference (0, 0, 15, 15) window. """ base_anchor = np.array([1, 1, base_size, base_size]) - 1 ratio_anchors = _ratio_enum(base_anchor, ratios) anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales) for i in xrange(ratio_anchors.shape[0])]) return anchors
[ "def", "generate_anchors", "(", "base_size", "=", "16", ",", "ratios", "=", "[", "0.5", ",", "1", ",", "2", "]", ",", "scales", "=", "2", "**", "np", ".", "arange", "(", "3", ",", "6", ")", ")", ":", "base_anchor", "=", "np", ".", "array", "(", "[", "1", ",", "1", ",", "base_size", ",", "base_size", "]", ")", "-", "1", "ratio_anchors", "=", "_ratio_enum", "(", "base_anchor", ",", "ratios", ")", "anchors", "=", "np", ".", "vstack", "(", "[", "_scale_enum", "(", "ratio_anchors", "[", "i", ",", ":", "]", ",", "scales", ")", "for", "i", "in", "xrange", "(", "ratio_anchors", ".", "shape", "[", "0", "]", ")", "]", ")", "return", "anchors" ]
https://github.com/ucbdrive/3d-vehicle-tracking/blob/8ee189f6792897651bb56bb2950ce07c9629a89d/3d-tracking/lib/model/rpn/generate_anchors.py#L45-L56
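With the defaults above (3 ratios x 3 scales) the function yields nine reference boxes; a sketch assuming the function above is importable (note the source targets Python 2, where xrange is defined):

    import numpy as np

    anchors = generate_anchors(base_size=16, ratios=[0.5, 1, 2],
                               scales=2 ** np.arange(3, 6))
    print(anchors.shape)   # (9, 4): one (x1, y1, x2, y2) box per ratio/scale pair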
numenta/numenta-apps
02903b0062c89c2c259b533eea2df6e8bb44eaf3
htmengine/htmengine/model_checkpoint_mgr/model_checkpoint_mgr.py
python
ModelCheckpointMgr._getCurrentCheckpointRealPath
(self, modelID)
return checkpointStoreDirPath
Get the real path of the model's existing checkpoint store directory, resolving all links raises: ModelNotFound if the model checkpoint hasn't been saved yet or if this model's entry doesn't exist in the checkpoint archive
Get the real path of the model's existing checkpoint store directory, resolving all links
[ "Get", "the", "real", "path", "of", "the", "model", "s", "existing", "checkpoint", "store", "directory", "resolving", "all", "links" ]
def _getCurrentCheckpointRealPath(self, modelID): """ Get the real path of the model's existing checkpoint store directory, resolving all links raises: ModelNotFound if the model checkpoint hasn't been saved yet or if this model's entry doesn't exist in the checkpoint archive """ currentStoreSymlinkPath = os.path.join( self._getModelDir(modelID, mustExist=True), self._CHECKPOINT_LINK_NAME) # Follow the checkpoint link checkpointStoreDirPath = os.path.realpath(currentStoreSymlinkPath) if not os.path.exists(checkpointStoreDirPath): raise ModelNotFound("Checkpoint not found for model=%s; expected " "directory=%s" % (modelID, checkpointStoreDirPath,)) assert checkpointStoreDirPath != currentStoreSymlinkPath, ( checkpointStoreDirPath) return checkpointStoreDirPath
[ "def", "_getCurrentCheckpointRealPath", "(", "self", ",", "modelID", ")", ":", "currentStoreSymlinkPath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_getModelDir", "(", "modelID", ",", "mustExist", "=", "True", ")", ",", "self", ".", "_CHECKPOINT_LINK_NAME", ")", "# Follow the checkpoint link", "checkpointStoreDirPath", "=", "os", ".", "path", ".", "realpath", "(", "currentStoreSymlinkPath", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "checkpointStoreDirPath", ")", ":", "raise", "ModelNotFound", "(", "\"Checkpoint not found for model=%s; expected \"", "\"directory=%s\"", "%", "(", "modelID", ",", "checkpointStoreDirPath", ",", ")", ")", "assert", "checkpointStoreDirPath", "!=", "currentStoreSymlinkPath", ",", "(", "checkpointStoreDirPath", ")", "return", "checkpointStoreDirPath" ]
https://github.com/numenta/numenta-apps/blob/02903b0062c89c2c259b533eea2df6e8bb44eaf3/htmengine/htmengine/model_checkpoint_mgr/model_checkpoint_mgr.py#L188-L210
AstroPrint/AstroBox
e7e3b8a7d33ea85fcb6b2696869c0d719ceb8b75
src/ext/makerbot_driver/GcodeProcessors/EmptyLayerProcessor.py
python
EmptyLayerProcessor.check_for_move_with_extrude
(self, string)
return match is not None
[]
def check_for_move_with_extrude(self, string): match = re.match(self.move_with_extrude, string) return match is not None
[ "def", "check_for_move_with_extrude", "(", "self", ",", "string", ")", ":", "match", "=", "re", ".", "match", "(", "self", ".", "move_with_extrude", ",", "string", ")", "return", "match", "is", "not", "None" ]
https://github.com/AstroPrint/AstroBox/blob/e7e3b8a7d33ea85fcb6b2696869c0d719ceb8b75/src/ext/makerbot_driver/GcodeProcessors/EmptyLayerProcessor.py#L123-L125
inkandswitch/livebook
93c8d467734787366ad084fc3566bf5cbe249c51
public/pypyjs/modules/decimal.py
python
Context.is_zero
(self, a)
return a.is_zero()
Return True if the operand is a zero; otherwise return False. >>> ExtendedContext.is_zero(Decimal('0')) True >>> ExtendedContext.is_zero(Decimal('2.50')) False >>> ExtendedContext.is_zero(Decimal('-0E+2')) True >>> ExtendedContext.is_zero(1) False >>> ExtendedContext.is_zero(0) True
Return True if the operand is a zero; otherwise return False.
[ "Return", "True", "if", "the", "operand", "is", "a", "zero", ";", "otherwise", "return", "False", "." ]
def is_zero(self, a): """Return True if the operand is a zero; otherwise return False. >>> ExtendedContext.is_zero(Decimal('0')) True >>> ExtendedContext.is_zero(Decimal('2.50')) False >>> ExtendedContext.is_zero(Decimal('-0E+2')) True >>> ExtendedContext.is_zero(1) False >>> ExtendedContext.is_zero(0) True """ a = _convert_other(a, raiseit=True) return a.is_zero()
[ "def", "is_zero", "(", "self", ",", "a", ")", ":", "a", "=", "_convert_other", "(", "a", ",", "raiseit", "=", "True", ")", "return", "a", ".", "is_zero", "(", ")" ]
https://github.com/inkandswitch/livebook/blob/93c8d467734787366ad084fc3566bf5cbe249c51/public/pypyjs/modules/decimal.py#L4470-L4485
ask/python-github2
4287019520643e2fcd46c9c5b3aac439cdc35b88
github2/issues.py
python
Issues.list_labels
(self, project)
return self.get_values("labels", project, filter="labels")
Get all labels for project. .. versionadded:: 0.3.0 :param str project: GitHub project
Get all labels for project.
[ "Get", "all", "labels", "for", "project", "." ]
def list_labels(self, project): """Get all labels for project. .. versionadded:: 0.3.0 :param str project: GitHub project """ return self.get_values("labels", project, filter="labels")
[ "def", "list_labels", "(", "self", ",", "project", ")", ":", "return", "self", ".", "get_values", "(", "\"labels\"", ",", "project", ",", "filter", "=", "\"labels\"", ")" ]
https://github.com/ask/python-github2/blob/4287019520643e2fcd46c9c5b3aac439cdc35b88/github2/issues.py#L98-L106
matplotlib/matplotlib
8d7a2b9d2a38f01ee0d6802dd4f9e98aec812322
lib/matplotlib/patches.py
python
ConnectionPatch._get_path_in_displaycoord
(self)
return path, fillable
Return the mutated path of the arrow in display coordinates.
Return the mutated path of the arrow in display coordinates.
[ "Return", "the", "mutated", "path", "of", "the", "arrow", "in", "display", "coordinates", "." ]
def _get_path_in_displaycoord(self): """Return the mutated path of the arrow in display coordinates.""" dpi_cor = self._dpi_cor posA = self._get_xy(self.xy1, self.coords1, self.axesA) posB = self._get_xy(self.xy2, self.coords2, self.axesB) path = self.get_connectionstyle()( posA, posB, patchA=self.patchA, patchB=self.patchB, shrinkA=self.shrinkA * dpi_cor, shrinkB=self.shrinkB * dpi_cor, ) path, fillable = self.get_arrowstyle()( path, self.get_mutation_scale() * dpi_cor, self.get_linewidth() * dpi_cor, self.get_mutation_aspect() ) return path, fillable
[ "def", "_get_path_in_displaycoord", "(", "self", ")", ":", "dpi_cor", "=", "self", ".", "_dpi_cor", "posA", "=", "self", ".", "_get_xy", "(", "self", ".", "xy1", ",", "self", ".", "coords1", ",", "self", ".", "axesA", ")", "posB", "=", "self", ".", "_get_xy", "(", "self", ".", "xy2", ",", "self", ".", "coords2", ",", "self", ".", "axesB", ")", "path", "=", "self", ".", "get_connectionstyle", "(", ")", "(", "posA", ",", "posB", ",", "patchA", "=", "self", ".", "patchA", ",", "patchB", "=", "self", ".", "patchB", ",", "shrinkA", "=", "self", ".", "shrinkA", "*", "dpi_cor", ",", "shrinkB", "=", "self", ".", "shrinkB", "*", "dpi_cor", ",", ")", "path", ",", "fillable", "=", "self", ".", "get_arrowstyle", "(", ")", "(", "path", ",", "self", ".", "get_mutation_scale", "(", ")", "*", "dpi_cor", ",", "self", ".", "get_linewidth", "(", ")", "*", "dpi_cor", ",", "self", ".", "get_mutation_aspect", "(", ")", ")", "return", "path", ",", "fillable" ]
https://github.com/matplotlib/matplotlib/blob/8d7a2b9d2a38f01ee0d6802dd4f9e98aec812322/lib/matplotlib/patches.py#L4762-L4778
openstack/neutron
fb229fb527ac8b95526412f7762d90826ac41428
neutron/services/auto_allocate/db.py
python
_ensure_external_network_default_value_callback
( resource, event, trigger, payload=None)
Ensure the is_default db field matches the create/update request.
Ensure the is_default db field matches the create/update request.
[ "Ensure", "the", "is_default", "db", "field", "matches", "the", "create", "/", "update", "request", "." ]
def _ensure_external_network_default_value_callback( resource, event, trigger, payload=None): """Ensure the is_default db field matches the create/update request.""" _request = payload.request_body _context = payload.context _network = payload.desired_state or payload.latest_state _orig = payload.states[0] @db_api.retry_if_session_inactive() def _do_ensure_external_network_default_value_callback( context, request, orig, network): is_default = request.get(api_const.IS_DEFAULT) if is_default is None: return if is_default: # ensure only one default external network at any given time pager = base_obj.Pager(limit=1) objs = net_obj.ExternalNetwork.get_objects(context, _pager=pager, is_default=True) if objs: if objs[0] and network['id'] != objs[0].network_id: raise exceptions.DefaultExternalNetworkExists( net_id=objs[0].network_id) if orig and orig.get(api_const.IS_DEFAULT) == is_default: return network[api_const.IS_DEFAULT] = is_default # Reflect the status of the is_default on the create/update request obj = net_obj.ExternalNetwork.get_object(context, network_id=network['id']) if obj: obj.is_default = is_default obj.update() _do_ensure_external_network_default_value_callback( _context, _request, _orig, _network)
[ "def", "_ensure_external_network_default_value_callback", "(", "resource", ",", "event", ",", "trigger", ",", "payload", "=", "None", ")", ":", "_request", "=", "payload", ".", "request_body", "_context", "=", "payload", ".", "context", "_network", "=", "payload", ".", "desired_state", "or", "payload", ".", "latest_state", "_orig", "=", "payload", ".", "states", "[", "0", "]", "@", "db_api", ".", "retry_if_session_inactive", "(", ")", "def", "_do_ensure_external_network_default_value_callback", "(", "context", ",", "request", ",", "orig", ",", "network", ")", ":", "is_default", "=", "request", ".", "get", "(", "api_const", ".", "IS_DEFAULT", ")", "if", "is_default", "is", "None", ":", "return", "if", "is_default", ":", "# ensure only one default external network at any given time", "pager", "=", "base_obj", ".", "Pager", "(", "limit", "=", "1", ")", "objs", "=", "net_obj", ".", "ExternalNetwork", ".", "get_objects", "(", "context", ",", "_pager", "=", "pager", ",", "is_default", "=", "True", ")", "if", "objs", ":", "if", "objs", "[", "0", "]", "and", "network", "[", "'id'", "]", "!=", "objs", "[", "0", "]", ".", "network_id", ":", "raise", "exceptions", ".", "DefaultExternalNetworkExists", "(", "net_id", "=", "objs", "[", "0", "]", ".", "network_id", ")", "if", "orig", "and", "orig", ".", "get", "(", "api_const", ".", "IS_DEFAULT", ")", "==", "is_default", ":", "return", "network", "[", "api_const", ".", "IS_DEFAULT", "]", "=", "is_default", "# Reflect the status of the is_default on the create/update request", "obj", "=", "net_obj", ".", "ExternalNetwork", ".", "get_object", "(", "context", ",", "network_id", "=", "network", "[", "'id'", "]", ")", "if", "obj", ":", "obj", ".", "is_default", "=", "is_default", "obj", ".", "update", "(", ")", "_do_ensure_external_network_default_value_callback", "(", "_context", ",", "_request", ",", "_orig", ",", "_network", ")" ]
https://github.com/openstack/neutron/blob/fb229fb527ac8b95526412f7762d90826ac41428/neutron/services/auto_allocate/db.py#L43-L79
bbfamily/abu
2de85ae57923a720dac99a545b4f856f6b87304b
abupy/MLBu/ABuML.py
python
AbuML.bagging_classifier_best
(self, **kwargs)
return self.estimator.bagging_classifier_best(self.x, self.y, **kwargs)
eg: calling bagging_classifier_best with the param_grid argument: param_grid = {'max_samples': np.arange(1, 5), 'n_estimators': np.arange(100, 300, 50)} ttn_abu.bagging_classifier_best(param_grid=param_grid, n_jobs=-1) out: BaggingClassifier(max_samples=4, n_estimators=100) calling bagging_classifier_best without the param_grid argument: from abupy import AbuML, ml ttn_abu = AbuML.create_test_more_fiter() ttn_abu.bagging_classifier_best() :param kwargs: optional keyword argument param_grid: dict of candidate parameters for the grid search eg:param_grid = {'max_samples': np.arange(1, 5), 'n_estimators': np.arange(100, 300, 50)} optional keyword argument assign: whether to store the fitted estimator built with the best parameters, default True optional keyword argument n_jobs: number of parallel worker processes, default -1, i.e. one process per CPU optional keyword argument show: whether to visualize the best-parameter search results :return: a BaggingClassifier constructed with the best parameters
eg: calling bagging_classifier_best with the param_grid argument:
[ "eg:", "calling", "bagging_classifier_best", "with", "the", "param_grid", "argument:" ]
def bagging_classifier_best(self, **kwargs): """ eg: calling bagging_classifier_best with the param_grid argument: param_grid = {'max_samples': np.arange(1, 5), 'n_estimators': np.arange(100, 300, 50)} ttn_abu.bagging_classifier_best(param_grid=param_grid, n_jobs=-1) out: BaggingClassifier(max_samples=4, n_estimators=100) calling bagging_classifier_best without the param_grid argument: from abupy import AbuML, ml ttn_abu = AbuML.create_test_more_fiter() ttn_abu.bagging_classifier_best() :param kwargs: optional keyword argument param_grid: dict of candidate parameters for the grid search eg:param_grid = {'max_samples': np.arange(1, 5), 'n_estimators': np.arange(100, 300, 50)} optional keyword argument assign: whether to store the fitted estimator built with the best parameters, default True optional keyword argument n_jobs: number of parallel worker processes, default -1, i.e. one process per CPU optional keyword argument show: whether to visualize the best-parameter search results :return: a BaggingClassifier constructed with the best parameters """ return self.estimator.bagging_classifier_best(self.x, self.y, **kwargs)
[ "def", "bagging_classifier_best", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "estimator", ".", "bagging_classifier_best", "(", "self", ".", "x", ",", "self", ".", "y", ",", "*", "*", "kwargs", ")" ]
https://github.com/bbfamily/abu/blob/2de85ae57923a720dac99a545b4f856f6b87304b/abupy/MLBu/ABuML.py#L1434-L1458
veusz/veusz
5a1e2af5f24df0eb2a2842be51f2997c4999c7fb
veusz/plugins/importplugin.py
python
ImportPluginBinary.doImport
(self, params)
return [ datasetplugin.Dataset1D(name, data) ]
Import the data.
Import the data.
[ "Import", "the", "data", "." ]
def doImport(self, params): """Import the data.""" name = params.field_results["name"].strip() if not name: raise ImportPluginException(_("Please provide a name for the dataset")) try: f = open(params.filename, "rb") f.seek( params.field_results["offset"] ) retn = f.read() f.close() except EnvironmentError as e: raise ImportPluginException( _("Error while reading file '%s'\n\n%s") % (params.filename, e.strerror)) try: data = N.fromstring( retn, dtype=self.getNumpyDataType(params), count=params.field_results["length"]) except ValueError as e: raise ImportPluginException( _("Error converting data for file '%s'\n\n%s") % (params.filename, str(e))) data = data.astype(N.float64) return [ datasetplugin.Dataset1D(name, data) ]
[ "def", "doImport", "(", "self", ",", "params", ")", ":", "name", "=", "params", ".", "field_results", "[", "\"name\"", "]", ".", "strip", "(", ")", "if", "not", "name", ":", "raise", "ImportPluginException", "(", "_", "(", "\"Please provide a name for the dataset\"", ")", ")", "try", ":", "f", "=", "open", "(", "params", ".", "filename", ",", "\"rb\"", ")", "f", ".", "seek", "(", "params", ".", "field_results", "[", "\"offset\"", "]", ")", "retn", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "except", "EnvironmentError", "as", "e", ":", "raise", "ImportPluginException", "(", "_", "(", "\"Error while reading file '%s'\\n\\n%s\"", ")", "%", "(", "params", ".", "filename", ",", "e", ".", "strerror", ")", ")", "try", ":", "data", "=", "N", ".", "fromstring", "(", "retn", ",", "dtype", "=", "self", ".", "getNumpyDataType", "(", "params", ")", ",", "count", "=", "params", ".", "field_results", "[", "\"length\"", "]", ")", "except", "ValueError", "as", "e", ":", "raise", "ImportPluginException", "(", "_", "(", "\"Error converting data for file '%s'\\n\\n%s\"", ")", "%", "(", "params", ".", "filename", ",", "str", "(", "e", ")", ")", ")", "data", "=", "data", ".", "astype", "(", "N", ".", "float64", ")", "return", "[", "datasetplugin", ".", "Dataset1D", "(", "name", ",", "data", ")", "]" ]
https://github.com/veusz/veusz/blob/5a1e2af5f24df0eb2a2842be51f2997c4999c7fb/veusz/plugins/importplugin.py#L683-L710
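The decode step above relies on numpy.fromstring, which is deprecated for binary input in current NumPy; numpy.frombuffer takes the same dtype/count arguments. A small self-contained sketch of the equivalent conversion (the float32 dtype here is illustrative, standing in for getNumpyDataType()):

import numpy as np

raw = np.arange(4, dtype=np.float32).tobytes()   # stand-in for f.read()
data = np.frombuffer(raw, dtype=np.float32, count=4).astype(np.float64)
assert data.tolist() == [0.0, 1.0, 2.0, 3.0]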
KoreLogicSecurity/mastiff
04d569e4fa59513572e77c74b049cad82f9b0310
mastiff/plugins/category/categories.py
python
MastiffPlugin.__init__
(self, name=None)
Initialize the Mastiff plugin class.
Initialize the Mastiff plugin class.
[ "Initialize", "the", "Mastiff", "plugin", "class", "." ]
def __init__(self, name=None): """Initialize the Mastiff plugin class.""" IPlugin.__init__(self) self.name = name self.prereq = None self.yara_filetype = None self.page_data = output.page() self.page_data.meta['filename'] = 'CHANGEME'
[ "def", "__init__", "(", "self", ",", "name", "=", "None", ")", ":", "IPlugin", ".", "__init__", "(", "self", ")", "self", ".", "name", "=", "name", "self", ".", "prereq", "=", "None", "self", ".", "yara_filetype", "=", "None", "self", ".", "page_data", "=", "output", ".", "page", "(", ")", "self", ".", "page_data", ".", "meta", "[", "'filename'", "]", "=", "'CHANGEME'" ]
https://github.com/KoreLogicSecurity/mastiff/blob/04d569e4fa59513572e77c74b049cad82f9b0310/mastiff/plugins/category/categories.py#L25-L32
Saferman/cupper
51a88b5c95d8712d491b73a064f94a9ea4aea18b
session/interactiveSession.py
python
SessionCommandInput
(sessionList,mColor)
This script implements the persistent command-mode input loop, mainly used for session file management
This script implements the persistent command-mode input loop, mainly used for session file management
[ "This", "script", "implements", "the", "persistent", "command-mode", "input", "loop", "mainly", "used", "for", "session", "file", "management" ]
def SessionCommandInput(sessionList,mColor): ''' This script implements the persistent command-mode input loop, mainly used for session file management ''' print "[+]请输入help查看帮助信息" while 1: c = raw_input(mColor.getInput_Color + "ManageSession>").lower().strip() cL = c.split()#''.split()结果为[]除非是''.split(',')为[''] if cL == []: continue if cL[0] == "help": if len(cL)!=1: print mColor.error_Color + "[-]请不要输入多余字符!" continue else: print mColor.choice_Color preSpace = 10 middleSpace = 4 print " "*preSpace+"show sessionname"+" "*middleSpace + "查看该session文件的内容" print " "*preSpace+"delete sessionname"+" "*middleSpace + "删除session文件" print " "*preSpace+"list sessionname"+" "*middleSpace + "列出所有session文件" print " "*preSpace+"help"+" "*middleSpace + "查看支持的命令" print " "*preSpace+"exit"+" "*middleSpace + "退出" continue if cL[0] == "show": ShowSession(cL[1],mColor) continue if cL[0] == "delete": DeleteSession(cL[1],mColor) continue if cL[0] == 'list': if len(cL)!=1: print mColor.error_Color + "[-]请不要输入多余字符!" continue else: CheckSession(mColor) continue if cL[0] == "exit": if len(cL)!=1: print mColor.error_Color + "[-]请不要输入多余字符!" continue else: break print mColor.error_Color + "[-]命令格式输入错误!请按照help显示的要求输入"
[ "def", "SessionCommandInput", "(", "sessionList", ",", "mColor", ")", ":", "print", "\"[+]请输入help查看帮助信息\"", "while", "1", ":", "c", "=", "raw_input", "(", "mColor", ".", "getInput_Color", "+", "\"ManageSession>\"", ")", ".", "lower", "(", ")", ".", "strip", "(", ")", "cL", "=", "c", ".", "split", "(", ")", "#''.split()结果为[]除非是''.split(',')为['']", "if", "cL", "==", "[", "]", ":", "continue", "if", "cL", "[", "0", "]", "==", "\"help\"", ":", "if", "len", "(", "cL", ")", "!=", "1", ":", "print", "mColor", ".", "error_Color", "+", "\"[-]请不要输入多余字符!\"", "continue", "else", ":", "print", "mColor", ".", "choice_Color", "preSpace", "=", "10", "middleSpace", "=", "4", "print", "\" \"", "*", "preSpace", "+", "\"show sessionname\"", "+", "\" \"", "*", "middleSpace", "+", "\"查看该session文件的内容\"", "print", "\" \"", "*", "preSpace", "+", "\"delete sessionname\"", "+", "\" \"", "*", "middleSpace", "+", "\"删除session文件\"", "print", "\" \"", "*", "preSpace", "+", "\"list sessionname\"", "+", "\" \"", "*", "middleSpace", "+", "\"列出所有session文件\"", "print", "\" \"", "*", "preSpace", "+", "\"help\"", "+", "\" \"", "*", "middleSpace", "+", "\"查看支持的命令\"", "print", "\" \"", "*", "preSpace", "+", "\"exit\"", "+", "\" \"", "*", "middleSpace", "+", "\"退出\"", "continue", "if", "cL", "[", "0", "]", "==", "\"show\"", ":", "ShowSession", "(", "cL", "[", "1", "]", ",", "mColor", ")", "continue", "if", "cL", "[", "0", "]", "==", "\"delete\"", ":", "DeleteSession", "(", "cL", "[", "1", "]", ",", "mColor", ")", "continue", "if", "cL", "[", "0", "]", "==", "'list'", ":", "if", "len", "(", "cL", ")", "!=", "1", ":", "print", "mColor", ".", "error_Color", "+", "\"[-]请不要输入多余字符!\"", "continue", "else", ":", "CheckSession", "(", "mColor", ")", "continue", "if", "cL", "[", "0", "]", "==", "\"exit\"", ":", "if", "len", "(", "cL", ")", "!=", "1", ":", "print", "mColor", ".", "error_Color", "+", "\"[-]请不要输入多余字符!\"", "continue", "else", ":", "break", "print", "mColor", ".", "error_Color", "+", "\"[-]命令格式输入错误!请按照help显示的要求输入\"" ]
https://github.com/Saferman/cupper/blob/51a88b5c95d8712d491b73a064f94a9ea4aea18b/session/interactiveSession.py#L83-L127
shmilylty/OneForAll
48591142a641e80f8a64ab215d11d06b696702d7
modules/datasets/dnsdumpster.py
python
DNSDumpster.query
(self)
Query the API for subdomains and perform subdomain matching
Query the API for subdomains and perform subdomain matching
[ "Query", "the", "API", "for", "subdomains", "and", "perform", "subdomain", "matching" ]
def query(self): """ Query the API for subdomains and perform subdomain matching """ self.header = self.get_header() self.header.update({'Referer': 'https://dnsdumpster.com'}) self.proxy = self.get_proxy(self.source) resp = self.get(self.addr) if not resp: return self.cookie = resp.cookies data = {'csrfmiddlewaretoken': self.cookie.get('csrftoken'), 'targetip': self.domain, 'user':'free'} resp = self.post(self.addr, data) self.subdomains = self.collect_subdomains(resp)
[ "def", "query", "(", "self", ")", ":", "self", ".", "header", "=", "self", ".", "get_header", "(", ")", "self", ".", "header", ".", "update", "(", "{", "'Referer'", ":", "'https://dnsdumpster.com'", "}", ")", "self", ".", "proxy", "=", "self", ".", "get_proxy", "(", "self", ".", "source", ")", "resp", "=", "self", ".", "get", "(", "self", ".", "addr", ")", "if", "not", "resp", ":", "return", "self", ".", "cookie", "=", "resp", ".", "cookies", "data", "=", "{", "'csrfmiddlewaretoken'", ":", "self", ".", "cookie", ".", "get", "(", "'csrftoken'", ")", ",", "'targetip'", ":", "self", ".", "domain", ",", "'user'", ":", "'free'", "}", "resp", "=", "self", ".", "post", "(", "self", ".", "addr", ",", "data", ")", "self", ".", "subdomains", "=", "self", ".", "collect_subdomains", "(", "resp", ")" ]
https://github.com/shmilylty/OneForAll/blob/48591142a641e80f8a64ab215d11d06b696702d7/modules/datasets/dnsdumpster.py#L12-L27
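Stripped of the module's get_header/get_proxy plumbing, the CSRF handshake above boils down to: GET the page, read the csrftoken cookie, POST it back with the target domain. A hedged sketch with plain requests (field names taken from the record; dnsdumpster may change its form at any time, and this needs network access):

import requests

session = requests.Session()
url = 'https://dnsdumpster.com'
resp = session.get(url, headers={'Referer': url})
token = resp.cookies.get('csrftoken')
resp = session.post(url, headers={'Referer': url},
                    data={'csrfmiddlewaretoken': token,
                          'targetip': 'example.com', 'user': 'free'})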
rotki/rotki
aafa446815cdd5e9477436d1b02bee7d01b398c8
rotkehlchen/db/dbhandler.py
python
DBHandler.save_tokens_for_address
( self, address: ChecksumEthAddress, tokens: List[EthereumToken], )
Saves detected tokens for an address
Saves detected tokens for an address
[ "Saves", "detected", "tokens", "for", "an", "address" ]
def save_tokens_for_address( self, address: ChecksumEthAddress, tokens: List[EthereumToken], ) -> None: """Saves detected tokens for an address""" old_details = self._get_address_details_json(address) new_details = {} if old_details and 'univ2_lp_tokens' in old_details: new_details['univ2_lp_tokens'] = old_details['univ2_lp_tokens'] new_details['tokens'] = [x.identifier for x in tokens] now = ts_now() cursor = self.conn.cursor() cursor.execute( 'INSERT OR REPLACE INTO ethereum_accounts_details ' '(account, tokens_list, time) VALUES (?, ?, ?)', (address, json.dumps(new_details), now), ) self.update_last_write()
[ "def", "save_tokens_for_address", "(", "self", ",", "address", ":", "ChecksumEthAddress", ",", "tokens", ":", "List", "[", "EthereumToken", "]", ",", ")", "->", "None", ":", "old_details", "=", "self", ".", "_get_address_details_json", "(", "address", ")", "new_details", "=", "{", "}", "if", "old_details", "and", "'univ2_lp_tokens'", "in", "old_details", ":", "new_details", "[", "'univ2_lp_tokens'", "]", "=", "old_details", "[", "'univ2_lp_tokens'", "]", "new_details", "[", "'tokens'", "]", "=", "[", "x", ".", "identifier", "for", "x", "in", "tokens", "]", "now", "=", "ts_now", "(", ")", "cursor", "=", "self", ".", "conn", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'INSERT OR REPLACE INTO ethereum_accounts_details '", "'(account, tokens_list, time) VALUES (?, ?, ?)'", ",", "(", "address", ",", "json", ".", "dumps", "(", "new_details", ")", ",", "now", ")", ",", ")", "self", ".", "update_last_write", "(", ")" ]
https://github.com/rotki/rotki/blob/aafa446815cdd5e9477436d1b02bee7d01b398c8/rotkehlchen/db/dbhandler.py#L1604-L1622
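The effect of the INSERT OR REPLACE above is one JSON blob per account, with any existing univ2_lp_tokens carried over and the tokens list overwritten. A self-contained sketch of the resulting row (schema copied from the SQL in the record; the account and token identifier are made up):

import json, sqlite3, time

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE ethereum_accounts_details '
             '(account TEXT PRIMARY KEY, tokens_list TEXT, time INTEGER)')
details = {'tokens': ['DAI']}   # hypothetical token identifier
conn.execute('INSERT OR REPLACE INTO ethereum_accounts_details '
             '(account, tokens_list, time) VALUES (?, ?, ?)',
             ('0xabc', json.dumps(details), int(time.time())))
row = conn.execute('SELECT tokens_list FROM ethereum_accounts_details').fetchone()
assert json.loads(row[0]) == {'tokens': ['DAI']}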
openstack/heat
ea6633c35b04bb49c4a2858edc9df0a82d039478
heat/engine/stack.py
python
Stack.__contains__
(self, key)
Determine whether the stack contains the specified resource.
Determine whether the stack contains the specified resource.
[ "Determine", "whether", "the", "stack", "contains", "the", "specified", "resource", "." ]
def __contains__(self, key): """Determine whether the stack contains the specified resource.""" if self._resources is not None: return key in self.resources else: return key in self.t[self.t.RESOURCES]
[ "def", "__contains__", "(", "self", ",", "key", ")", ":", "if", "self", ".", "_resources", "is", "not", "None", ":", "return", "key", "in", "self", ".", "resources", "else", ":", "return", "key", "in", "self", ".", "t", "[", "self", ".", "t", ".", "RESOURCES", "]" ]
https://github.com/openstack/heat/blob/ea6633c35b04bb49c4a2858edc9df0a82d039478/heat/engine/stack.py#L831-L836
Textualize/rich
d39626143036188cb2c9e1619e836540f5b627f8
rich/color.py
python
parse_rgb_hex
(hex_color: str)
return color
Parse six hex characters into an RGB triplet.
Parse six hex characters into an RGB triplet.
[ "Parse", "six", "hex", "characters", "into", "an", "RGB", "triplet", "." ]
def parse_rgb_hex(hex_color: str) -> ColorTriplet: """Parse six hex characters into an RGB triplet.""" assert len(hex_color) == 6, "must be 6 characters" color = ColorTriplet( int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16) ) return color
[ "def", "parse_rgb_hex", "(", "hex_color", ":", "str", ")", "->", "ColorTriplet", ":", "assert", "len", "(", "hex_color", ")", "==", "6", ",", "\"must be 6 characters\"", "color", "=", "ColorTriplet", "(", "int", "(", "hex_color", "[", "0", ":", "2", "]", ",", "16", ")", ",", "int", "(", "hex_color", "[", "2", ":", "4", "]", ",", "16", ")", ",", "int", "(", "hex_color", "[", "4", ":", "6", "]", ",", "16", ")", ")", "return", "color" ]
https://github.com/Textualize/rich/blob/d39626143036188cb2c9e1619e836540f5b627f8/rich/color.py#L532-L538
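Usage is straightforward, since each colour channel is just a two-character base-16 slice:

from rich.color import parse_rgb_hex

triplet = parse_rgb_hex('ff8800')
assert (triplet.red, triplet.green, triplet.blue) == (255, 136, 0)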
schemathesis/schemathesis
2eaea40be33067af5f5f6b6b7a79000224e1bbd2
src/schemathesis/specs/openapi/links.py
python
get_links
(response: GenericResponse, operation: APIOperation, field: str)
return [Link.from_definition(name, definition, operation) for name, definition in links.items()]
Get `x-links` / `links` definitions from the schema.
Get `x-links` / `links` definitions from the schema.
[ "Get", "x", "-", "links", "/", "links", "definitions", "from", "the", "schema", "." ]
def get_links(response: GenericResponse, operation: APIOperation, field: str) -> Sequence[Link]: """Get `x-links` / `links` definitions from the schema.""" responses = operation.definition.resolved["responses"] if str(response.status_code) in responses: response_definition = responses[str(response.status_code)] elif response.status_code in responses: response_definition = responses[response.status_code] else: response_definition = responses.get("default", {}) links = response_definition.get(field, {}) return [Link.from_definition(name, definition, operation) for name, definition in links.items()]
[ "def", "get_links", "(", "response", ":", "GenericResponse", ",", "operation", ":", "APIOperation", ",", "field", ":", "str", ")", "->", "Sequence", "[", "Link", "]", ":", "responses", "=", "operation", ".", "definition", ".", "resolved", "[", "\"responses\"", "]", "if", "str", "(", "response", ".", "status_code", ")", "in", "responses", ":", "response_definition", "=", "responses", "[", "str", "(", "response", ".", "status_code", ")", "]", "elif", "response", ".", "status_code", "in", "responses", ":", "response_definition", "=", "responses", "[", "response", ".", "status_code", "]", "else", ":", "response_definition", "=", "responses", ".", "get", "(", "\"default\"", ",", "{", "}", ")", "links", "=", "response_definition", ".", "get", "(", "field", ",", "{", "}", ")", "return", "[", "Link", ".", "from_definition", "(", "name", ",", "definition", ",", "operation", ")", "for", "name", ",", "definition", "in", "links", ".", "items", "(", ")", "]" ]
https://github.com/schemathesis/schemathesis/blob/2eaea40be33067af5f5f6b6b7a79000224e1bbd2/src/schemathesis/specs/openapi/links.py#L150-L160
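The lookup order matters: the stringified status code wins, then the integer key, then the "default" response. A plain-dict illustration of that fallback (no schemathesis objects required):

responses = {'200': {'links': {'next': {}}}, 'default': {}}
status_code = 200
if str(status_code) in responses:
    definition = responses[str(status_code)]
elif status_code in responses:
    definition = responses[status_code]
else:
    definition = responses.get('default', {})
assert list(definition.get('links', {})) == ['next']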
ChineseGLUE/ChineseGLUE
1591b85cf5427c2ff60f718d359ecb71d2b44879
baselines/models/roberta_wwm_ext/run_classifier_with_tfhub.py
python
model_fn_builder
(num_labels, learning_rate, num_train_steps, num_warmup_steps, use_tpu, bert_hub_module_handle)
return model_fn
Returns `model_fn` closure for TPUEstimator.
Returns `model_fn` closure for TPUEstimator.
[ "Returns", "model_fn", "closure", "for", "TPUEstimator", "." ]
def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps, use_tpu, bert_hub_module_handle): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, bert_hub_module_handle) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy(label_ids, predictions) loss = tf.metrics.mean(per_example_loss) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics) elif mode == tf.estimator.ModeKeys.PREDICT: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={"probabilities": probabilities}) else: raise ValueError( "Only TRAIN, EVAL and PREDICT modes are supported: %s" % (mode)) return output_spec return model_fn
[ "def", "model_fn_builder", "(", "num_labels", ",", "learning_rate", ",", "num_train_steps", ",", "num_warmup_steps", ",", "use_tpu", ",", "bert_hub_module_handle", ")", ":", "def", "model_fn", "(", "features", ",", "labels", ",", "mode", ",", "params", ")", ":", "# pylint: disable=unused-argument", "\"\"\"The `model_fn` for TPUEstimator.\"\"\"", "tf", ".", "logging", ".", "info", "(", "\"*** Features ***\"", ")", "for", "name", "in", "sorted", "(", "features", ".", "keys", "(", ")", ")", ":", "tf", ".", "logging", ".", "info", "(", "\" name = %s, shape = %s\"", "%", "(", "name", ",", "features", "[", "name", "]", ".", "shape", ")", ")", "input_ids", "=", "features", "[", "\"input_ids\"", "]", "input_mask", "=", "features", "[", "\"input_mask\"", "]", "segment_ids", "=", "features", "[", "\"segment_ids\"", "]", "label_ids", "=", "features", "[", "\"label_ids\"", "]", "is_training", "=", "(", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ")", "(", "total_loss", ",", "per_example_loss", ",", "logits", ",", "probabilities", ")", "=", "create_model", "(", "is_training", ",", "input_ids", ",", "input_mask", ",", "segment_ids", ",", "label_ids", ",", "num_labels", ",", "bert_hub_module_handle", ")", "output_spec", "=", "None", "if", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ":", "train_op", "=", "optimization", ".", "create_optimizer", "(", "total_loss", ",", "learning_rate", ",", "num_train_steps", ",", "num_warmup_steps", ",", "use_tpu", ")", "output_spec", "=", "tf", ".", "contrib", ".", "tpu", ".", "TPUEstimatorSpec", "(", "mode", "=", "mode", ",", "loss", "=", "total_loss", ",", "train_op", "=", "train_op", ")", "elif", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "EVAL", ":", "def", "metric_fn", "(", "per_example_loss", ",", "label_ids", ",", "logits", ")", ":", "predictions", "=", "tf", ".", "argmax", "(", "logits", ",", "axis", "=", "-", "1", ",", "output_type", "=", "tf", ".", "int32", ")", "accuracy", "=", "tf", ".", "metrics", ".", "accuracy", "(", "label_ids", ",", "predictions", ")", "loss", "=", "tf", ".", "metrics", ".", "mean", "(", "per_example_loss", ")", "return", "{", "\"eval_accuracy\"", ":", "accuracy", ",", "\"eval_loss\"", ":", "loss", ",", "}", "eval_metrics", "=", "(", "metric_fn", ",", "[", "per_example_loss", ",", "label_ids", ",", "logits", "]", ")", "output_spec", "=", "tf", ".", "contrib", ".", "tpu", ".", "TPUEstimatorSpec", "(", "mode", "=", "mode", ",", "loss", "=", "total_loss", ",", "eval_metrics", "=", "eval_metrics", ")", "elif", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "PREDICT", ":", "output_spec", "=", "tf", ".", "contrib", ".", "tpu", ".", "TPUEstimatorSpec", "(", "mode", "=", "mode", ",", "predictions", "=", "{", "\"probabilities\"", ":", "probabilities", "}", ")", "else", ":", "raise", "ValueError", "(", "\"Only TRAIN, EVAL and PREDICT modes are supported: %s\"", "%", "(", "mode", ")", ")", "return", "output_spec", "return", "model_fn" ]
https://github.com/ChineseGLUE/ChineseGLUE/blob/1591b85cf5427c2ff60f718d359ecb71d2b44879/baselines/models/roberta_wwm_ext/run_classifier_with_tfhub.py#L87-L143
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/importlib/_bootstrap.py
python
_DefaultPathFinder._path_importer_cache
(cls, path)
return super()._path_importer_cache(path, _DEFAULT_PATH_HOOK)
Use the default path hook when None is stored in sys.path_importer_cache.
Use the default path hook when None is stored in sys.path_importer_cache.
[ "Use", "the", "default", "path", "hook", "when", "None", "is", "stored", "in", "sys", ".", "path_importer_cache", "." ]
def _path_importer_cache(cls, path): """Use the default path hook when None is stored in sys.path_importer_cache.""" return super()._path_importer_cache(path, _DEFAULT_PATH_HOOK)
[ "def", "_path_importer_cache", "(", "cls", ",", "path", ")", ":", "return", "super", "(", ")", ".", "_path_importer_cache", "(", "path", ",", "_DEFAULT_PATH_HOOK", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/importlib/_bootstrap.py#L740-L743
hydroshare/hydroshare
7ba563b55412f283047fb3ef6da367d41dec58c6
hs_file_types/models/model_program.py
python
ModelProgramResourceFileType.type_from_string
(cls, type_string)
return type_map.get(type_string, None)
gets model program file type value as stored in DB for a given file type name :param type_string: name of the file type
gets model program file type value as stored in DB for a given file type name :param type_string: name of the file type
[ "gets", "model", "program", "file", "type", "value", "as", "stored", "in", "DB", "for", "a", "given", "file", "type", "name", ":", "param", "type_string", ":", "name", "of", "the", "file", "type" ]
def type_from_string(cls, type_string): """gets model program file type value as stored in DB for a given file type name :param type_string: name of the file type """ type_map = {'release notes': cls.RELEASE_NOTES, 'documentation': cls.DOCUMENTATION, 'software': cls.SOFTWARE, 'computational engine': cls.ENGINE} type_string = type_string.lower() return type_map.get(type_string, None)
[ "def", "type_from_string", "(", "cls", ",", "type_string", ")", ":", "type_map", "=", "{", "'release notes'", ":", "cls", ".", "RELEASE_NOTES", ",", "'documentation'", ":", "cls", ".", "DOCUMENTATION", ",", "'software'", ":", "cls", ".", "SOFTWARE", ",", "'computational engine'", ":", "cls", ".", "ENGINE", "}", "type_string", "=", "type_string", ".", "lower", "(", ")", "return", "type_map", ".", "get", "(", "type_string", ",", "None", ")" ]
https://github.com/hydroshare/hydroshare/blob/7ba563b55412f283047fb3ef6da367d41dec58c6/hs_file_types/models/model_program.py#L58-L66
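Because the lookup lower-cases its input and uses dict.get with a None default, the mapping is case-insensitive and unknown names fail soft. A toy version of the same pattern (the class constants replaced with placeholder ints):

type_map = {'release notes': 1, 'documentation': 2,
            'software': 3, 'computational engine': 4}   # placeholder values
assert type_map.get('Documentation'.lower()) == 2
assert type_map.get('readme'.lower()) is None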
dit/dit
2853cb13110c5a5b2fa7ad792e238e2177013da2
dit/algorithms/optimization.py
python
BaseAuxVarOptimizer.construct_constant_initial
(self)
return np.concatenate(vecs, axis=0)
Construct a constant optimization vector. Returns ------- x : np.ndarray A constant optimization vector.
Construct a constant optimization vector.
[ "Construct", "a", "constant", "optimization", "vector", "." ]
def construct_constant_initial(self): """ Construct a constant optimization vector. Returns ------- x : np.ndarray A constant optimization vector. """ vecs = [] for av in self._aux_vars: vec = np.zeros(av.shape) vec[..., 0] = 1 vecs.append(vec.ravel()) return np.concatenate(vecs, axis=0)
[ "def", "construct_constant_initial", "(", "self", ")", ":", "vecs", "=", "[", "]", "for", "av", "in", "self", ".", "_aux_vars", ":", "vec", "=", "np", ".", "zeros", "(", "av", ".", "shape", ")", "vec", "[", "...", ",", "0", "]", "=", "1", "vecs", ".", "append", "(", "vec", ".", "ravel", "(", ")", ")", "return", "np", ".", "concatenate", "(", "vecs", ",", "axis", "=", "0", ")" ]
https://github.com/dit/dit/blob/2853cb13110c5a5b2fa7ad792e238e2177013da2/dit/algorithms/optimization.py#L1237-L1251
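Each auxiliary variable contributes a flattened block whose first slice along the last axis is all ones, i.e. every conditional distribution puts all its mass on outcome 0. A toy check for a single aux var of (hypothetical) shape (2, 3):

import numpy as np

vec = np.zeros((2, 3))
vec[..., 0] = 1
assert vec.ravel().tolist() == [1.0, 0.0, 0.0, 1.0, 0.0, 0.0]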
EDCD/EDMarketConnector
d8b29e45b86f36ab3cf540ec1503b9170a8505de
EDMCLogging.py
python
filter
(self, record: logging.LogRecord)
return True
Attempt to set/change fields in the LogRecord. 1. class = class name(s) of the call site, if applicable 2. qualname = __qualname__ of the call site. This simplifies logging.Formatter() as you can use just this no matter if there is a class involved or not, so you get a nice clean: <file/module>.<classA>[.classB....].<function> 3. osthreadid = OS level thread ID. If we fail to be able to properly set either then: 1. Use print() to alert, to be SURE a message is seen. 2. But also return strings noting the error, so there'll be something in the log output if it happens. :param record: The LogRecord we're "filtering" :return: bool - Always true in order for this record to be logged.
Attempt to set/change fields in the LogRecord.
[ "Attempt", "to", "set", "/", "change", "fields", "in", "the", "LogRecord", "." ]
def filter(self, record: logging.LogRecord) -> bool: """ Attempt to set/change fields in the LogRecord. 1. class = class name(s) of the call site, if applicable 2. qualname = __qualname__ of the call site. This simplifies logging.Formatter() as you can use just this no matter if there is a class involved or not, so you get a nice clean: <file/module>.<classA>[.classB....].<function> 3. osthreadid = OS level thread ID. If we fail to be able to properly set either then: 1. Use print() to alert, to be SURE a message is seen. 2. But also return strings noting the error, so there'll be something in the log output if it happens. :param record: The LogRecord we're "filtering" :return: bool - Always true in order for this record to be logged. """ (class_name, qualname, module_name) = self.caller_attributes(module_name=getattr(record, 'module')) # Only set if we got a useful value if module_name: setattr(record, 'module', module_name) # Only set if not already provided by logging itself if getattr(record, 'class', None) is None: setattr(record, 'class', class_name) # Only set if not already provided by logging itself if getattr(record, 'qualname', None) is None: setattr(record, 'qualname', qualname) setattr(record, 'osthreadid', thread_native_id()) return True
[ "def", "filter", "(", "self", ",", "record", ":", "logging", ".", "LogRecord", ")", "->", "bool", ":", "(", "class_name", ",", "qualname", ",", "module_name", ")", "=", "self", ".", "caller_attributes", "(", "module_name", "=", "getattr", "(", "record", ",", "'module'", ")", ")", "# Only set if we got a useful value", "if", "module_name", ":", "setattr", "(", "record", ",", "'module'", ",", "module_name", ")", "# Only set if not already provided by logging itself", "if", "getattr", "(", "record", ",", "'class'", ",", "None", ")", "is", "None", ":", "setattr", "(", "record", ",", "'class'", ",", "class_name", ")", "# Only set if not already provided by logging itself", "if", "getattr", "(", "record", ",", "'qualname'", ",", "None", ")", "is", "None", ":", "setattr", "(", "record", ",", "'qualname'", ",", "qualname", ")", "setattr", "(", "record", ",", "'osthreadid'", ",", "thread_native_id", "(", ")", ")", "return", "True" ]
https://github.com/EDCD/EDMarketConnector/blob/d8b29e45b86f36ab3cf540ec1503b9170a8505de/EDMCLogging.py#L278-L314
tensorflow/tensor2tensor
2a33b152d7835af66a6d20afe7961751047e28dd
tensor2tensor/rl/trainer_model_based_params.py
python
rlmb_dummy_range
(rhp)
Dummy tuning grid just to get the variance.
Dummy tuning grid just to get the variance.
[ "Dummy", "tuning", "grid", "just", "to", "get", "the", "variance", "." ]
def rlmb_dummy_range(rhp): """Dummy tuning grid just to get the variance.""" rhp.set_float("model.moe_loss_coef", 0.01, 0.02)
[ "def", "rlmb_dummy_range", "(", "rhp", ")", ":", "rhp", ".", "set_float", "(", "\"model.moe_loss_coef\"", ",", "0.01", ",", "0.02", ")" ]
https://github.com/tensorflow/tensor2tensor/blob/2a33b152d7835af66a6d20afe7961751047e28dd/tensor2tensor/rl/trainer_model_based_params.py#L875-L877
danilobellini/audiolazy
dba0a278937909980ed40b976d866b8e97c35dee
examples/ode_to_joy.py
python
note2snd
(pitch, quarters)
return synth(freq, dur)
Creates an audio Stream object for a single note. Parameters ---------- pitch : Pitch note like ``"A4"``, as a string, or ``None`` for a rest. quarters : Duration in quarters (see ``quarter_dur``).
Creates an audio Stream object for a single note.
[ "Creates", "an", "audio", "Stream", "object", "for", "a", "single", "note", "." ]
def note2snd(pitch, quarters): """ Creates an audio Stream object for a single note. Parameters ---------- pitch : Pitch note like ``"A4"``, as a string, or ``None`` for a rest. quarters : Duration in quarters (see ``quarter_dur``). """ dur = quarters * quarter_dur if pitch is None: return zeros(dur) freq = str2freq(pitch) * Hz return synth(freq, dur)
[ "def", "note2snd", "(", "pitch", ",", "quarters", ")", ":", "dur", "=", "quarters", "*", "quarter_dur", "if", "pitch", "is", "None", ":", "return", "zeros", "(", "dur", ")", "freq", "=", "str2freq", "(", "pitch", ")", "*", "Hz", "return", "synth", "(", "freq", ",", "dur", ")" ]
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/examples/ode_to_joy.py#L49-L64
SteveDoyle2/pyNastran
eda651ac2d4883d95a34951f8a002ff94f642a1a
pyNastran/f06/f06_formatting.py
python
write_floats_13e
(vals: List[float])
return vals2
writes a series of Nastran formatted 13.6 floats
writes a series of Nastran formatted 13.6 floats
[ "writes", "a", "series", "of", "Nastran", "formatted", "13", ".", "6", "floats" ]
def write_floats_13e(vals: List[float]) -> List[str]: """writes a series of Nastran formatted 13.6 floats""" vals2 = [] for v in vals: v2 = '%13.6E' % v if v2 in (' 0.000000E+00', '-0.000000E+00'): v2 = ' 0.0' vals2.append(v2) return vals2
[ "def", "write_floats_13e", "(", "vals", ":", "List", "[", "float", "]", ")", "->", "List", "[", "str", "]", ":", "vals2", "=", "[", "]", "for", "v", "in", "vals", ":", "v2", "=", "'%13.6E'", "%", "v", "if", "v2", "in", "(", "' 0.000000E+00'", ",", "'-0.000000E+00'", ")", ":", "v2", "=", "' 0.0'", "vals2", ".", "append", "(", "v2", ")", "return", "vals2" ]
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/f06/f06_formatting.py#L57-L65
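The %13.6E format pads every value to a fixed 13-character field, which is what makes the exact-string comparison against the two signed-zero forms work; those zeros are then rewritten to a short literal. A quick check of the formatting itself:

vals = ['%13.6E' % v for v in (0.0, 12345.678)]
assert vals == [' 0.000000E+00', ' 1.234568E+04']   # the first would be rewritten to the short zero form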
quay/quay
b7d325ed42827db9eda2d9f341cb5a6cdfd155a6
data/logs_model/logs_producer/util.py
python
logs_json_serializer
(logentry, sort_keys=False)
return json.dumps( logentry.to_dict(), cls=LogEntryJSONEncoder, ensure_ascii=True, sort_keys=sort_keys ).encode("ascii")
Serializes a LogEntry to json bytes.
Serializes a LogEntry to json bytes.
[ "Serializes", "a", "LogEntry", "to", "json", "bytes", "." ]
def logs_json_serializer(logentry, sort_keys=False): """ Serializes a LogEntry to json bytes. """ return json.dumps( logentry.to_dict(), cls=LogEntryJSONEncoder, ensure_ascii=True, sort_keys=sort_keys ).encode("ascii")
[ "def", "logs_json_serializer", "(", "logentry", ",", "sort_keys", "=", "False", ")", ":", "return", "json", ".", "dumps", "(", "logentry", ".", "to_dict", "(", ")", ",", "cls", "=", "LogEntryJSONEncoder", ",", "ensure_ascii", "=", "True", ",", "sort_keys", "=", "sort_keys", ")", ".", "encode", "(", "\"ascii\"", ")" ]
https://github.com/quay/quay/blob/b7d325ed42827db9eda2d9f341cb5a6cdfd155a6/data/logs_model/logs_producer/util.py#L17-L23
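With ensure_ascii=True plus .encode("ascii"), the producer guarantees pure-ASCII bytes on the wire. A self-contained sketch with a stand-in encoder and payload (the real LogEntryJSONEncoder handles datetimes and similar types):

import json

class LogEntryJSONEncoder(json.JSONEncoder):   # stand-in for the real encoder
    pass

payload = json.dumps({'kind': 'push_repo'}, cls=LogEntryJSONEncoder,
                     ensure_ascii=True, sort_keys=True).encode('ascii')
assert payload == b'{"kind": "push_repo"}'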
leo-editor/leo-editor
383d6776d135ef17d73d935a2f0ecb3ac0e99494
leo/scripts/leoFindScript.py
python
reChangeNext
( v, pos, findPat, changePat, bodyFlag, reFlags = None )
return v, mo, pos
reChangeNext: use re.search() to change text in a Leo outline. v the vnode to start the search. pos the position within the body text of v to start the search. findPat the search string. changePat the replacement string. bodyFlag true: change body text. false: change headline text. reFlags flags argument to re.search(). returns a tuple (v,pos) showing where the change occurred. returns (None,0) if no further match in the outline was found. Note: if (v,pos) is a tuple returned previously from reChangeNext, reChangeNext(v,pos+len(findPat),findPat,changePat,bodyFlag) changes the next matching string.
reChangeNext: use re.search() to change text in a Leo outline. v the vnode to start the search. pos the position within the body text of v to start the search. findPat the search string. changePat the replacement string. bodyFlag true: change body text. false: change headline text. reFlags flags argument to re.search(). returns a tuple (v,pos) showing where the change occurred. returns (None,0) if no further match in the outline was found.
[ "reChangeNext", ":", "use", "re", ".", "search", "()", "to", "change", "text", "in", "a", "Leo", "outline", ".", "v", "the", "vnode", "to", "start", "the", "search", ".", "pos", "the", "position", "within", "the", "body", "text", "of", "v", "to", "start", "the", "search", ".", "findPat", "the", "search", "string", ".", "changePat", "the", "replacement", "string", ".", "bodyFlag", "true", ":", "change", "body", "text", ".", "false", ":", "change", "headline", "text", ".", "reFlags", "flags", "argument", "to", "re", ".", "search", "()", ".", "returns", "a", "tuple", "(", "v", "pos", ")", "showing", "where", "the", "change", "occurred", ".", "returns", "(", "None", "0", ")", "if", "no", "further", "match", "in", "the", "outline", "was", "found", "." ]
def reChangeNext ( v, pos, findPat, changePat, bodyFlag, reFlags = None ): """ reChangeNext: use re.search() to change text in a Leo outline. v the vnode to start the search. pos the position within the body text of v to start the search. findPat the search string. changePat the replacement string. bodyFlag true: change body text. false: change headline text. reFlags flags argument to re.search(). returns a tuple (v,pos) showing where the change occurred. returns (None,0) if no further match in the outline was found. Note: if (v,pos) is a tuple returned previously from reChangeNext, reChangeNext(v,pos+len(findPat),findPat,changePat,bodyFlag) changes the next matching string. """ n = len(findPat) v, mo, pos = reFindNext(v, pos, findPat, bodyFlag, reFlags) if v == None: return None, None, 0 if bodyFlag: s = v.bodyString() print(s, findPat, changePat) # s[pos:pos+n] = changePat s = s[:pos] + changePat + s[pos+n:] v.setBodyStringOrPane(s) else: s = v.headString() # s[pos:pos+n] = changePat s = s[:pos] + changePat + s[pos+n:] v.setHeadStringOrHeadline(s) return v, mo, pos
[ "def", "reChangeNext", "(", "v", ",", "pos", ",", "findPat", ",", "changePat", ",", "bodyFlag", ",", "reFlags", "=", "None", ")", ":", "n", "=", "len", "(", "findPat", ")", "v", ",", "mo", ",", "pos", "=", "reFindNext", "(", "v", ",", "pos", ",", "findPat", ",", "bodyFlag", ",", "reFlags", ")", "if", "v", "==", "None", ":", "return", "None", ",", "None", ",", "0", "if", "bodyFlag", ":", "s", "=", "v", ".", "bodyString", "(", ")", "print", "(", "s", ",", "findPat", ",", "changePat", ")", "# s[pos:pos+n] = changePat", "s", "=", "s", "[", ":", "pos", "]", "+", "changePat", "+", "s", "[", "pos", "+", "n", ":", "]", "v", ".", "setBodyStringOrPane", "(", "s", ")", "else", ":", "s", "=", "v", ".", "headString", "(", ")", "# s[pos:pos+n] = changePat", "s", "=", "s", "[", ":", "pos", "]", "+", "changePat", "+", "s", "[", "pos", "+", "n", ":", "]", "v", ".", "setHeadStringOrHeadline", "(", "s", ")", "return", "v", ",", "mo", ",", "pos" ]
https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/scripts/leoFindScript.py#L200-L233
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/tcl/tkinter/tix.py
python
HList.delete_siblings
(self, entry)
[]
def delete_siblings(self, entry): self.tk.call(self._w, 'delete', 'siblings', entry)
[ "def", "delete_siblings", "(", "self", ",", "entry", ")", ":", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'delete'", ",", "'siblings'", ",", "entry", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/tcl/tkinter/tix.py#L899-L900
cloudera/impyla
0c736af4cad2bade9b8e313badc08ec50e81c948
impala/_thrift_gen/fb303/FacebookService.py
python
setOption_result.__repr__
(self)
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
[]
def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
[ "def", "__repr__", "(", "self", ")", ":", "L", "=", "[", "'%s=%r'", "%", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "self", ".", "__dict__", ".", "items", "(", ")", "]", "return", "'%s(%s)'", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "', '", ".", "join", "(", "L", ")", ")" ]
https://github.com/cloudera/impyla/blob/0c736af4cad2bade9b8e313badc08ec50e81c948/impala/_thrift_gen/fb303/FacebookService.py#L1550-L1553
tobegit3hub/deep_image_model
8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e
java_predict_client/src/main/proto/tensorflow/python/framework/ops.py
python
Output.set_shape
(self, shape)
Updates the shape of this tensor. This method can be called multiple times, and will merge the given `shape` with the current shape of this tensor. It can be used to provide additional information about the shape of this tensor that cannot be inferred from the graph alone. For example, this can be used to provide additional information about the shapes of images: ```python _, image_data = tf.TFRecordReader(...).read(...) image = tf.image.decode_png(image_data, channels=3) # The height and width dimensions of `image` are data dependent, and # cannot be computed without executing the op. print(image.get_shape()) ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)]) # We know that each image in this dataset is 28 x 28 pixels. image.set_shape([28, 28, 3]) print(image.get_shape()) ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)]) ``` Args: shape: A `TensorShape` representing the shape of this tensor. Raises: ValueError: If `shape` is not compatible with the current shape of this tensor.
Updates the shape of this tensor.
[ "Updates", "the", "shape", "of", "this", "tensor", "." ]
def set_shape(self, shape): """Updates the shape of this tensor. This method can be called multiple times, and will merge the given `shape` with the current shape of this tensor. It can be used to provide additional information about the shape of this tensor that cannot be inferred from the graph alone. For example, this can be used to provide additional information about the shapes of images: ```python _, image_data = tf.TFRecordReader(...).read(...) image = tf.image.decode_png(image_data, channels=3) # The height and width dimensions of `image` are data dependent, and # cannot be computed without executing the op. print(image.get_shape()) ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)]) # We know that each image in this dataset is 28 x 28 pixels. image.set_shape([28, 28, 3]) print(image.get_shape()) ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)]) ``` Args: shape: A `TensorShape` representing the shape of this tensor. Raises: ValueError: If `shape` is not compatible with the current shape of this tensor. """ self._shape = self._shape.merge_with(shape)
[ "def", "set_shape", "(", "self", ",", "shape", ")", ":", "self", ".", "_shape", "=", "self", ".", "_shape", ".", "merge_with", "(", "shape", ")" ]
https://github.com/tobegit3hub/deep_image_model/blob/8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e/java_predict_client/src/main/proto/tensorflow/python/framework/ops.py#L387-L418
4shadoww/hakkuframework
409a11fc3819d251f86faa3473439f8c19066a21
lib/future/backports/email/message.py
python
Message.get_content_charset
(self, failobj=None)
return charset.lower()
Return the charset parameter of the Content-Type header. The returned string is always coerced to lower case. If there is no Content-Type header, or if that header has no charset parameter, failobj is returned.
Return the charset parameter of the Content-Type header.
[ "Return", "the", "charset", "parameter", "of", "the", "Content", "-", "Type", "header", "." ]
def get_content_charset(self, failobj=None): """Return the charset parameter of the Content-Type header. The returned string is always coerced to lower case. If there is no Content-Type header, or if that header has no charset parameter, failobj is returned. """ missing = object() charset = self.get_param('charset', missing) if charset is missing: return failobj if isinstance(charset, tuple): # RFC 2231 encoded, so decode it, and it better end up as ascii. pcharset = charset[0] or 'us-ascii' try: # LookupError will be raised if the charset isn't known to # Python. UnicodeError will be raised if the encoded text # contains a character not in the charset. as_bytes = charset[2].encode('raw-unicode-escape') charset = str(as_bytes, pcharset) except (LookupError, UnicodeError): charset = charset[2] # charset characters must be in us-ascii range try: charset.encode('us-ascii') except UnicodeError: return failobj # RFC 2046, $4.1.2 says charsets are not case sensitive return charset.lower()
[ "def", "get_content_charset", "(", "self", ",", "failobj", "=", "None", ")", ":", "missing", "=", "object", "(", ")", "charset", "=", "self", ".", "get_param", "(", "'charset'", ",", "missing", ")", "if", "charset", "is", "missing", ":", "return", "failobj", "if", "isinstance", "(", "charset", ",", "tuple", ")", ":", "# RFC 2231 encoded, so decode it, and it better end up as ascii.", "pcharset", "=", "charset", "[", "0", "]", "or", "'us-ascii'", "try", ":", "# LookupError will be raised if the charset isn't known to", "# Python. UnicodeError will be raised if the encoded text", "# contains a character not in the charset.", "as_bytes", "=", "charset", "[", "2", "]", ".", "encode", "(", "'raw-unicode-escape'", ")", "charset", "=", "str", "(", "as_bytes", ",", "pcharset", ")", "except", "(", "LookupError", ",", "UnicodeError", ")", ":", "charset", "=", "charset", "[", "2", "]", "# charset characters must be in us-ascii range", "try", ":", "charset", ".", "encode", "(", "'us-ascii'", ")", "except", "UnicodeError", ":", "return", "failobj", "# RFC 2046, $4.1.2 says charsets are not case sensitive", "return", "charset", ".", "lower", "(", ")" ]
https://github.com/4shadoww/hakkuframework/blob/409a11fc3819d251f86faa3473439f8c19066a21/lib/future/backports/email/message.py#L833-L861
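The stdlib email.message.Message, which this backport mirrors, shows the two interesting paths here: unconditional lower-casing and the failobj fallback when no Content-Type header exists:

from email.message import Message

msg = Message()
msg['Content-Type'] = 'text/plain; charset="UTF-8"'
assert msg.get_content_charset() == 'utf-8'                       # always lower-cased
assert Message().get_content_charset('fallback') == 'fallback'    # no Content-Type header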
pnprog/goreviewpartner
cbcc486cd4c51fb6fc3bc0a1eab61ff34298dadf
gomill/sgf_moves.py
python
set_initial_position
(sgf_game, board)
Add setup stones to an Sgf_game reflecting a board position. sgf_game -- Sgf_game board -- boards.Board Replaces any existing setup stones in the Sgf_game's root node.
Add setup stones to an Sgf_game reflecting a board position.
[ "Add", "setup", "stones", "to", "an", "Sgf_game", "reflecting", "a", "board", "position", "." ]
def set_initial_position(sgf_game, board): """Add setup stones to an Sgf_game reflecting a board position. sgf_game -- Sgf_game board -- boards.Board Replaces any existing setup stones in the Sgf_game's root node. """ stones = {'b' : set(), 'w' : set()} for (colour, point) in board.list_occupied_points(): stones[colour].add(point) sgf_game.get_root().set_setup_stones(stones['b'], stones['w'])
[ "def", "set_initial_position", "(", "sgf_game", ",", "board", ")", ":", "stones", "=", "{", "'b'", ":", "set", "(", ")", ",", "'w'", ":", "set", "(", ")", "}", "for", "(", "colour", ",", "point", ")", "in", "board", ".", "list_occupied_points", "(", ")", ":", "stones", "[", "colour", "]", ".", "add", "(", "point", ")", "sgf_game", ".", "get_root", "(", ")", ".", "set_setup_stones", "(", "stones", "[", "'b'", "]", ",", "stones", "[", "'w'", "]", ")" ]
https://github.com/pnprog/goreviewpartner/blob/cbcc486cd4c51fb6fc3bc0a1eab61ff34298dadf/gomill/sgf_moves.py#L60-L72
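A hedged usage sketch, assuming gomill is installed and recalling its Board.play(row, col, colour) signature from the library's docs:

from gomill import boards, sgf, sgf_moves

board = boards.Board(9)
board.play(2, 2, 'b')
board.play(6, 6, 'w')
game = sgf.Sgf_game(size=9)
sgf_moves.set_initial_position(game, board)   # writes the setup stones into the root node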
simonw/sqlite-utils
e0c476bc380744680c8b7675c24fb0e9f5ec6dcd
sqlite_utils/cli.py
python
dump
(path, load_extension)
Output a SQL dump of the schema and full contents of the database
Output a SQL dump of the schema and full contents of the database
[ "Output", "a", "SQL", "dump", "of", "the", "schema", "and", "full", "contents", "of", "the", "database" ]
def dump(path, load_extension): """Output a SQL dump of the schema and full contents of the database""" db = sqlite_utils.Database(path) _load_extensions(db, load_extension) for line in db.conn.iterdump(): click.echo(line)
[ "def", "dump", "(", "path", ",", "load_extension", ")", ":", "db", "=", "sqlite_utils", ".", "Database", "(", "path", ")", "_load_extensions", "(", "db", ",", "load_extension", ")", "for", "line", "in", "db", ".", "conn", ".", "iterdump", "(", ")", ":", "click", ".", "echo", "(", "line", ")" ]
https://github.com/simonw/sqlite-utils/blob/e0c476bc380744680c8b7675c24fb0e9f5ec6dcd/sqlite_utils/cli.py#L326-L331
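Under the hood this is just sqlite3's Connection.iterdump(), which yields the dump one SQL statement at a time; a stdlib-only sketch:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (id INTEGER)')
for line in conn.iterdump():
    print(line)   # BEGIN TRANSACTION; CREATE TABLE t (id INTEGER); COMMIT;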
Rapptz/RoboDanny
1fb95d76d1b7685e2e2ff950e11cddfc96efbfec
cogs/mod.py
python
Mod.tempban
(self, ctx, duration: time.FutureTime, member: MemberID, *, reason: ActionReason = None)
Temporarily bans a member for the specified duration. The duration can be a short time form, e.g. 30d, or a more human duration such as "until thursday at 3PM", or a more concrete time such as "2024-12-31". Note that times are in UTC. You can also ban by ID to ban users regardless of whether they're in the server or not. In order for this to work, the bot must have Ban Members permissions. To use this command you must have Ban Members permission.
Temporarily bans a member for the specified duration.
[ "Temporarily", "bans", "a", "member", "for", "the", "specified", "duration", "." ]
async def tempban(self, ctx, duration: time.FutureTime, member: MemberID, *, reason: ActionReason = None): """Temporarily bans a member for the specified duration. The duration can be a short time form, e.g. 30d, or a more human duration such as "until thursday at 3PM", or a more concrete time such as "2024-12-31". Note that times are in UTC. You can also ban by ID to ban users regardless of whether they're in the server or not. In order for this to work, the bot must have Ban Members permissions. To use this command you must have Ban Members permission. """ if reason is None: reason = f'Action done by {ctx.author} (ID: {ctx.author.id})' reminder = self.bot.get_cog('Reminder') if reminder is None: return await ctx.send('Sorry, this functionality is currently unavailable. Try again later?') until = f'until {time.format_dt(duration.dt, "F")}' heads_up_message = f'You have been banned from {ctx.guild.name} {until}. Reason: {reason}' try: await member.send(heads_up_message) except (AttributeError, discord.HTTPException): # best attempt, oh well. pass reason = safe_reason_append(reason, until) await ctx.guild.ban(member, reason=reason) timer = await reminder.create_timer(duration.dt, 'tempban', ctx.guild.id, ctx.author.id, member.id, connection=ctx.db, created=ctx.message.created_at) await ctx.send(f'Banned {member} for {time.format_relative(duration.dt)}.')
[ "async", "def", "tempban", "(", "self", ",", "ctx", ",", "duration", ":", "time", ".", "FutureTime", ",", "member", ":", "MemberID", ",", "*", ",", "reason", ":", "ActionReason", "=", "None", ")", ":", "if", "reason", "is", "None", ":", "reason", "=", "f'Action done by {ctx.author} (ID: {ctx.author.id})'", "reminder", "=", "self", ".", "bot", ".", "get_cog", "(", "'Reminder'", ")", "if", "reminder", "is", "None", ":", "return", "await", "ctx", ".", "send", "(", "'Sorry, this functionality is currently unavailable. Try again later?'", ")", "until", "=", "f'until {time.format_dt(duration.dt, \"F\")}'", "heads_up_message", "=", "f'You have been banned from {ctx.guild.name} {until}. Reason: {reason}'", "try", ":", "await", "member", ".", "send", "(", "heads_up_message", ")", "except", "(", "AttributeError", ",", "discord", ".", "HTTPException", ")", ":", "# best attempt, oh well.", "pass", "reason", "=", "safe_reason_append", "(", "reason", ",", "until", ")", "await", "ctx", ".", "guild", ".", "ban", "(", "member", ",", "reason", "=", "reason", ")", "timer", "=", "await", "reminder", ".", "create_timer", "(", "duration", ".", "dt", ",", "'tempban'", ",", "ctx", ".", "guild", ".", "id", ",", "ctx", ".", "author", ".", "id", ",", "member", ".", "id", ",", "connection", "=", "ctx", ".", "db", ",", "created", "=", "ctx", ".", "message", ".", "created_at", ")", "await", "ctx", ".", "send", "(", "f'Banned {member} for {time.format_relative(duration.dt)}.'", ")" ]
https://github.com/Rapptz/RoboDanny/blob/1fb95d76d1b7685e2e2ff950e11cddfc96efbfec/cogs/mod.py#L1020-L1060
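A hedged sketch of the safe_reason_append helper this command relies on: the real implementation lives elsewhere in RoboDanny, so the body below is an assumption. The constraint it has to respect is real, though: Discord caps audit-log reasons at 512 characters, so the appended "until ..." note should be dropped rather than letting the ban call fail.

AUDIT_LOG_REASON_LIMIT = 512  # Discord's documented cap on audit-log reasons

def safe_reason_append(base: str, to_append: str) -> str:
    # hypothetical body: keep the combined reason under the cap,
    # preferring to drop the appendix over rejecting the whole action
    appended = f'{base} ({to_append})'
    return appended if len(appended) <= AUDIT_LOG_REASON_LIMIT else base

print(safe_reason_append('Spamming invites', 'until 2024-12-31 00:00 UTC'))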
dragonfly/dragonfly
a579b5eadf452e23b07d4caf27b402703b0012b7
dragonfly/distributions/discrete.py
python
Binomial.get_mean
(self)
return self.n*self.p
Returns mean
Returns mean
[ "Returns", "mean" ]
def get_mean(self): """ Returns mean """ return self.n*self.p
[ "def", "get_mean", "(", "self", ")", ":", "return", "self", ".", "n", "*", "self", ".", "p" ]
https://github.com/dragonfly/dragonfly/blob/a579b5eadf452e23b07d4caf27b402703b0012b7/dragonfly/distributions/discrete.py#L93-L95
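The closed form E[X] = n * p returned by get_mean is easy to sanity-check empirically. The snippet below is a standalone sketch (it does not use dragonfly's Binomial class) comparing the closed form against a Monte Carlo estimate.

import random

n, p = 10, 0.3
closed_form = n * p  # what get_mean() returns

# Monte Carlo estimate: each sample counts successes in n Bernoulli(p) trials
samples = [sum(random.random() < p for _ in range(n)) for _ in range(100_000)]
empirical = sum(samples) / len(samples)
print(closed_form, round(empirical, 2))  # both close to 3.0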
HiKapok/tf.fashionAI
bc7d26c78e845df4eda0997494a5859cab1ec5de
preprocessing/preprocessing.py
python
rotate_all
(images, angles, x, y, interpolation="NEAREST")
Rotate image(s) by the passed angle(s) in radians. Args: images: A tensor of shape (num_images, num_rows, num_columns, num_channels) (NHWC), (num_rows, num_columns, num_channels) (HWC), or (num_rows, num_columns) (HW). angles: A scalar angle to rotate all images by, or (if images has rank 4) a vector of length num_images, with an angle for each image in the batch. interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR". Returns: Image(s) with the same type and shape as `images`, rotated by the given angle(s). Empty space due to the rotation will be filled with zeros. Raises: TypeError: If `image` is an invalid type.
Rotate image(s) by the passed angle(s) in radians. Args: images: A tensor of shape (num_images, num_rows, num_columns, num_channels) (NHWC), (num_rows, num_columns, num_channels) (HWC), or (num_rows, num_columns) (HW). angles: A scalar angle to rotate all images by, or (if images has rank 4) a vector of length num_images, with an angle for each image in the batch. interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR". Returns: Image(s) with the same type and shape as `images`, rotated by the given angle(s). Empty space due to the rotation will be filled with zeros. Raises: TypeError: If `image` is an invalid type.
[ "Rotate", "image", "(", "s", ")", "by", "the", "passed", "angle", "(", "s", ")", "in", "radians", ".", "Args", ":", "images", ":", "A", "tensor", "of", "shape", "(", "num_images", "num_rows", "num_columns", "num_channels", ")", "(", "NHWC", ")", "(", "num_rows", "num_columns", "num_channels", ")", "(", "HWC", ")", "or", "(", "num_rows", "num_columns", ")", "(", "HW", ")", ".", "angles", ":", "A", "scalar", "angle", "to", "rotate", "all", "images", "by", "or", "(", "if", "images", "has", "rank", "4", ")", "a", "vector", "of", "length", "num_images", "with", "an", "angle", "for", "each", "image", "in", "the", "batch", ".", "interpolation", ":", "Interpolation", "mode", ".", "Supported", "values", ":", "NEAREST", "BILINEAR", ".", "Returns", ":", "Image", "(", "s", ")", "with", "the", "same", "type", "and", "shape", "as", "images", "rotated", "by", "the", "given", "angle", "(", "s", ")", ".", "Empty", "space", "due", "to", "the", "rotation", "will", "be", "filled", "with", "zeros", ".", "Raises", ":", "TypeError", ":", "If", "image", "is", "an", "invalid", "type", "." ]
def rotate_all(images, angles, x, y, interpolation="NEAREST"): """Rotate image(s) by the passed angle(s) in radians. Args: images: A tensor of shape (num_images, num_rows, num_columns, num_channels) (NHWC), (num_rows, num_columns, num_channels) (HWC), or (num_rows, num_columns) (HW). angles: A scalar angle to rotate all images by, or (if images has rank 4) a vector of length num_images, with an angle for each image in the batch. interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR". Returns: Image(s) with the same type and shape as `images`, rotated by the given angle(s). Empty space due to the rotation will be filled with zeros. Raises: TypeError: If `image` is an invalid type. """ image_or_images = tf.convert_to_tensor(images, name="images") if len(image_or_images.get_shape()) == 2: images = image_or_images[None, :, :, None] elif len(image_or_images.get_shape()) == 3: images = image_or_images[None, :, :, :] elif len(image_or_images.get_shape()) == 4: images = image_or_images else: raise TypeError("Images should have rank between 2 and 4.") image_height = tf.cast(tf.shape(images)[1], tf.float32)[None] image_width = tf.cast(tf.shape(images)[2], tf.float32)[None] rotate_matrix = get_projective_transforms(angles, image_height, image_width, x, y) flaten_rotate_matrix = tf.squeeze(rotate_matrix) a0, a1, a2, b0, b1, b2 = flaten_rotate_matrix[0], \ flaten_rotate_matrix[1], \ flaten_rotate_matrix[2], \ flaten_rotate_matrix[3], \ flaten_rotate_matrix[4], \ flaten_rotate_matrix[5] normalizor = a1 * b0 - a0 * b1 + 1e-8 new_x = -(b1 * x - a1 * y - b1 * a2 + a1 * b2)/normalizor new_y = (b0 * x - a0 * y - a2 * b0 + a0 * b2)/normalizor #new_x, new_y = new_x/tf.cast(shape[1], tf.float32), new_y/tf.cast(shape[0], tf.float32) output = tf.contrib.image.transform(images, rotate_matrix, interpolation=interpolation) if len(image_or_images.get_shape()) == 2: return output[0, :, :, 0], new_x, new_y elif len(image_or_images.get_shape()) == 3: return output[0, :, :, :], new_x, new_y else: return output, new_x, new_y
[ "def", "rotate_all", "(", "images", ",", "angles", ",", "x", ",", "y", ",", "interpolation", "=", "\"NEAREST\"", ")", ":", "image_or_images", "=", "tf", ".", "convert_to_tensor", "(", "images", ",", "name", "=", "\"images\"", ")", "if", "len", "(", "image_or_images", ".", "get_shape", "(", ")", ")", "==", "2", ":", "images", "=", "image_or_images", "[", "None", ",", ":", ",", ":", ",", "None", "]", "elif", "len", "(", "image_or_images", ".", "get_shape", "(", ")", ")", "==", "3", ":", "images", "=", "image_or_images", "[", "None", ",", ":", ",", ":", ",", ":", "]", "elif", "len", "(", "image_or_images", ".", "get_shape", "(", ")", ")", "==", "4", ":", "images", "=", "image_or_images", "else", ":", "raise", "TypeError", "(", "\"Images should have rank between 2 and 4.\"", ")", "image_height", "=", "tf", ".", "cast", "(", "tf", ".", "shape", "(", "images", ")", "[", "1", "]", ",", "tf", ".", "float32", ")", "[", "None", "]", "image_width", "=", "tf", ".", "cast", "(", "tf", ".", "shape", "(", "images", ")", "[", "2", "]", ",", "tf", ".", "float32", ")", "[", "None", "]", "rotate_matrix", "=", "get_projective_transforms", "(", "angles", ",", "image_height", ",", "image_width", ",", "x", ",", "y", ")", "flaten_rotate_matrix", "=", "tf", ".", "squeeze", "(", "rotate_matrix", ")", "a0", ",", "a1", ",", "a2", ",", "b0", ",", "b1", ",", "b2", "=", "flaten_rotate_matrix", "[", "0", "]", ",", "flaten_rotate_matrix", "[", "1", "]", ",", "flaten_rotate_matrix", "[", "2", "]", ",", "flaten_rotate_matrix", "[", "3", "]", ",", "flaten_rotate_matrix", "[", "4", "]", ",", "flaten_rotate_matrix", "[", "5", "]", "normalizor", "=", "a1", "*", "b0", "-", "a0", "*", "b1", "+", "1e-8", "new_x", "=", "-", "(", "b1", "*", "x", "-", "a1", "*", "y", "-", "b1", "*", "a2", "+", "a1", "*", "b2", ")", "/", "normalizor", "new_y", "=", "(", "b0", "*", "x", "-", "a0", "*", "y", "-", "a2", "*", "b0", "+", "a0", "*", "b2", ")", "/", "normalizor", "#new_x, new_y = new_x/tf.cast(shape[1], tf.float32), new_y/tf.cast(shape[0], tf.float32)", "output", "=", "tf", ".", "contrib", ".", "image", ".", "transform", "(", "images", ",", "rotate_matrix", ",", "interpolation", "=", "interpolation", ")", "if", "len", "(", "image_or_images", ".", "get_shape", "(", ")", ")", "==", "2", ":", "return", "output", "[", "0", ",", ":", ",", ":", ",", "0", "]", ",", "new_x", ",", "new_y", "elif", "len", "(", "image_or_images", ".", "get_shape", "(", ")", ")", "==", "3", ":", "return", "output", "[", "0", ",", ":", ",", ":", ",", ":", "]", ",", "new_x", ",", "new_y", "else", ":", "return", "output", ",", "new_x", ",", "new_y" ]
https://github.com/HiKapok/tf.fashionAI/blob/bc7d26c78e845df4eda0997494a5859cab1ec5de/preprocessing/preprocessing.py#L673-L723
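The undocumented x/y arguments are the coordinates of an input-image point whose position after the transform is returned as new_x/new_y; the closed-form expressions in the function are Cramer's rule applied to the 2x2 block of the output-to-input affine map. The NumPy sketch below checks that algebra against np.linalg.solve; the coefficient values are made up purely for the check.

import numpy as np

# output->input affine map, laid out like tf.contrib.image.transform:
# x_in = a0*x_out + a1*y_out + a2 ; y_in = b0*x_out + b1*y_out + b2
a0, a1, a2, b0, b1, b2 = 0.866, -0.5, 10.0, 0.5, 0.866, -4.0
x, y = 37.0, 21.0  # a point in the input image

# closed form copied from rotate_all
normalizor = a1 * b0 - a0 * b1 + 1e-8
new_x = -(b1 * x - a1 * y - b1 * a2 + a1 * b2) / normalizor
new_y = (b0 * x - a0 * y - a2 * b0 + a0 * b2) / normalizor

# reference: solve the same 2x2 linear system numerically
ref = np.linalg.solve(np.array([[a0, a1], [b0, b1]]),
                      np.array([x - a2, y - b2]))
assert np.allclose([new_x, new_y], ref, atol=1e-4)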
azavea/raster-vision
fc181a6f31f085affa1ee12f0204bdbc5a6bf85a
rastervision_pipeline/rastervision/pipeline/file_system/utils.py
python
file_to_json
(uri: str)
return json.loads(file_to_str(uri))
Return JSON dict based on file at uri.
Return JSON dict based on file at uri.
[ "Return", "JSON", "dict", "based", "on", "file", "at", "uri", "." ]
def file_to_json(uri: str) -> dict: """Return JSON dict based on file at uri.""" return json.loads(file_to_str(uri))
[ "def", "file_to_json", "(", "uri", ":", "str", ")", "->", "dict", ":", "return", "json", ".", "loads", "(", "file_to_str", "(", "uri", ")", ")" ]
https://github.com/azavea/raster-vision/blob/fc181a6f31f085affa1ee12f0204bdbc5a6bf85a/rastervision_pipeline/rastervision/pipeline/file_system/utils.py#L320-L322
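A minimal local-file sketch of the helper. Note that raster-vision's real file_to_str also resolves remote URIs (e.g. s3://, http://), which this stand-in deliberately skips.

import json
import os
import tempfile

def file_to_str(uri: str) -> str:
    # local-only stand-in for raster-vision's URI-aware reader
    with open(uri, 'r') as f:
        return f.read()

def file_to_json(uri: str) -> dict:
    """Return JSON dict based on file at uri."""
    return json.loads(file_to_str(uri))

path = os.path.join(tempfile.gettempdir(), 'cfg.json')
with open(path, 'w') as f:
    f.write('{"epochs": 10}')
assert file_to_json(path)['epochs'] == 10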
rossant/galry
6201fa32fb5c9ef3cea700cc22caf52fb69ebe31
galry/pyplot.py
python
Figure.text
(self, *args, **kwargs)
Render text. Arguments: * text: a string or a list of strings * coordinates: a tuple with x, y coordinates of the text, or a list with coordinates for each string. * fontsize=24: the font size * color: the color of the text * letter_spacing: the letter spacing * interline=0.: the interline when there are several independent texts
Render text. Arguments: * text: a string or a list of strings * coordinates: a tuple with x, y coordinates of the text, or a list with coordinates for each string. * fontsize=24: the font size * color: the color of the text * letter_spacing: the letter spacing * interline=0.: the interline when there are several independent texts
[ "Render", "text", ".", "Arguments", ":", "*", "text", ":", "a", "string", "or", "a", "list", "of", "strings", "*", "coordinates", ":", "a", "tuple", "with", "x", "y", "coordinates", "of", "the", "text", "or", "a", "list", "with", "coordinates", "for", "each", "string", ".", "*", "fontsize", "=", "24", ":", "the", "font", "size", "*", "color", ":", "the", "color", "of", "the", "text", "*", "letter_spacing", ":", "the", "letter", "spacing", "*", "interline", "=", "0", ".", ":", "the", "interline", "when", "there", "are", "several", "independent", "texts" ]
def text(self, *args, **kwargs): """Render text. Arguments: * text: a string or a list of strings * coordinates: a tuple with x, y coordinates of the text, or a list with coordinates for each string. * fontsize=24: the font size * color: the color of the text * letter_spacing: the letter spacing * interline=0.: the interline when there are several independent texts """ self.add_visual(vs.TextVisual, *args, **kwargs)
[ "def", "text", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "add_visual", "(", "vs", ".", "TextVisual", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/rossant/galry/blob/6201fa32fb5c9ef3cea700cc22caf52fb69ebe31/galry/pyplot.py#L331-L346
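A hedged usage sketch: the keyword names come from the docstring above, but obtaining fig via galry's matplotlib-style figure()/show() entry points is an assumption here, and running it requires galry plus a working OpenGL context.

import galry.pyplot as plt  # assumed matplotlib-like entry point

fig = plt.figure()
fig.text(['first label', 'second label'],
         coordinates=[(0., .5), (0., -.5)],  # one (x, y) pair per string
         fontsize=24,
         color='y',
         interline=.1)
plt.show()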
wistbean/learn_python3_spider
73c873f4845f4385f097e5057407d03dd37a117b
stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/util.py
python
EventMixin.add
(self, event, subscriber, append=True)
Add a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be added (and called when the event is published). :param append: Whether to append or prepend the subscriber to an existing subscriber list for the event.
Add a subscriber for an event.
[ "Add", "a", "subscriber", "for", "an", "event", "." ]
def add(self, event, subscriber, append=True): """ Add a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be added (and called when the event is published). :param append: Whether to append or prepend the subscriber to an existing subscriber list for the event. """ subs = self._subscribers if event not in subs: subs[event] = deque([subscriber]) else: sq = subs[event] if append: sq.append(subscriber) else: sq.appendleft(subscriber)
[ "def", "add", "(", "self", ",", "event", ",", "subscriber", ",", "append", "=", "True", ")", ":", "subs", "=", "self", ".", "_subscribers", "if", "event", "not", "in", "subs", ":", "subs", "[", "event", "]", "=", "deque", "(", "[", "subscriber", "]", ")", "else", ":", "sq", "=", "subs", "[", "event", "]", "if", "append", ":", "sq", ".", "append", "(", "subscriber", ")", "else", ":", "sq", ".", "appendleft", "(", "subscriber", ")" ]
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/util.py#L988-L1006
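A self-contained demo of the append/prepend behavior, with a minimal class standing in for distlib's EventMixin (only the add method and the subscriber store are reproduced).

from collections import deque

class MiniEventMixin:
    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        subs = self._subscribers
        if event not in subs:
            subs[event] = deque([subscriber])
        else:
            sq = subs[event]
            if append:
                sq.append(subscriber)
            else:
                sq.appendleft(subscriber)

bus = MiniEventMixin()
bus.add('saved', 'first')
bus.add('saved', 'last')                  # appended to the right
bus.add('saved', 'urgent', append=False)  # prepended, notified first
print(list(bus._subscribers['saved']))    # ['urgent', 'first', 'last']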
Yubico/python-fido2
58471d4af1c09cc9fe316771d6203b4844cbc668
fido2/ctap2/base.py
python
AuthenticatorData.create
(cls, rp_id_hash, flags, counter, credential_data=b"", extensions=None)
return cls( rp_id_hash + struct.pack(">BI", flags, counter) + credential_data + (cbor.encode(extensions) if extensions is not None else b"") )
Create an AuthenticatorData instance. :param rp_id_hash: SHA256 hash of the RP ID. :param flags: Flags of the AuthenticatorData. :param counter: Signature counter of the authenticator data. :param credential_data: Authenticated credential data (only if attested credential data flag is set). :param extensions: Authenticator extensions (only if ED flag is set). :return: The authenticator data.
Create an AuthenticatorData instance.
[ "Create", "an", "AuthenticatorData", "instance", "." ]
def create(cls, rp_id_hash, flags, counter, credential_data=b"", extensions=None): """Create an AuthenticatorData instance. :param rp_id_hash: SHA256 hash of the RP ID. :param flags: Flags of the AuthenticatorData. :param counter: Signature counter of the authenticator data. :param credential_data: Authenticated credential data (only if attested credential data flag is set). :param extensions: Authenticator extensions (only if ED flag is set). :return: The authenticator data. """ return cls( rp_id_hash + struct.pack(">BI", flags, counter) + credential_data + (cbor.encode(extensions) if extensions is not None else b"") )
[ "def", "create", "(", "cls", ",", "rp_id_hash", ",", "flags", ",", "counter", ",", "credential_data", "=", "b\"\"", ",", "extensions", "=", "None", ")", ":", "return", "cls", "(", "rp_id_hash", "+", "struct", ".", "pack", "(", "\">BI\"", ",", "flags", ",", "counter", ")", "+", "credential_data", "+", "(", "cbor", ".", "encode", "(", "extensions", ")", "if", "extensions", "is", "not", "None", "else", "b\"\"", ")", ")" ]
https://github.com/Yubico/python-fido2/blob/58471d4af1c09cc9fe316771d6203b4844cbc668/fido2/ctap2/base.py#L313-L329
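The byte layout for the minimal case (no credential data, no extensions) can be checked with the standard library alone; this sketch mirrors the concatenation in create without touching fido2's cbor encoder.

import hashlib
import struct

# minimal layout: 32-byte SHA-256 of the RP ID, 1 flags byte,
# then the 4-byte big-endian signature counter
rp_id_hash = hashlib.sha256(b'example.com').digest()
flags = 0x01          # UP (user present); value chosen for illustration
counter = 42

auth_data = rp_id_hash + struct.pack('>BI', flags, counter)
assert len(auth_data) == 37
assert auth_data[32] == flags
assert struct.unpack('>I', auth_data[33:37])[0] == counter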
nosmokingbandit/Watcher3
0217e75158b563bdefc8e01c3be7620008cf3977
lib/infi/pkg_resources/_vendor/pyparsing.py
python
pyparsing_common.convertToDatetime
(fmt="%Y-%m-%dT%H:%M:%S.%f")
return cvt_fn
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
[ "Helper", "to", "create", "a", "parse", "action", "for", "converting", "parsed", "datetime", "string", "to", "Python", "datetime", ".", "datetime" ]
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): """ Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] """ def cvt_fn(s,l,t): try: return datetime.strptime(t[0], fmt) except ValueError as ve: raise ParseException(s, l, str(ve)) return cvt_fn
[ "def", "convertToDatetime", "(", "fmt", "=", "\"%Y-%m-%dT%H:%M:%S.%f\"", ")", ":", "def", "cvt_fn", "(", "s", ",", "l", ",", "t", ")", ":", "try", ":", "return", "datetime", ".", "strptime", "(", "t", "[", "0", "]", ",", "fmt", ")", "except", "ValueError", "as", "ve", ":", "raise", "ParseException", "(", "s", ",", "l", ",", "str", "(", "ve", ")", ")", "return", "cvt_fn" ]
https://github.com/nosmokingbandit/Watcher3/blob/0217e75158b563bdefc8e01c3be7620008cf3977/lib/infi/pkg_resources/_vendor/pyparsing.py#L5569-L5588
getsentry/sentry
83b1f25aac3e08075e0e2495bc29efaf35aca18a
src/sentry/api/endpoints/project_reprocessing.py
python
ProjectReprocessingEndpoint.post
(self, request: Request, project)
return Response(status=200)
Triggers the reprocessing process as a task
Triggers the reprocessing process as a task
[ "Triggers", "the", "reprocessing", "process", "as", "a", "task" ]
def post(self, request: Request, project) -> Response: """ Triggers the reprocessing process as a task """ trigger_reprocessing(project) return Response(status=200)
[ "def", "post", "(", "self", ",", "request", ":", "Request", ",", "project", ")", "->", "Response", ":", "trigger_reprocessing", "(", "project", ")", "return", "Response", "(", "status", "=", "200", ")" ]
https://github.com/getsentry/sentry/blob/83b1f25aac3e08075e0e2495bc29efaf35aca18a/src/sentry/api/endpoints/project_reprocessing.py#L11-L16
imcaspar/gpt2-ml
f6286b16cbbee6dddbe1ba557fffb47eaf998cd1
train/modeling.py
python
GroverModel.lm_loss
(self)
return loss
:return: the scalar masked language model loss
:return: the scalar masked language model loss
[ ":", "return", ":", "the", "scalar", "masked", "language", "model", "loss" ]
def lm_loss(self): """ :return: the scalar masked language model loss """ target_ids_flat = tf.reshape(self.target_ids, [-1]) # 1 if it's valid and 0 otherwise. label_weights = tf.cast(tf.not_equal(target_ids_flat, self.pad_token_id), dtype=self.logits_flat.dtype) # [batch_size * seq_length, vocab_size] one_hot_labels = tf.one_hot(target_ids_flat, depth=self.config.vocab_size, dtype=self.logits_flat.dtype) # [batch_size * seq_length, vocab_size] logprobs_flat = tf.nn.log_softmax(self.logits_flat, axis=-1) per_example_loss = -tf.reduce_sum(logprobs_flat * one_hot_labels, axis=[-1]) # per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_flat, labels=target_ids_flat) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return loss
[ "def", "lm_loss", "(", "self", ")", ":", "target_ids_flat", "=", "tf", ".", "reshape", "(", "self", ".", "target_ids", ",", "[", "-", "1", "]", ")", "# 1 if it's valid and 0 otherwise.", "label_weights", "=", "tf", ".", "cast", "(", "tf", ".", "not_equal", "(", "target_ids_flat", ",", "self", ".", "pad_token_id", ")", ",", "dtype", "=", "self", ".", "logits_flat", ".", "dtype", ")", "# [batch_size * seq_length, vocab_size]", "one_hot_labels", "=", "tf", ".", "one_hot", "(", "target_ids_flat", ",", "depth", "=", "self", ".", "config", ".", "vocab_size", ",", "dtype", "=", "self", ".", "logits_flat", ".", "dtype", ")", "# [batch_size * seq_length, vocab_size]", "logprobs_flat", "=", "tf", ".", "nn", ".", "log_softmax", "(", "self", ".", "logits_flat", ",", "axis", "=", "-", "1", ")", "per_example_loss", "=", "-", "tf", ".", "reduce_sum", "(", "logprobs_flat", "*", "one_hot_labels", ",", "axis", "=", "[", "-", "1", "]", ")", "# per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_flat, labels=target_ids_flat)", "numerator", "=", "tf", ".", "reduce_sum", "(", "label_weights", "*", "per_example_loss", ")", "denominator", "=", "tf", ".", "reduce_sum", "(", "label_weights", ")", "+", "1e-5", "loss", "=", "numerator", "/", "denominator", "return", "loss" ]
https://github.com/imcaspar/gpt2-ml/blob/f6286b16cbbee6dddbe1ba557fffb47eaf998cd1/train/modeling.py#L524-L548
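The same masked cross-entropy as a NumPy sketch: log-softmax over the vocabulary, negative log-probability of each target, pad positions zero-weighted, and a small epsilon keeping the denominator nonzero. Shapes here are toy values.

import numpy as np

pad_token_id = 0
logits = np.random.randn(6, 5)            # [batch*seq, vocab], toy sizes
targets = np.array([3, 1, 4, 0, 0, 2])    # two padded positions

logprobs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
per_example_loss = -logprobs[np.arange(len(targets)), targets]

label_weights = (targets != pad_token_id).astype(np.float64)
loss = (label_weights * per_example_loss).sum() / (label_weights.sum() + 1e-5)
print(loss)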
devassistant/devassistant
2dbfeaa666a64127263664d18969c55d19ecc83e
devassistant/gui/gui_helper.py
python
GuiHelper.checkbutton_with_label
(self, description)
return align
The function creates a checkbutton with a label
The function creates a checkbutton with a label
[ "The", "function", "creates", "a", "checkbutton", "with", "a", "label" ]
def checkbutton_with_label(self, description): """ The function creates a checkbutton with a label """ act_btn = Gtk.CheckButton(description) align = self.create_alignment() act_btn.add(align) return align
[ "def", "checkbutton_with_label", "(", "self", ",", "description", ")", ":", "act_btn", "=", "Gtk", ".", "CheckButton", "(", "description", ")", "align", "=", "self", ".", "create_alignment", "(", ")", "act_btn", ".", "add", "(", "align", ")", "return", "align" ]
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/gui/gui_helper.py#L97-L104
googleads/googleads-python-lib
b3b42a6deedbe6eaa1c9b30183a9eae3f9e9a7ee
examples/ad_manager/v202105/content_service/get_all_content.py
python
main
(client)
[]
def main(client): # Initialize appropriate service. content_service = client.GetService('ContentService', version='v202105') # Create a statement to select content. statement = ad_manager.StatementBuilder(version='v202105') # Retrieve a small amount of content at a time, paging # through until all content have been retrieved. while True: response = content_service.getContentByStatement(statement.ToStatement()) if 'results' in response and len(response['results']): for content in response['results']: # Print out some information for each content. content_description = ('Content with ID "%d" and name "%s"' % (content['id'], content['name'].encode('utf-8'))) if content['contentBundleIds']: content_description += (' belonging to bundle IDs %s' % content['contentBundleIds']) content_description += ' was found.' print(content_description) statement.offset += statement.limit else: break print('\nNumber of results found: %s' % response['totalResultSetSize'])
[ "def", "main", "(", "client", ")", ":", "# Initialize appropriate service.", "content_service", "=", "client", ".", "GetService", "(", "'ContentService'", ",", "version", "=", "'v202105'", ")", "# Create a statement to select content.", "statement", "=", "ad_manager", ".", "StatementBuilder", "(", "version", "=", "'v202105'", ")", "# Retrieve a small amount of content at a time, paging", "# through until all content have been retrieved.", "while", "True", ":", "response", "=", "content_service", ".", "getContentByStatement", "(", "statement", ".", "ToStatement", "(", ")", ")", "if", "'results'", "in", "response", "and", "len", "(", "response", "[", "'results'", "]", ")", ":", "for", "content", "in", "response", "[", "'results'", "]", ":", "# Print out some information for each content.", "content_description", "=", "(", "'Content with ID \"%d\" and name \"%s\"'", "%", "(", "content", "[", "'id'", "]", ",", "content", "[", "'name'", "]", ".", "encode", "(", "'utf-8'", ")", ")", ")", "if", "content", "[", "'contentBundleIds'", "]", ":", "content_description", "+=", "(", "' belonging to bundle IDs %s'", "%", "content", "[", "'contentBundleIds'", "]", ")", "content_description", "+=", "' was found.'", "print", "(", "content_description", ")", "statement", ".", "offset", "+=", "statement", ".", "limit", "else", ":", "break", "print", "(", "'\\nNumber of results found: %s'", "%", "response", "[", "'totalResultSetSize'", "]", ")" ]
https://github.com/googleads/googleads-python-lib/blob/b3b42a6deedbe6eaa1c9b30183a9eae3f9e9a7ee/examples/ad_manager/v202105/content_service/get_all_content.py#L23-L48
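The offset/limit paging loop is the reusable piece here. Below is a self-contained sketch with a stubbed service standing in for ContentService, so the control flow can be run without Ad Manager credentials; FakeStatement is a hypothetical stand-in for StatementBuilder's bookkeeping.

class FakeStatement:
    # stands in for ad_manager.StatementBuilder's offset/limit bookkeeping
    def __init__(self, limit=2):
        self.offset, self.limit = 0, limit

DATA = [{'id': i, 'name': 'content-%d' % i} for i in range(5)]

def get_content_by_statement(stmt):
    page = DATA[stmt.offset:stmt.offset + stmt.limit]
    return {'results': page, 'totalResultSetSize': len(DATA)}

statement = FakeStatement()
while True:
    response = get_content_by_statement(statement)
    if 'results' in response and len(response['results']):
        for content in response['results']:
            print('Content with ID "%d" and name "%s" was found.'
                  % (content['id'], content['name']))
        statement.offset += statement.limit
    else:
        break
print('\nNumber of results found: %s' % response['totalResultSetSize'])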
PaddlePaddle/models
511e2e282960ed4c7440c3f1d1e62017acb90e11
tutorials/mobilenetv3_prod/Step1-5/mobilenetv3_ref/utils.py
python
setup_for_distributed
(is_master)
This function disables printing when not in master process
This function disables printing when not in master process
[ "This", "function", "disables", "printing", "when", "not", "in", "master", "process" ]
def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print
[ "def", "setup_for_distributed", "(", "is_master", ")", ":", "import", "builtins", "as", "__builtin__", "builtin_print", "=", "__builtin__", ".", "print", "def", "print", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "force", "=", "kwargs", ".", "pop", "(", "'force'", ",", "False", ")", "if", "is_master", "or", "force", ":", "builtin_print", "(", "*", "args", ",", "*", "*", "kwargs", ")", "__builtin__", ".", "print", "=", "print" ]
https://github.com/PaddlePaddle/models/blob/511e2e282960ed4c7440c3f1d1e62017acb90e11/tutorials/mobilenetv3_prod/Step1-5/mobilenetv3_ref/utils.py#L187-L199
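A runnable demo of the pattern: after the swap, ordinary print calls are silenced on non-master ranks, while force=True punches through. The function body is the same as above, repeated so the snippet stands alone.

import builtins as __builtin__

def setup_for_distributed(is_master):
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print

setup_for_distributed(is_master=False)
print('worker chatter')           # suppressed
print('loss=0.17', force=True)    # still shown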