Dataset schema (string columns show min–max value lengths; class columns show distinct-value counts):

  repo              string    7–54
  path              string    4–192
  url               string    87–284
  code              string    78–104k
  code_tokens       sequence
  docstring         string    1–46.9k
  docstring_tokens  sequence
  language          string    1 distinct value
  partition         string    3 distinct values
CitrineInformatics/python-citrination-client
citrination_client/views/search_template/client.py
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/views/search_template/client.py#L17-L37
def get_available_columns(self, dataset_ids):
    """
    Retrieves the set of columns from the combination of dataset ids given

    :param dataset_ids: The id of the dataset to retrieve columns from
    :type dataset_ids: list of int
    :return: A list of column names from the dataset ids given.
    :rtype: list of str
    """
    if not isinstance(dataset_ids, list):
        dataset_ids = [dataset_ids]
    data = {
        "dataset_ids": dataset_ids
    }
    failure_message = "Failed to get available columns in dataset(s) {}".format(dataset_ids)
    return self._get_success_json(self._post_json(
        'v1/datasets/get-available-columns', data, failure_message=failure_message))['data']
[ "def", "get_available_columns", "(", "self", ",", "dataset_ids", ")", ":", "if", "not", "isinstance", "(", "dataset_ids", ",", "list", ")", ":", "dataset_ids", "=", "[", "dataset_ids", "]", "data", "=", "{", "\"dataset_ids\"", ":", "dataset_ids", "}", "failure_message", "=", "\"Failed to get available columns in dataset(s) {}\"", ".", "format", "(", "dataset_ids", ")", "return", "self", ".", "_get_success_json", "(", "self", ".", "_post_json", "(", "'v1/datasets/get-available-columns'", ",", "data", ",", "failure_message", "=", "failure_message", ")", ")", "[", "'data'", "]" ]
Retrieves the set of columns from the combination of dataset ids given :param dataset_ids: The id of the dataset to retrieve columns from :type dataset_ids: list of int :return: A list of column names from the dataset ids given. :rtype: list of str
[ "Retrieves", "the", "set", "of", "columns", "from", "the", "combination", "of", "dataset", "ids", "given" ]
python
valid
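A minimal usage sketch for the record above, assuming a client instance constructed per the python-citrination-client docs (the dataset ids are hypothetical):

    # Single ids are wrapped into a list internally, so both forms work.
    columns = client.get_available_columns([1234, 5678])
    columns = client.get_available_columns(1234)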
markuskiller/textblob-de
textblob_de/blob.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L650-L659
def words(self):
    """Return a list of word tokens. This excludes punctuation characters.
    If you want to include punctuation characters, access the ``tokens``
    property.

    :returns: A :class:`WordList <WordList>` of word tokens.
    """
    return WordList(
        word_tokenize(self.raw, self.tokenizer, include_punc=False))
[ "def", "words", "(", "self", ")", ":", "return", "WordList", "(", "word_tokenize", "(", "self", ".", "raw", ",", "self", ".", "tokenizer", ",", "include_punc", "=", "False", ")", ")" ]
Return a list of word tokens. This excludes punctuation characters. If you want to include punctuation characters, access the ``tokens`` property. :returns: A :class:`WordList <WordList>` of word tokens.
[ "Return", "a", "list", "of", "word", "tokens", ".", "This", "excludes", "punctuation", "characters", ".", "If", "you", "want", "to", "include", "punctuation", "characters", "access", "the", "tokens", "property", "." ]
python
train
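A short usage sketch for the property above; TextBlobDE is the blob class the textblob-de package documents, and the German sentence is an arbitrary example:

    from textblob_de import TextBlobDE

    blob = TextBlobDE("Das ist ein schönes Auto.")
    print(blob.words)   # WordList of word tokens, punctuation excluded
    print(blob.tokens)  # includes punctuation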
CI-WATER/gsshapy
gsshapy/orm/loc.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/loc.py#L61-L81
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """
    Generic Output Location Read from File Method
    """
    # Assign file extension attribute to file object
    self.fileExtension = extension

    # Open file and parse into a data structure
    with open(path, 'r') as f:
        for line in f:
            sline = line.strip().split()

            if len(sline) == 1:
                self.numLocations = sline[0]
            else:
                # Create GSSHAPY OutputLocation object
                location = OutputLocation(linkOrCellI=sline[0],
                                          nodeOrCellJ=sline[1])

                # Associate OutputLocation with OutputLocationFile
                location.outputLocationFile = self
[ "def", "_read", "(", "self", ",", "directory", ",", "filename", ",", "session", ",", "path", ",", "name", ",", "extension", ",", "spatial", ",", "spatialReferenceID", ",", "replaceParamFile", ")", ":", "# Assign file extension attribute to file object", "self", ".", "fileExtension", "=", "extension", "# Open file and parse into a data structure", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "sline", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "len", "(", "sline", ")", "==", "1", ":", "self", ".", "numLocations", "=", "sline", "[", "0", "]", "else", ":", "# Create GSSHAPY OutputLocation object", "location", "=", "OutputLocation", "(", "linkOrCellI", "=", "sline", "[", "0", "]", ",", "nodeOrCellJ", "=", "sline", "[", "1", "]", ")", "# Associate OutputLocation with OutputLocationFile", "location", ".", "outputLocationFile", "=", "self" ]
Generic Output Location Read from File Method
[ "Generic", "Output", "Location", "Read", "from", "File", "Method" ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/sip/domain/auth_types/auth_registrations_mapping/auth_registrations_credential_list_mapping.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/sip/domain/auth_types/auth_registrations_mapping/auth_registrations_credential_list_mapping.py#L142-L156
def get(self, sid):
    """
    Constructs a AuthRegistrationsCredentialListMappingContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext
    :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext
    """
    return AuthRegistrationsCredentialListMappingContext(
        self._version,
        account_sid=self._solution['account_sid'],
        domain_sid=self._solution['domain_sid'],
        sid=sid,
    )
[ "def", "get", "(", "self", ",", "sid", ")", ":", "return", "AuthRegistrationsCredentialListMappingContext", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "domain_sid", "=", "self", ".", "_solution", "[", "'domain_sid'", "]", ",", "sid", "=", "sid", ",", ")" ]
Constructs a AuthRegistrationsCredentialListMappingContext :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingContext
[ "Constructs", "a", "AuthRegistrationsCredentialListMappingContext" ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L2000-L2006
def connect_to_database_odbc_access(self,
                                    dsn: str,
                                    autocommit: bool = True) -> None:
    """Connects to an Access database via ODBC, with the DSN
    prespecified."""
    self.connect(engine=ENGINE_ACCESS, interface=INTERFACE_ODBC,
                 dsn=dsn, autocommit=autocommit)
[ "def", "connect_to_database_odbc_access", "(", "self", ",", "dsn", ":", "str", ",", "autocommit", ":", "bool", "=", "True", ")", "->", "None", ":", "self", ".", "connect", "(", "engine", "=", "ENGINE_ACCESS", ",", "interface", "=", "INTERFACE_ODBC", ",", "dsn", "=", "dsn", ",", "autocommit", "=", "autocommit", ")" ]
Connects to an Access database via ODBC, with the DSN prespecified.
[ "Connects", "to", "an", "Access", "database", "via", "ODBC", "with", "the", "DSN", "prespecified", "." ]
python
train
tensorpack/tensorpack
tensorpack/tfutils/varmanip.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/varmanip.py#L101-L116
def update(self, prms):
    """
    Args:
        prms(dict): dict of {variable name: value}
            Any name in prms must be in the graph and in vars_to_update.
    """
    with self.sess.as_default():
        fetches = []
        feeds = {}
        for name, value in six.iteritems(prms):
            assert name in self.name_map
            var = self.name_map[name]
            fetches.append(var.initializer)
            # This is the implementation of `var.load`
            feeds[var.initializer.inputs[1]] = SessionUpdate.relaxed_value_for_var(value, var)
        self.sess.run(fetches, feed_dict=feeds)
[ "def", "update", "(", "self", ",", "prms", ")", ":", "with", "self", ".", "sess", ".", "as_default", "(", ")", ":", "fetches", "=", "[", "]", "feeds", "=", "{", "}", "for", "name", ",", "value", "in", "six", ".", "iteritems", "(", "prms", ")", ":", "assert", "name", "in", "self", ".", "name_map", "var", "=", "self", ".", "name_map", "[", "name", "]", "fetches", ".", "append", "(", "var", ".", "initializer", ")", "# This is the implementation of `var.load`", "feeds", "[", "var", ".", "initializer", ".", "inputs", "[", "1", "]", "]", "=", "SessionUpdate", ".", "relaxed_value_for_var", "(", "value", ",", "var", ")", "self", ".", "sess", ".", "run", "(", "fetches", ",", "feed_dict", "=", "feeds", ")" ]
Args: prms(dict): dict of {variable name: value} Any name in prms must be in the graph and in vars_to_update.
[ "Args", ":", "prms", "(", "dict", ")", ":", "dict", "of", "{", "variable", "name", ":", "value", "}", "Any", "name", "in", "prms", "must", "be", "in", "the", "graph", "and", "in", "vars_to_update", "." ]
python
train
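A hedged usage sketch for the record above; the constructor signature SessionUpdate(sess, vars_to_update) is assumed from the surrounding tensorpack module, and the variable name and value are hypothetical:

    import numpy as np

    updater = SessionUpdate(sess, [var])           # assumed constructor
    updater.update({'conv1/W': np.zeros((3, 3))})  # value is relaxed to the var's dtype/shape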
ageitgey/face_recognition
face_recognition/api.py
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L32-L39
def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order

    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()
[ "def", "_rect_to_css", "(", "rect", ")", ":", "return", "rect", ".", "top", "(", ")", ",", "rect", ".", "right", "(", ")", ",", "rect", ".", "bottom", "(", ")", ",", "rect", ".", "left", "(", ")" ]
Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order :param rect: a dlib 'rect' object :return: a plain tuple representation of the rect in (top, right, bottom, left) order
[ "Convert", "a", "dlib", "rect", "object", "to", "a", "plain", "tuple", "in", "(", "top", "right", "bottom", "left", ")", "order" ]
python
train
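Because _rect_to_css only calls the four accessor methods, it can be exercised without dlib; the stand-in class below is purely illustrative:

    class _StubRect:
        # Mimics the dlib rect accessors used by _rect_to_css.
        def top(self): return 10
        def right(self): return 50
        def bottom(self): return 60
        def left(self): return 5

    print(_rect_to_css(_StubRect()))  # (10, 50, 60, 5)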
churchill-lab/emase
emase/AlignmentPropertyMatrix.py
https://github.com/churchill-lab/emase/blob/ae3c6955bb175c1dec88dbf9fac1a7dcc16f4449/emase/AlignmentPropertyMatrix.py#L388-L404
def print_read(self, rid):
    """
    Prints nonzero rows of the read wanted
    """
    if self.rname is not None:
        print(self.rname[rid])
        print('--')
    r = self.get_read_data(rid)
    aligned_loci = np.unique(r.nonzero()[1])
    for locus in aligned_loci:
        nzvec = r[:, locus].todense().transpose()[0].A.flatten()
        if self.lname is not None:
            print(self.lname[locus], end=' ')  # keep the locus label and its vector on one line
        else:
            print(locus, end=' ')
        print(nzvec)
[ "def", "print_read", "(", "self", ",", "rid", ")", ":", "if", "self", ".", "rname", "is", "not", "None", ":", "print", "self", ".", "rname", "[", "rid", "]", "print", "'--'", "r", "=", "self", ".", "get_read_data", "(", "rid", ")", "aligned_loci", "=", "np", ".", "unique", "(", "r", ".", "nonzero", "(", ")", "[", "1", "]", ")", "for", "locus", "in", "aligned_loci", ":", "nzvec", "=", "r", "[", ":", ",", "locus", "]", ".", "todense", "(", ")", ".", "transpose", "(", ")", "[", "0", "]", ".", "A", ".", "flatten", "(", ")", "if", "self", ".", "lname", "is", "not", "None", ":", "print", "self", ".", "lname", "[", "locus", "]", ",", "else", ":", "print", "locus", ",", "print", "nzvec" ]
Prints nonzero rows of the read wanted
[ "Prints", "nonzero", "rows", "of", "the", "read", "wanted" ]
python
valid
hannorein/rebound
rebound/simulationarchive.py
https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulationarchive.py#L154-L172
def _getSnapshotIndex(self, t):
    """
    Return the index for the snapshot just before t
    """
    if t > self.tmax or t < self.tmin:
        raise ValueError("Requested time outside of baseline stored in binary file.")
    # Bisection method
    l = 0
    r = len(self)
    while True:
        bi = l + (r - l) // 2
        if self.t[bi] > t:
            r = bi
        else:
            l = bi
        if r - 1 <= l:
            bi = l
            break
    return bi, self.t[bi]
[ "def", "_getSnapshotIndex", "(", "self", ",", "t", ")", ":", "if", "t", ">", "self", ".", "tmax", "or", "t", "<", "self", ".", "tmin", ":", "raise", "ValueError", "(", "\"Requested time outside of baseline stored in binary file.\"", ")", "# Bisection method", "l", "=", "0", "r", "=", "len", "(", "self", ")", "while", "True", ":", "bi", "=", "l", "+", "(", "r", "-", "l", ")", "//", "2", "if", "self", ".", "t", "[", "bi", "]", ">", "t", ":", "r", "=", "bi", "else", ":", "l", "=", "bi", "if", "r", "-", "1", "<=", "l", ":", "bi", "=", "l", "break", "return", "bi", ",", "self", ".", "t", "[", "bi", "]" ]
Return the index for the snapshot just before t
[ "Return", "the", "index", "for", "the", "snapshot", "just", "before", "t" ]
python
train
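The bisection above is easy to check in isolation; the sketch below replicates the same loop over a plain sorted list (the times and the query value are made up):

    def snapshot_index(times, t):
        # Same bisection as _getSnapshotIndex, minus the range check.
        l, r = 0, len(times)
        while True:
            bi = l + (r - l) // 2
            if times[bi] > t:
                r = bi
            else:
                l = bi
            if r - 1 <= l:
                return l, times[l]

    print(snapshot_index([0.0, 1.0, 2.5, 4.0], 2.6))  # (2, 2.5): the snapshot just before t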
ssalentin/plip
plip/modules/supplemental.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/supplemental.py#L151-L189
def cluster_doubles(double_list):
    """Given a list of doubles, they are clustered if they share one element
    :param double_list: list of doubles
    :returns : list of clusters (tuples)
    """
    location = {}  # hashtable of which cluster each element is in
    clusters = []
    # Go through each double
    for t in double_list:
        a, b = t[0], t[1]
        # If they both are already in different clusters, merge the clusters
        if a in location and b in location:
            if location[a] != location[b]:
                if location[a] < location[b]:
                    clusters[location[a]] = clusters[location[a]].union(clusters[location[b]])  # Merge clusters
                    clusters = clusters[:location[b]] + clusters[location[b]+1:]
                else:
                    clusters[location[b]] = clusters[location[b]].union(clusters[location[a]])  # Merge clusters
                    clusters = clusters[:location[a]] + clusters[location[a]+1:]
                # Rebuild index of locations for each element as they have changed now
                location = {}
                for i, cluster in enumerate(clusters):
                    for c in cluster:
                        location[c] = i
        else:
            # If a is already in a cluster, add b to that cluster
            if a in location:
                clusters[location[a]].add(b)
                location[b] = location[a]
            # If b is already in a cluster, add a to that cluster
            if b in location:
                clusters[location[b]].add(a)
                location[a] = location[b]
            # If neither a nor b is in any cluster, create a new one with a and b
            if not (b in location and a in location):
                clusters.append(set(t))
                location[a] = len(clusters) - 1
                location[b] = len(clusters) - 1
    return map(tuple, clusters)
[ "def", "cluster_doubles", "(", "double_list", ")", ":", "location", "=", "{", "}", "# hashtable of which cluster each element is in", "clusters", "=", "[", "]", "# Go through each double", "for", "t", "in", "double_list", ":", "a", ",", "b", "=", "t", "[", "0", "]", ",", "t", "[", "1", "]", "# If they both are already in different clusters, merge the clusters", "if", "a", "in", "location", "and", "b", "in", "location", ":", "if", "location", "[", "a", "]", "!=", "location", "[", "b", "]", ":", "if", "location", "[", "a", "]", "<", "location", "[", "b", "]", ":", "clusters", "[", "location", "[", "a", "]", "]", "=", "clusters", "[", "location", "[", "a", "]", "]", ".", "union", "(", "clusters", "[", "location", "[", "b", "]", "]", ")", "# Merge clusters", "clusters", "=", "clusters", "[", ":", "location", "[", "b", "]", "]", "+", "clusters", "[", "location", "[", "b", "]", "+", "1", ":", "]", "else", ":", "clusters", "[", "location", "[", "b", "]", "]", "=", "clusters", "[", "location", "[", "b", "]", "]", ".", "union", "(", "clusters", "[", "location", "[", "a", "]", "]", ")", "# Merge clusters", "clusters", "=", "clusters", "[", ":", "location", "[", "a", "]", "]", "+", "clusters", "[", "location", "[", "a", "]", "+", "1", ":", "]", "# Rebuild index of locations for each element as they have changed now", "location", "=", "{", "}", "for", "i", ",", "cluster", "in", "enumerate", "(", "clusters", ")", ":", "for", "c", "in", "cluster", ":", "location", "[", "c", "]", "=", "i", "else", ":", "# If a is already in a cluster, add b to that cluster", "if", "a", "in", "location", ":", "clusters", "[", "location", "[", "a", "]", "]", ".", "add", "(", "b", ")", "location", "[", "b", "]", "=", "location", "[", "a", "]", "# If b is already in a cluster, add a to that cluster", "if", "b", "in", "location", ":", "clusters", "[", "location", "[", "b", "]", "]", ".", "add", "(", "a", ")", "location", "[", "a", "]", "=", "location", "[", "b", "]", "# If neither a nor b is in any cluster, create a new one with a and b", "if", "not", "(", "b", "in", "location", "and", "a", "in", "location", ")", ":", "clusters", ".", "append", "(", "set", "(", "t", ")", ")", "location", "[", "a", "]", "=", "len", "(", "clusters", ")", "-", "1", "location", "[", "b", "]", "=", "len", "(", "clusters", ")", "-", "1", "return", "map", "(", "tuple", ",", "clusters", ")" ]
Given a list of doubles, they are clustered if they share one element :param double_list: list of doubles :returns : list of clusters (tuples)
[ "Given", "a", "list", "of", "doubles", "they", "are", "clustered", "if", "they", "share", "one", "element", ":", "param", "double_list", ":", "list", "of", "doubles", ":", "returns", ":", "list", "of", "clusters", "(", "tuples", ")" ]
python
train
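A small worked example of the clustering (the input pairs are made up). Under Python 3 the trailing map() is lazy, so the result needs to be materialized, and element order inside each tuple is not guaranteed because clusters are built as sets:

    pairs = [(1, 2), (2, 3), (4, 5)]
    print(list(cluster_doubles(pairs)))  # e.g. [(1, 2, 3), (4, 5)]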
materialsproject/pymatgen
pymatgen/io/abinit/pseudos.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/pseudos.py#L1808-L1824
def sorted(self, attrname, reverse=False):
    """
    Sort the table according to the value of attribute attrname.

    Return:
        New class:`PseudoTable` object
    """
    attrs = []
    for i, pseudo in self:
        try:
            a = getattr(pseudo, attrname)
        except AttributeError:
            a = np.inf
        attrs.append((i, a))

    # Sort attrs, and build new table with sorted pseudos.
    return self.__class__([self[a[0]] for a in
                           sorted(attrs, key=lambda t: t[1], reverse=reverse)])
[ "def", "sorted", "(", "self", ",", "attrname", ",", "reverse", "=", "False", ")", ":", "attrs", "=", "[", "]", "for", "i", ",", "pseudo", "in", "self", ":", "try", ":", "a", "=", "getattr", "(", "pseudo", ",", "attrname", ")", "except", "AttributeError", ":", "a", "=", "np", ".", "inf", "attrs", ".", "append", "(", "(", "i", ",", "a", ")", ")", "# Sort attrs, and build new table with sorted pseudos.", "return", "self", ".", "__class__", "(", "[", "self", "[", "a", "[", "0", "]", "]", "for", "a", "in", "sorted", "(", "attrs", ",", "key", "=", "lambda", "t", ":", "t", "[", "1", "]", ",", "reverse", "=", "reverse", ")", "]", ")" ]
Sort the table according to the value of attribute attrname. Return: New class:`PseudoTable` object
[ "Sort", "the", "table", "according", "to", "the", "value", "of", "attribute", "attrname", "." ]
python
train
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L1689-L1735
def open_file(infile, verbose=True):
    """
    Open file and return a list of the file's lines.
    Try to use utf-8 encoding, and if that fails use Latin-1.

    Parameters
    ----------
    infile : str
        full path to file

    Returns
    ----------
    data: list
        all lines in the file
    """
    try:
        with codecs.open(infile, "r", "utf-8") as f:
            lines = list(f.readlines())
    # file might not exist
    except FileNotFoundError:
        if verbose:
            print('-W- You are trying to open a file: {} that does not exist'.format(infile))
        return []
    # encoding might be wrong
    except UnicodeDecodeError:
        try:
            with codecs.open(infile, "r", "Latin-1") as f:
                print('-I- Using less strict decoding for {}, output may have formatting errors'.format(infile))
                lines = list(f.readlines())
        # if file exists, and encoding is correct, who knows what the problem is
        except Exception as ex:
            print("-W- ", type(ex), ex)
            return []
    except Exception as ex:
        print("-W- ", type(ex), ex)
        return []

    # don't leave a blank line at the end
    i = 0
    while i < 10:
        if not len(lines[-1].strip("\n").strip("\t")):
            lines = lines[:-1]
            i += 1
        else:
            i = 10
    return lines
[ "def", "open_file", "(", "infile", ",", "verbose", "=", "True", ")", ":", "try", ":", "with", "codecs", ".", "open", "(", "infile", ",", "\"r\"", ",", "\"utf-8\"", ")", "as", "f", ":", "lines", "=", "list", "(", "f", ".", "readlines", "(", ")", ")", "# file might not exist", "except", "FileNotFoundError", ":", "if", "verbose", ":", "print", "(", "'-W- You are trying to open a file: {} that does not exist'", ".", "format", "(", "infile", ")", ")", "return", "[", "]", "# encoding might be wrong", "except", "UnicodeDecodeError", ":", "try", ":", "with", "codecs", ".", "open", "(", "infile", ",", "\"r\"", ",", "\"Latin-1\"", ")", "as", "f", ":", "print", "(", "'-I- Using less strict decoding for {}, output may have formatting errors'", ".", "format", "(", "infile", ")", ")", "lines", "=", "list", "(", "f", ".", "readlines", "(", ")", ")", "# if file exists, and encoding is correct, who knows what the problem is", "except", "Exception", "as", "ex", ":", "print", "(", "\"-W- \"", ",", "type", "(", "ex", ")", ",", "ex", ")", "return", "[", "]", "except", "Exception", "as", "ex", ":", "print", "(", "\"-W- \"", ",", "type", "(", "ex", ")", ",", "ex", ")", "return", "[", "]", "# don't leave a blank line at the end", "i", "=", "0", "while", "i", "<", "10", ":", "if", "not", "len", "(", "lines", "[", "-", "1", "]", ".", "strip", "(", "\"\\n\"", ")", ".", "strip", "(", "\"\\t\"", ")", ")", ":", "lines", "=", "lines", "[", ":", "-", "1", "]", "i", "+=", "1", "else", ":", "i", "=", "10", "return", "lines" ]
Open file and return a list of the file's lines. Try to use utf-8 encoding, and if that fails use Latin-1. Parameters ---------- infile : str full path to file Returns ---------- data: list all lines in the file
[ "Open", "file", "and", "return", "a", "list", "of", "the", "file", "s", "lines", ".", "Try", "to", "use", "utf", "-", "8", "encoding", "and", "if", "that", "fails", "use", "Latin", "-", "1", "." ]
python
train
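A usage sketch (the path is hypothetical). One edge worth knowing from the code above: the trailing-blank-line loop indexes lines[-1], so a file that exists but is completely empty would raise IndexError rather than return []:

    lines = open_file("measurements.txt")
    if lines:
        print(lines[0])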
UCL-INGI/INGInious
inginious/frontend/installer.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/installer.py#L271-L291
def try_mongodb_opts(self, host="localhost", database_name='INGInious'):
    """ Try MongoDB configuration """
    try:
        mongo_client = MongoClient(host=host)
    except Exception as e:
        self._display_warning("Cannot connect to MongoDB on host %s: %s" % (host, str(e)))
        return None

    try:
        database = mongo_client[database_name]
    except Exception as e:
        self._display_warning("Cannot access database %s: %s" % (database_name, str(e)))
        return None

    try:
        GridFS(database)
    except Exception as e:
        self._display_warning("Cannot access gridfs %s: %s" % (database_name, str(e)))
        return None

    return database
[ "def", "try_mongodb_opts", "(", "self", ",", "host", "=", "\"localhost\"", ",", "database_name", "=", "'INGInious'", ")", ":", "try", ":", "mongo_client", "=", "MongoClient", "(", "host", "=", "host", ")", "except", "Exception", "as", "e", ":", "self", ".", "_display_warning", "(", "\"Cannot connect to MongoDB on host %s: %s\"", "%", "(", "host", ",", "str", "(", "e", ")", ")", ")", "return", "None", "try", ":", "database", "=", "mongo_client", "[", "database_name", "]", "except", "Exception", "as", "e", ":", "self", ".", "_display_warning", "(", "\"Cannot access database %s: %s\"", "%", "(", "database_name", ",", "str", "(", "e", ")", ")", ")", "return", "None", "try", ":", "GridFS", "(", "database", ")", "except", "Exception", "as", "e", ":", "self", ".", "_display_warning", "(", "\"Cannot access gridfs %s: %s\"", "%", "(", "database_name", ",", "str", "(", "e", ")", ")", ")", "return", "None", "return", "database" ]
Try MongoDB configuration
[ "Try", "MongoDB", "configuration" ]
python
train
jepegit/cellpy
dev_utils/BioLogic_.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/dev_utils/BioLogic_.py#L26-L49
def fieldname_to_dtype(fieldname):
    """Converts a column header from the MPT file into a tuple of
    canonical name and appropriate numpy dtype"""
    if fieldname == 'mode':
        return ('mode', np.uint8)
    elif fieldname in ("ox/red", "error", "control changes", "Ns changes",
                       "counter inc."):
        return (fieldname, np.bool_)
    elif fieldname in ("time/s", "P/W", "(Q-Qo)/mA.h", "x", "control/V",
                       "control/V/mA", "(Q-Qo)/C", "dQ/C", "freq/Hz",
                       "|Ewe|/V", "|I|/A", "Phase(Z)/deg", "|Z|/Ohm",
                       "Re(Z)/Ohm", "-Im(Z)/Ohm"):
        return (fieldname, np.float_)
    elif fieldname in ("cycle number", "I Range", "Ns", "half cycle"):
        return (fieldname, np.int_)
    elif fieldname in ("dq/mA.h", "dQ/mA.h"):
        return ("dQ/mA.h", np.float_)
    elif fieldname in ("I/mA", "<I>/mA"):
        return ("I/mA", np.float_)
    elif fieldname in ("Ewe/V", "<Ewe>/V"):
        return ("Ewe/V", np.float_)
    else:
        raise ValueError("Invalid column header: %s" % fieldname)
[ "def", "fieldname_to_dtype", "(", "fieldname", ")", ":", "if", "fieldname", "==", "'mode'", ":", "return", "(", "'mode'", ",", "np", ".", "uint8", ")", "elif", "fieldname", "in", "(", "\"ox/red\"", ",", "\"error\"", ",", "\"control changes\"", ",", "\"Ns changes\"", ",", "\"counter inc.\"", ")", ":", "return", "(", "fieldname", ",", "np", ".", "bool_", ")", "elif", "fieldname", "in", "(", "\"time/s\"", ",", "\"P/W\"", ",", "\"(Q-Qo)/mA.h\"", ",", "\"x\"", ",", "\"control/V\"", ",", "\"control/V/mA\"", ",", "\"(Q-Qo)/C\"", ",", "\"dQ/C\"", ",", "\"freq/Hz\"", ",", "\"|Ewe|/V\"", ",", "\"|I|/A\"", ",", "\"Phase(Z)/deg\"", ",", "\"|Z|/Ohm\"", ",", "\"Re(Z)/Ohm\"", ",", "\"-Im(Z)/Ohm\"", ")", ":", "return", "(", "fieldname", ",", "np", ".", "float_", ")", "elif", "fieldname", "in", "(", "\"cycle number\"", ",", "\"I Range\"", ",", "\"Ns\"", ",", "\"half cycle\"", ")", ":", "return", "(", "fieldname", ",", "np", ".", "int_", ")", "elif", "fieldname", "in", "(", "\"dq/mA.h\"", ",", "\"dQ/mA.h\"", ")", ":", "return", "(", "\"dQ/mA.h\"", ",", "np", ".", "float_", ")", "elif", "fieldname", "in", "(", "\"I/mA\"", ",", "\"<I>/mA\"", ")", ":", "return", "(", "\"I/mA\"", ",", "np", ".", "float_", ")", "elif", "fieldname", "in", "(", "\"Ewe/V\"", ",", "\"<Ewe>/V\"", ")", ":", "return", "(", "\"Ewe/V\"", ",", "np", ".", "float_", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid column header: %s\"", "%", "fieldname", ")" ]
Converts a column header from the MPT file into a tuple of canonical name and appropriate numpy dtype
[ "Converts", "a", "column", "header", "from", "the", "MPT", "file", "into", "a", "tuple", "of", "canonical", "name", "and", "appropriate", "numpy", "dtype" ]
python
train
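Two illustrative calls showing the canonicalization (note how averaged-column headers such as "<I>/mA" are collapsed onto their plain names):

    import numpy as np  # np.float_ / np.int_ as used above

    print(fieldname_to_dtype("<I>/mA"))        # ('I/mA', np.float_)
    print(fieldname_to_dtype("cycle number"))  # ('cycle number', np.int_)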
saltstack/salt
salt/utils/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3675-L3694
def unregister_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
[ "def", "unregister_vm", "(", "vm_ref", ")", ":", "vm_name", "=", "get_managed_object_name", "(", "vm_ref", ")", "log", ".", "trace", "(", "'Destroying vm \\'%s\\''", ",", "vm_name", ")", "try", ":", "vm_ref", ".", "UnregisterVM", "(", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")" ]
Destroys the virtual machine vm_ref Managed object reference of a virtual machine object
[ "Destroys", "the", "virtual", "machine" ]
python
train
Krokop/python-xmlstats
xmlstats/__init__.py
https://github.com/Krokop/python-xmlstats/blob/d3ec60f09bdee71bbeaac8992ff792b5ebbdd704/xmlstats/__init__.py#L78-L87
def _build_url(self, host, sport, method, id, format, parameters):
    """ build url from args """
    path = "/".join(filter(None, (sport, method, id)))
    url = "https://" + host + "/" + path + "." + format
    if parameters:
        paramstring = urllib.parse.urlencode(parameters)
        url = url + "?" + paramstring
    return url
[ "def", "_build_url", "(", "self", ",", "host", ",", "sport", ",", "method", ",", "id", ",", "format", ",", "parameters", ")", ":", "path", "=", "\"/\"", ".", "join", "(", "filter", "(", "None", ",", "(", "sport", ",", "method", ",", "id", ")", ")", ")", "url", "=", "\"https://\"", "+", "host", "+", "\"/\"", "+", "path", "+", "\".\"", "+", "format", "if", "parameters", ":", "paramstring", "=", "urllib", ".", "parse", ".", "urlencode", "(", "parameters", ")", "url", "=", "url", "+", "\"?\"", "+", "paramstring", "return", "url" ]
build url from args
[ "build", "url", "from", "args" ]
python
train
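A standalone sketch of the same construction, with made-up host, sport, and parameters (the real client fills these in from its configuration):

    import urllib.parse

    host, sport, method, id_, fmt = "example.com", "nba", "boxscore", "20130131-lal-at-min", "json"
    path = "/".join(filter(None, (sport, method, id_)))  # filter(None, ...) drops empty parts
    url = "https://" + host + "/" + path + "." + fmt
    params = {"status": "expanded"}
    if params:
        url += "?" + urllib.parse.urlencode(params)
    print(url)  # https://example.com/nba/boxscore/20130131-lal-at-min.json?status=expanded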
fitnr/censusgeocode
censusgeocode/censusgeocode.py
https://github.com/fitnr/censusgeocode/blob/9414c331a63fbcfff6b7295cd8935c40ce54c88c/censusgeocode/censusgeocode.py#L117-L126
def address(self, street, city=None, state=None, zipcode=None, **kwargs):
    '''Geocode an address.'''
    fields = {
        'street': street,
        'city': city,
        'state': state,
        'zip': zipcode,
    }
    return self._fetch('address', fields, **kwargs)
[ "def", "address", "(", "self", ",", "street", ",", "city", "=", "None", ",", "state", "=", "None", ",", "zipcode", "=", "None", ",", "*", "*", "kwargs", ")", ":", "fields", "=", "{", "'street'", ":", "street", ",", "'city'", ":", "city", ",", "'state'", ":", "state", ",", "'zip'", ":", "zipcode", ",", "}", "return", "self", ".", "_fetch", "(", "'address'", ",", "fields", ",", "*", "*", "kwargs", ")" ]
Geocode an address.
[ "Geocode", "an", "address", "." ]
python
train
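A hedged usage sketch; CensusGeocode is the client class the censusgeocode package documents, and the address values are arbitrary:

    from censusgeocode import CensusGeocode

    cg = CensusGeocode()
    result = cg.address('1600 Pennsylvania Avenue NW',
                        city='Washington', state='DC', zipcode='20500')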
bububa/pyTOP
pyTOP/campaign.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/campaign.py#L367-L375
def record_get(self, creative_ids, nick=None):
    '''xxxxx.xxxxx.creatives.record.get
    ===================================
    Given a list of creative IDs, fetch the modification records for the corresponding creatives'''
    request = TOPRequest('xxxxx.xxxxx.creatives.record.get')
    request['creative_ids'] = creative_ids
    if nick != None:
        request['nick'] = nick
    self.create(self.execute(request), models={'result': CreativeRecord})
    return self.result
[ "def", "record_get", "(", "self", ",", "creative_ids", ",", "nick", "=", "None", ")", ":", "request", "=", "TOPRequest", "(", "'xxxxx.xxxxx.creatives.record.get'", ")", "request", "[", "'creative_ids'", "]", "=", "creative_ids", "if", "nick", "!=", "None", ":", "request", "[", "'nick'", "]", "=", "nick", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ")", ",", "models", "=", "{", "'result'", ":", "CreativeRecord", "}", ")", "return", "self", ".", "result" ]
xxxxx.xxxxx.creatives.record.get =================================== Given a list of creative IDs, fetch the modification records for the corresponding creatives
[ "xxxxx", ".", "xxxxx", ".", "creatives", ".", "record", ".", "get", "===================================", "根据一个创意Id列表取得创意对应的修改记录" ]
python
train
saltant-org/saltant-py
saltant/models/base_task_instance.py
https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/base_task_instance.py#L125-L141
def wait_until_finished(
        self, refresh_period=DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD
):
    """Wait until a task instance with the given UUID is finished.

    Args:
        refresh_period (int, optional): How many seconds to wait
            before checking the task's status. Defaults to 5 seconds.

    Returns:
        :class:`saltant.models.base_task_instance.BaseTaskInstance`:
            This task instance model after it finished.
    """
    return self.manager.wait_until_finished(
        uuid=self.uuid, refresh_period=refresh_period
    )
[ "def", "wait_until_finished", "(", "self", ",", "refresh_period", "=", "DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD", ")", ":", "return", "self", ".", "manager", ".", "wait_until_finished", "(", "uuid", "=", "self", ".", "uuid", ",", "refresh_period", "=", "refresh_period", ")" ]
Wait until a task instance with the given UUID is finished. Args: refresh_period (int, optional): How many seconds to wait before checking the task's status. Defaults to 5 seconds. Returns: :class:`saltant.models.base_task_instance.BaseTaskInstance`: This task instance model after it finished.
[ "Wait", "until", "a", "task", "instance", "with", "the", "given", "UUID", "is", "finished", "." ]
python
train
senaite/senaite.core
bika/lims/barcode.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/barcode.py#L95-L119
def handle_AnalysisRequest(self, instance):
    """Possible redirects for an AR.
    - If AR is sample_due: receive it before proceeding.
    - If AR belongs to Batch, redirect to the BatchBook view.
    - If AR does not belong to Batch:
        - if permission/workflow permit: go to AR manage_results.
        - For other ARs, just redirect to the view screen.
    """
    # - If AR is sample_due: receive it before proceeding.
    wf = getToolByName(self.context, 'portal_workflow')
    if wf.getInfoFor(instance, 'review_state') == 'sample_due':
        try:
            wf.doActionFor(instance, 'receive')
        except WorkflowException:
            pass
    # - If AR belongs to Batch, redirect to the BatchBook view.
    batch = instance.getBatch()
    if batch:
        return batch.absolute_url() + "/batchbook"
    # - if permission/workflow permit: go to AR manage_results.
    mtool = getToolByName(self.context, 'portal_membership')
    if mtool.checkPermission(EditResults, instance):
        return instance.absolute_url() + '/manage_results'
    # - For other ARs, just redirect to the view screen.
    return instance.absolute_url()
[ "def", "handle_AnalysisRequest", "(", "self", ",", "instance", ")", ":", "# - If AR is sample_due: receive it before proceeding.", "wf", "=", "getToolByName", "(", "self", ".", "context", ",", "'portal_workflow'", ")", "if", "wf", ".", "getInfoFor", "(", "instance", ",", "'review_state'", ")", "==", "'sample_due'", ":", "try", ":", "wf", ".", "doActionFor", "(", "instance", ",", "'receive'", ")", "except", "WorkflowException", ":", "pass", "# - If AR belongs to Batch, redirect to the BatchBook view.", "batch", "=", "instance", ".", "getBatch", "(", ")", "if", "batch", ":", "return", "batch", ".", "absolute_url", "(", ")", "+", "\"/batchbook\"", "# - if permission/workflow permit: go to AR manage_results.", "mtool", "=", "getToolByName", "(", "self", ".", "context", ",", "'portal_membership'", ")", "if", "mtool", ".", "checkPermission", "(", "EditResults", ",", "instance", ")", ":", "return", "instance", ".", "absolute_url", "(", ")", "+", "'/manage_results'", "# - For other ARs, just redirect to the view screen.", "return", "instance", ".", "absolute_url", "(", ")" ]
Possible redirects for an AR. - If AR is sample_due: receive it before proceeding. - If AR belongs to Batch, redirect to the BatchBook view. - If AR does not belong to Batch: - if permission/workflow permit: go to AR manage_results. - For other ARs, just redirect to the view screen.
[ "Possible", "redirects", "for", "an", "AR", ".", "-", "If", "AR", "is", "sample_due", ":", "receive", "it", "before", "proceeding", ".", "-", "If", "AR", "belongs", "to", "Batch", "redirect", "to", "the", "BatchBook", "view", ".", "-", "If", "AR", "does", "not", "belong", "to", "Batch", ":", "-", "if", "permission", "/", "workflow", "permit", ":", "go", "to", "AR", "manage_results", ".", "-", "For", "other", "ARs", "just", "redirect", "to", "the", "view", "screen", "." ]
python
train
log2timeline/plaso
plaso/output/l2t_csv.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/l2t_csv.py#L31-L42
def _FormatField(self, field):
    """Formats a field.

    Args:
        field (str): field value.

    Returns:
        str: formatted field value.
    """
    if self._FIELD_DELIMITER and isinstance(field, py2to3.STRING_TYPES):
        return field.replace(self._FIELD_DELIMITER, ' ')
    return field
[ "def", "_FormatField", "(", "self", ",", "field", ")", ":", "if", "self", ".", "_FIELD_DELIMITER", "and", "isinstance", "(", "field", ",", "py2to3", ".", "STRING_TYPES", ")", ":", "return", "field", ".", "replace", "(", "self", ".", "_FIELD_DELIMITER", ",", "' '", ")", "return", "field" ]
Formats a field. Args: field (str): field value. Returns: str: formatted field value.
[ "Formats", "a", "field", "." ]
python
train
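The sanitization itself is a one-liner; with a comma as the field delimiter, embedded delimiters are replaced by spaces so they cannot break the CSV row:

    field = "value,with,commas"
    print(field.replace(",", " "))  # 'value with commas'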
ggravlingen/pytradfri
pytradfri/util.py
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/util.py#L86-L88
def get_selected_values(self, selection):
    """Return a list of values for the given selection."""
    return [v for b, v in self._choices if b & selection]
[ "def", "get_selected_values", "(", "self", ",", "selection", ")", ":", "return", "[", "v", "for", "b", ",", "v", "in", "self", ".", "_choices", "if", "b", "&", "selection", "]" ]
Return a list of values for the given selection.
[ "Return", "a", "list", "of", "values", "for", "the", "given", "selection", "." ]
python
train
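A minimal sketch of the bitmask selection (the choices and the selection are made up): each choice pairs a bit flag with a value, and a selection keeps every value whose flag is set:

    choices = [(1, 'red'), (2, 'green'), (4, 'blue')]
    selection = 1 | 4
    print([v for b, v in choices if b & selection])  # ['red', 'blue']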
hozn/coilmq
coilmq/store/dbm.py
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/store/dbm.py#L200-L210
def has_frames(self, destination):
    """
    Whether specified queue has any frames.

    @param destination: The queue name (destination).
    @type destination: C{str}

    @return: Whether there are any frames in the specified queue.
    @rtype: C{bool}
    """
    return (destination in self.queue_metadata) and bool(self.queue_metadata[destination]['frames'])
[ "def", "has_frames", "(", "self", ",", "destination", ")", ":", "return", "(", "destination", "in", "self", ".", "queue_metadata", ")", "and", "bool", "(", "self", ".", "queue_metadata", "[", "destination", "]", "[", "'frames'", "]", ")" ]
Whether specified queue has any frames. @param destination: The queue name (destination). @type destination: C{str} @return: Whether there are any frames in the specified queue. @rtype: C{bool}
[ "Whether", "specified", "queue", "has", "any", "frames", "." ]
python
train
shaldengeki/python-mal
myanimelist/character.py
https://github.com/shaldengeki/python-mal/blob/2c3356411a74d88ba13f6b970388040d696f8392/myanimelist/character.py#L250-L279
def parse_clubs(self, clubs_page):
    """Parses the DOM and returns character clubs attributes.

    :type clubs_page: :class:`bs4.BeautifulSoup`
    :param clubs_page: MAL character clubs page's DOM

    :rtype: dict
    :return: character clubs attributes.
    """
    character_info = self.parse_sidebar(clubs_page)
    second_col = clubs_page.find(u'div', {'id': 'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]

    try:
        clubs_header = second_col.find(u'div', text=u'Related Clubs')
        character_info[u'clubs'] = []
        if clubs_header:
            curr_elt = clubs_header.nextSibling
            while curr_elt is not None:
                if curr_elt.name == u'div':
                    link = curr_elt.find(u'a')
                    club_id = int(re.match(r'/clubs\.php\?cid=(?P<id>[0-9]+)', link.get(u'href')).group(u'id'))
                    num_members = int(re.match(r'(?P<num>[0-9]+) members', curr_elt.find(u'small').text).group(u'num'))
                    character_info[u'clubs'].append(self.session.club(club_id).set({'name': link.text, 'num_members': num_members}))
                curr_elt = curr_elt.nextSibling
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    return character_info
[ "def", "parse_clubs", "(", "self", ",", "clubs_page", ")", ":", "character_info", "=", "self", ".", "parse_sidebar", "(", "clubs_page", ")", "second_col", "=", "clubs_page", ".", "find", "(", "u'div'", ",", "{", "'id'", ":", "'content'", "}", ")", ".", "find", "(", "u'table'", ")", ".", "find", "(", "u'tr'", ")", ".", "find_all", "(", "u'td'", ",", "recursive", "=", "False", ")", "[", "1", "]", "try", ":", "clubs_header", "=", "second_col", ".", "find", "(", "u'div'", ",", "text", "=", "u'Related Clubs'", ")", "character_info", "[", "u'clubs'", "]", "=", "[", "]", "if", "clubs_header", ":", "curr_elt", "=", "clubs_header", ".", "nextSibling", "while", "curr_elt", "is", "not", "None", ":", "if", "curr_elt", ".", "name", "==", "u'div'", ":", "link", "=", "curr_elt", ".", "find", "(", "u'a'", ")", "club_id", "=", "int", "(", "re", ".", "match", "(", "r'/clubs\\.php\\?cid=(?P<id>[0-9]+)'", ",", "link", ".", "get", "(", "u'href'", ")", ")", ".", "group", "(", "u'id'", ")", ")", "num_members", "=", "int", "(", "re", ".", "match", "(", "r'(?P<num>[0-9]+) members'", ",", "curr_elt", ".", "find", "(", "u'small'", ")", ".", "text", ")", ".", "group", "(", "u'num'", ")", ")", "character_info", "[", "u'clubs'", "]", ".", "append", "(", "self", ".", "session", ".", "club", "(", "club_id", ")", ".", "set", "(", "{", "'name'", ":", "link", ".", "text", ",", "'num_members'", ":", "num_members", "}", ")", ")", "curr_elt", "=", "curr_elt", ".", "nextSibling", "except", ":", "if", "not", "self", ".", "session", ".", "suppress_parse_exceptions", ":", "raise", "return", "character_info" ]
Parses the DOM and returns character clubs attributes. :type clubs_page: :class:`bs4.BeautifulSoup` :param clubs_page: MAL character clubs page's DOM :rtype: dict :return: character clubs attributes.
[ "Parses", "the", "DOM", "and", "returns", "character", "clubs", "attributes", "." ]
python
train
ladybug-tools/ladybug
ladybug/analysisperiod.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/analysisperiod.py#L134-L158
def from_json(cls, data):
    """Create an analysis period from a dictionary.

    Args:
        data: {
            st_month: An integer between 1-12 for starting month (default = 1)
            st_day: An integer between 1-31 for starting day (default = 1).
                Note that some months are shorter than 31 days.
            st_hour: An integer between 0-23 for starting hour (default = 0)
            end_month: An integer between 1-12 for ending month (default = 12)
            end_day: An integer between 1-31 for ending day (default = 31)
                Note that some months are shorter than 31 days.
            end_hour: An integer between 0-23 for ending hour (default = 23)
            timestep: An integer number from 1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30, 60
        }
    """
    keys = ('st_month', 'st_day', 'st_hour', 'end_month',
            'end_day', 'end_hour', 'timestep', 'is_leap_year')
    for key in keys:
        if key not in data:
            data[key] = None

    return cls(
        data['st_month'], data['st_day'], data['st_hour'], data['end_month'],
        data['end_day'], data['end_hour'], data['timestep'],
        data['is_leap_year'])
[ "def", "from_json", "(", "cls", ",", "data", ")", ":", "keys", "=", "(", "'st_month'", ",", "'st_day'", ",", "'st_hour'", ",", "'end_month'", ",", "'end_day'", ",", "'end_hour'", ",", "'timestep'", ",", "'is_leap_year'", ")", "for", "key", "in", "keys", ":", "if", "key", "not", "in", "data", ":", "data", "[", "key", "]", "=", "None", "return", "cls", "(", "data", "[", "'st_month'", "]", ",", "data", "[", "'st_day'", "]", ",", "data", "[", "'st_hour'", "]", ",", "data", "[", "'end_month'", "]", ",", "data", "[", "'end_day'", "]", ",", "data", "[", "'end_hour'", "]", ",", "data", "[", "'timestep'", "]", ",", "data", "[", "'is_leap_year'", "]", ")" ]
Create an analysis period from a dictionary. Args: data: { st_month: An integer between 1-12 for starting month (default = 1) st_day: An integer between 1-31 for starting day (default = 1). Note that some months are shorter than 31 days. st_hour: An integer between 0-23 for starting hour (default = 0) end_month: An integer between 1-12 for ending month (default = 12) end_day: An integer between 1-31 for ending day (default = 31) Note that some months are shorter than 31 days. end_hour: An integer between 0-23 for ending hour (default = 23) timestep: An integer number from 1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30, 60 }
[ "Create", "an", "analysis", "period", "from", "a", "dictionary", ".", "Args", ":", "data", ":", "{", "st_month", ":", "An", "integer", "between", "1", "-", "12", "for", "starting", "month", "(", "default", "=", "1", ")", "st_day", ":", "An", "integer", "between", "1", "-", "31", "for", "starting", "day", "(", "default", "=", "1", ")", ".", "Note", "that", "some", "months", "are", "shorter", "than", "31", "days", ".", "st_hour", ":", "An", "integer", "between", "0", "-", "23", "for", "starting", "hour", "(", "default", "=", "0", ")", "end_month", ":", "An", "integer", "between", "1", "-", "12", "for", "ending", "month", "(", "default", "=", "12", ")", "end_day", ":", "An", "integer", "between", "1", "-", "31", "for", "ending", "day", "(", "default", "=", "31", ")", "Note", "that", "some", "months", "are", "shorter", "than", "31", "days", ".", "end_hour", ":", "An", "integer", "between", "0", "-", "23", "for", "ending", "hour", "(", "default", "=", "23", ")", "timestep", ":", "An", "integer", "number", "from", "1", "2", "3", "4", "5", "6", "10", "12", "15", "20", "30", "60", "}" ]
python
train
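A usage sketch with hypothetical values; any key left out of the dict is set to None, which defers to the constructor defaults listed in the docstring:

    from ladybug.analysisperiod import AnalysisPeriod  # module path per the URL above

    period = AnalysisPeriod.from_json({'st_month': 6, 'end_month': 8})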
orb-framework/orb
orb/core/schema.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/schema.py#L414-L423
def setColumns(self, columns):
    """
    Sets the columns that this schema uses.

    :param      columns | [<orb.Column>, ..]
    """
    self.__columns = {}
    for name, column in columns.items():
        self.__columns[name] = column
        column.setSchema(self)
[ "def", "setColumns", "(", "self", ",", "columns", ")", ":", "self", ".", "__columns", "=", "{", "}", "for", "name", ",", "column", "in", "columns", ".", "items", "(", ")", ":", "self", ".", "__columns", "[", "name", "]", "=", "column", "column", ".", "setSchema", "(", "self", ")" ]
Sets the columns that this schema uses. :param columns | [<orb.Column>, ..]
[ "Sets", "the", "columns", "that", "this", "schema", "uses", ".", ":", "param", "columns", "|", "[", "<orb", ".", "Column", ">", "..", "]" ]
python
train
pypa/pipenv
pipenv/vendor/vistir/contextmanagers.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/contextmanagers.py#L256-L300
def open_file(link, session=None, stream=True):
    """
    Open local or remote file for reading.

    :type link: pip._internal.index.Link or str
    :type session: requests.Session
    :param bool stream: Try to stream if remote, default True
    :raises ValueError: If link points to a local directory.
    :return: a context manager to the opened file-like object
    """
    if not isinstance(link, six.string_types):
        try:
            link = link.url_without_fragment
        except AttributeError:
            raise ValueError("Cannot parse url from unknown type: {0!r}".format(link))
    if not is_valid_url(link) and os.path.exists(link):
        link = path_to_url(link)
    if is_file_url(link):
        # Local URL
        local_path = url_to_path(link)
        if os.path.isdir(local_path):
            raise ValueError("Cannot open directory for read: {}".format(link))
        else:
            with io.open(local_path, "rb") as local_file:
                yield local_file
    else:
        # Remote URL
        headers = {"Accept-Encoding": "identity"}
        if not session:
            from requests import Session
            session = Session()
        with session.get(link, headers=headers, stream=stream) as resp:
            try:
                raw = getattr(resp, "raw", None)
                result = raw if raw else resp
                yield result
            finally:
                if raw:
                    conn = getattr(raw, "_connection")
                    if conn is not None:
                        conn.close()
                result.close()
[ "def", "open_file", "(", "link", ",", "session", "=", "None", ",", "stream", "=", "True", ")", ":", "if", "not", "isinstance", "(", "link", ",", "six", ".", "string_types", ")", ":", "try", ":", "link", "=", "link", ".", "url_without_fragment", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"Cannot parse url from unkown type: {0!r}\"", ".", "format", "(", "link", ")", ")", "if", "not", "is_valid_url", "(", "link", ")", "and", "os", ".", "path", ".", "exists", "(", "link", ")", ":", "link", "=", "path_to_url", "(", "link", ")", "if", "is_file_url", "(", "link", ")", ":", "# Local URL", "local_path", "=", "url_to_path", "(", "link", ")", "if", "os", ".", "path", ".", "isdir", "(", "local_path", ")", ":", "raise", "ValueError", "(", "\"Cannot open directory for read: {}\"", ".", "format", "(", "link", ")", ")", "else", ":", "with", "io", ".", "open", "(", "local_path", ",", "\"rb\"", ")", "as", "local_file", ":", "yield", "local_file", "else", ":", "# Remote URL", "headers", "=", "{", "\"Accept-Encoding\"", ":", "\"identity\"", "}", "if", "not", "session", ":", "from", "requests", "import", "Session", "session", "=", "Session", "(", ")", "with", "session", ".", "get", "(", "link", ",", "headers", "=", "headers", ",", "stream", "=", "stream", ")", "as", "resp", ":", "try", ":", "raw", "=", "getattr", "(", "resp", ",", "\"raw\"", ",", "None", ")", "result", "=", "raw", "if", "raw", "else", "resp", "yield", "result", "finally", ":", "if", "raw", ":", "conn", "=", "getattr", "(", "raw", ",", "\"_connection\"", ")", "if", "conn", "is", "not", "None", ":", "conn", ".", "close", "(", ")", "result", ".", "close", "(", ")" ]
Open local or remote file for reading. :type link: pip._internal.index.Link or str :type session: requests.Session :param bool stream: Try to stream if remote, default True :raises ValueError: If link points to a local directory. :return: a context manager to the opened file-like object
[ "Open", "local", "or", "remote", "file", "for", "reading", "." ]
python
train
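A hedged usage sketch; open_file is yield-based, so it is presumably wrapped as a context manager in the full module (any decorator sits above the slice shown), and the URL is a placeholder:

    from vistir.contextmanagers import open_file  # import path per the vendored module above

    with open_file("https://example.com/archive.tar.gz") as fp:
        chunk = fp.read(1024)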
helixyte/everest
everest/representers/utils.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/representers/utils.py#L41-L50
def get_mapping_registry(content_type):
    """
    Returns the data element registry for the given content type (a
    Singleton).

    :Note: This only works after a representer for the given content type
        has been created.
    """
    reg = get_current_registry()
    rpr_reg = reg.queryUtility(IRepresenterRegistry)
    return rpr_reg.get_mapping_registry(content_type)
[ "def", "get_mapping_registry", "(", "content_type", ")", ":", "reg", "=", "get_current_registry", "(", ")", "rpr_reg", "=", "reg", ".", "queryUtility", "(", "IRepresenterRegistry", ")", "return", "rpr_reg", ".", "get_mapping_registry", "(", "content_type", ")" ]
Returns the data element registry for the given content type (a Singleton). :Note: This only works after a representer for the given content type has been created.
[ "Returns", "the", "data", "element", "registry", "for", "the", "given", "content", "type", "(", "a", "Singleton", ")", "." ]
python
train
JoseAntFer/pyny3d
pyny3d/geoms.py
https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L2328-L2388
def photo(self, azimuth_zenit, plot=False):
    """
    Computes a change of the reference system for the whole ``pyny.Space``
    to align the `y` axis with a given direction. Returns its elements
    (polygons, holes, points) extracted in a list.

    In its conception, this method was created as a tool for the shadows
    computation to calculate "what is in front and what is behind to the
    look of the Sun". For this reason, the direction is given in spherical
    coordinates by two angles: the azimuth and the zenit.

        * The azimuth is zero when pointing to the South, -pi/4 to the
          East, pi/4 to the West and pi/2 to the North.
        * The zenit is zero at the ground level and pi/4 "pointing
          completely orthogonal to the sky".

    In short, this method answers "How would the ``pyny.Space`` look in a
    photograph taken from an arbitrary direction in cylindrical
    perspective?"

    The photograph has a new reference system: x, y, depth. The sign of
    the new depth coordinate has to be checked before assuming what is
    closer and what is further inasmuch as it changes depending on the
    direction of the photo.

    :param azimuth_zenit: Direction of the photo in spherical coordinates
        and in radians.
    :type azimuth_zenit: tuple
    :param plot: If True, it shows the photo visualization.
    :type plot: bool
    :returns: Exploded ``pyny.Space``
    :rtype: list

    .. note:: Before assuming that this method does exactly what it is
        supposed to do, it is highly recommended to visually verify it
        through the *plot=True* argument. It is easy to introduce the
        angles in a different sign criteria, among other frequent
        mistakes.
    """
    self.lock()
    a, z = azimuth_zenit
    R = np.array([[np.cos(a), -np.sin(a)*np.cos(z), np.sin(z)*np.sin(a)],
                  [np.sin(a), np.cos(z)*np.cos(a), -np.cos(a)*np.sin(z)],
                  [0, np.sin(z), np.cos(z)]])
    _, points = self.map
    G = np.dot(R, points.T).T  # Here it is in self.Space coordinates

    # Coordinate change
    G = np.array([G[:, 0], G[:, 2], G[:, 1]]).T  # Photograph coordinate
    poly_hole_points = self.explode_map(G)

    if plot:
        polygons, holes, points = poly_hole_points
        aux_surface = Surface(polygons, holes=holes, make_ccw=False)
        ax = aux_surface.plot2d(alpha=0.6, ret=True)
        if points.shape[0] > 0:
            ax.scatter(points[:, 0], points[:, 1], c='#990000', s=25)
    return poly_hole_points
[ "def", "photo", "(", "self", ",", "azimuth_zenit", ",", "plot", "=", "False", ")", ":", "self", ".", "lock", "(", ")", "a", ",", "z", "=", "azimuth_zenit", "R", "=", "np", ".", "array", "(", "[", "[", "np", ".", "cos", "(", "a", ")", ",", "-", "np", ".", "sin", "(", "a", ")", "*", "np", ".", "cos", "(", "z", ")", ",", "np", ".", "sin", "(", "z", ")", "*", "np", ".", "sin", "(", "a", ")", "]", ",", "[", "np", ".", "sin", "(", "a", ")", ",", "np", ".", "cos", "(", "z", ")", "*", "np", ".", "cos", "(", "a", ")", ",", "-", "np", ".", "cos", "(", "a", ")", "*", "np", ".", "sin", "(", "z", ")", "]", ",", "[", "0", ",", "np", ".", "sin", "(", "z", ")", ",", "np", ".", "cos", "(", "z", ")", "]", "]", ")", "_", ",", "points", "=", "self", ".", "map", "G", "=", "np", ".", "dot", "(", "R", ",", "points", ".", "T", ")", ".", "T", "# Here it is in self.Space coordinates\r", "# Coordinate change\r", "G", "=", "np", ".", "array", "(", "[", "G", "[", ":", ",", "0", "]", ",", "G", "[", ":", ",", "2", "]", ",", "G", "[", ":", ",", "1", "]", "]", ")", ".", "T", "# Photograph coordinate\r", "poly_hole_points", "=", "self", ".", "explode_map", "(", "G", ")", "if", "plot", ":", "polygons", ",", "holes", ",", "points", "=", "poly_hole_points", "aux_surface", "=", "Surface", "(", "polygons", ",", "holes", "=", "holes", ",", "make_ccw", "=", "False", ")", "ax", "=", "aux_surface", ".", "plot2d", "(", "alpha", "=", "0.6", ",", "ret", "=", "True", ")", "if", "points", ".", "shape", "[", "0", "]", ">", "0", ":", "ax", ".", "scatter", "(", "points", "[", ":", ",", "0", "]", ",", "points", "[", ":", ",", "1", "]", ",", "c", "=", "'#990000'", ",", "s", "=", "25", ")", "return", "poly_hole_points" ]
Computes a change of the reference system for the whole ``pyny.Space`` to align the `y` axis with a given direction. Returns its elements (polygons, holes, points) extracted in a list. In its conception, this method was created as a tool for the shadows computation to calculate "what is in front and what is behind to the look of the Sun". For this reason, the direction is given in spherical coordinates by two angles: the azimuth and the zenit. * The azimuth is zero when pointing to the South, -pi/4 to the East, pi/4 to the West and pi/2 to the North. * The zenit is zero at the ground level and pi/4 "pointing completely orthogonal to the sky". In short, this method answers "How would the ``pyny.Space`` look in a photograph taken from an arbitrary direction in cylindrical perspective?" The photograph has a new reference system: x, y, depth. The sign of the new depth coordinate has to be checked before assuming what is closer and what is further inasmuch as it changes depending on the direction of the photo. :param azimuth_zenit: Direction of the photo in spherical coordinates and in radians. :type azimuth_zenit: tuple :param plot: If True, it shows the photo visualization. :type plot: bool :returns: Exploded ``pyny.Space`` :rtype: list .. note:: Before assuming that this method does exactly what it is supposed to do, it is highly recommended to visually verify it through the *plot=True* argument. It is easy to introduce the angles in a different sign criteria, among other frequent mistakes.
[ "Computes", "a", "change", "of", "the", "reference", "system", "for", "the", "whole", "pyny", ".", "Space", "to", "align", "the", "y", "axis", "with", "a", "given", "direction", ".", "Returns", "its", "elements", "(", "polygons", "holes", "points", ")", "extracted", "in", "a", "list", ".", "In", "its", "conception", "this", "method", "was", "created", "as", "a", "tool", "for", "the", "shadows", "computation", "to", "calculate", "what", "is", "in", "front", "and", "what", "is", "behind", "to", "the", "look", "of", "the", "Sun", ".", "For", "this", "reason", "the", "direction", "is", "given", "in", "spherical", "coordinates", "by", "two", "angles", ":", "the", "azimth", "and", "the", "zenit", ".", "*", "The", "azimuth", "is", "zero", "when", "pointing", "to", "the", "South", "-", "pi", "/", "4", "to", "the", "East", "pi", "/", "4", "to", "the", "West", "and", "pi", "/", "2", "to", "the", "North", ".", "*", "The", "zenit", "is", "zero", "at", "the", "ground", "level", "and", "pi", "/", "4", "pointing", "completely", "orthogonal", "to", "the", "sky", ".", "In", "short", "this", "methods", "answer", "How", "would", "the", "pyny", ".", "Space", "look", "in", "a", "photograph", "taken", "from", "an", "arbitrary", "direction", "in", "cylindrical", "perpective?", "The", "photograph", "has", "a", "new", "reference", "system", ":", "x", "y", "depth", ".", "The", "sign", "of", "the", "new", "depth", "coordinate", "has", "to", "be", "checked", "before", "assuming", "what", "is", "closer", "and", "what", "is", "further", "inasmuch", "as", "it", "changes", "depending", "on", "the", "direction", "of", "the", "photo", ".", ":", "param", "azimuth_zenit", ":", "Direction", "of", "the", "photo", "in", "spherical", "coordinates", "and", "in", "radians", ".", ":", "type", "azimuth_zenit", ":", "tuple", ":", "param", "plot", ":", "If", "True", "is", "shows", "the", "photo", "visualization", ".", ":", "type", "plot", ":", "bool", ":", "returns", ":", "Exploded", "pyny", ".", "Space", ":", "rtype", ":", "list", "..", "note", "::", "Before", "assume", "that", "this", "method", "do", "exactly", "what", "it", "is", "supposed", "to", "do", "it", "is", "highly", "recommended", "to", "visualy", "verify", "throught", "the", "*", "plot", "=", "True", "*", "argument", ".", "It", "is", "easy", "to", "introduce", "the", "angles", "in", "a", "different", "sign", "criteria", "among", "other", "frequent", "mistakes", "." ]
python
train
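The docstring above maps an (azimuth, zenith) pair to a viewing direction and re-expresses the geometry in the camera's (x, y, depth) frame. A minimal numpy sketch of that coordinate change, independent of pyny3d's API — the spherical convention below (azimuth measured from South, zenith from the ground) is an assumption for illustration, not the library's:

import numpy as np

def view_basis(azimuth, zenith):
    # Unit vector of the viewing direction in world coordinates.
    d = np.array([np.cos(zenith) * np.sin(azimuth),
                  np.cos(zenith) * np.cos(azimuth),
                  np.sin(zenith)])
    # Build an orthonormal basis around d; pick an 'up' not parallel to it.
    up = np.array([0.0, 0.0, 1.0]) if abs(d[2]) < 0.9 else np.array([1.0, 0.0, 0.0])
    x = np.cross(up, d)
    x /= np.linalg.norm(x)          # the photo's horizontal axis
    y = np.cross(d, x)              # the photo's vertical axis
    return np.vstack([x, y, d])     # rows: x, y, depth

points = np.array([[1.0, 2.0, 0.0], [0.0, 5.0, 3.0]])
photo_coords = points @ view_basis(np.pi / 6, np.pi / 4).T  # columns: x, y, depth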
nickjj/ansigenome
ansigenome/scan.py
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L400-L429
def augment_main_keys(self, keys, file): """ Add the main key if it is missing. """ nfile = file ansigenome_block = """ ansigenome_info: galaxy_id: '' travis: False synopsis: | Describe your role in a few paragraphs.... usage: | Describe how to use in more detail... #custom: | # Any custom output you want after the usage section.. """ for key in keys: if key[0] not in nfile: if key[0] == "ansigenome_info": # make sure ansigenome_info is always on the bottom nfile = nfile + "\n{0}".format(ansigenome_block) else: nfile = "\n{0}: {1}\n\n".format(key[0], key[1]) + nfile return nfile
[ "def", "augment_main_keys", "(", "self", ",", "keys", ",", "file", ")", ":", "nfile", "=", "file", "ansigenome_block", "=", "\"\"\"\nansigenome_info:\n galaxy_id: ''\n\n travis: False\n\n synopsis: |\n Describe your role in a few paragraphs....\n\n usage: |\n Describe how to use in more detail...\n\n #custom: |\n # Any custom output you want after the usage section..\n\"\"\"", "for", "key", "in", "keys", ":", "if", "key", "[", "0", "]", "not", "in", "nfile", ":", "if", "key", "[", "0", "]", "==", "\"ansigenome_info\"", ":", "# make sure ansigenome_info is always on the bottom", "nfile", "=", "nfile", "+", "\"\\n{0}\"", ".", "format", "(", "ansigenome_block", ")", "else", ":", "nfile", "=", "\"\\n{0}: {1}\\n\\n\"", ".", "format", "(", "key", "[", "0", "]", ",", "key", "[", "1", "]", ")", "+", "nfile", "return", "nfile" ]
Add the main key if it is missing.
[ "Add", "the", "main", "key", "if", "it", "is", "missing", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_gopro.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_gopro.py#L76-L82
def cmd_gopro_status(self, args): '''show gopro status''' master = self.master if 'GOPRO_HEARTBEAT' in master.messages: print(master.messages['GOPRO_HEARTBEAT']) else: print("No GOPRO_HEARTBEAT messages")
[ "def", "cmd_gopro_status", "(", "self", ",", "args", ")", ":", "master", "=", "self", ".", "master", "if", "'GOPRO_HEARTBEAT'", "in", "master", ".", "messages", ":", "print", "(", "master", ".", "messages", "[", "'GOPRO_HEARTBEAT'", "]", ")", "else", ":", "print", "(", "\"No GOPRO_HEARTBEAT messages\"", ")" ]
show gopro status
[ "show", "gopro", "status" ]
python
train
ntoll/uflash
uflash.py
https://github.com/ntoll/uflash/blob/867468d386da0aa20212b69a152ce8bfc0972366/uflash.py#L265-L281
def save_hex(hex_file, path): """ Given a string representation of a hex file, this function copies it to the specified path thus causing the device mounted at that point to be flashed. If the hex_file is empty it will raise a ValueError. If the filename at the end of the path does not end in '.hex' it will raise a ValueError. """ if not hex_file: raise ValueError('Cannot flash an empty .hex file.') if not path.endswith('.hex'): raise ValueError('The path to flash must be for a .hex file.') with open(path, 'wb') as output: output.write(hex_file.encode('ascii'))
[ "def", "save_hex", "(", "hex_file", ",", "path", ")", ":", "if", "not", "hex_file", ":", "raise", "ValueError", "(", "'Cannot flash an empty .hex file.'", ")", "if", "not", "path", ".", "endswith", "(", "'.hex'", ")", ":", "raise", "ValueError", "(", "'The path to flash must be for a .hex file.'", ")", "with", "open", "(", "path", ",", "'wb'", ")", "as", "output", ":", "output", ".", "write", "(", "hex_file", ".", "encode", "(", "'ascii'", ")", ")" ]
Given a string representation of a hex file, this function copies it to the specified path thus causing the device mounted at that point to be flashed. If the hex_file is empty it will raise a ValueError. If the filename at the end of the path does not end in '.hex' it will raise a ValueError.
[ "Given", "a", "string", "representation", "of", "a", "hex", "file", "this", "function", "copies", "it", "to", "the", "specified", "path", "thus", "causing", "the", "device", "mounted", "at", "that", "point", "to", "be", "flashed", "." ]
python
train
offu/WeRoBot
werobot/client.py
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/client.py#L328-L344
def upload_custom_service_account_avatar(self, account, avatar):
        """
        Set the avatar for a custom service account.

        :param account: username of the custom service account
        :param avatar: avatar image file; must be in jpg format
        :return: the returned JSON data
        """
        return self.post(
            url=
            "http://api.weixin.qq.com/customservice/kfaccount/uploadheadimg",
            params={
                "access_token": self.token,
                "kf_account": account
            },
            files={"media": avatar}
        )
[ "def", "upload_custom_service_account_avatar", "(", "self", ",", "account", ",", "avatar", ")", ":", "return", "self", ".", "post", "(", "url", "=", "\"http://api.weixin.qq.com/customservice/kfaccount/uploadheadimg\"", ",", "params", "=", "{", "\"access_token\"", ":", "self", ".", "token", ",", "\"kf_account\"", ":", "account", "}", ",", "files", "=", "{", "\"media\"", ":", "avatar", "}", ")" ]
Set the avatar for a custom service account. :param account: username of the custom service account :param avatar: avatar image file; must be in jpg format :return: the returned JSON data
[ "设置客服帐号的头像。" ]
python
train
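The upload above is a plain multipart POST. A minimal sketch of the same call made with requests directly instead of the client's post() helper (the use of requests here is an assumption for illustration, not WeRoBot's implementation):

import requests

def upload_kf_avatar(access_token, kf_account, avatar_path):
    url = "http://api.weixin.qq.com/customservice/kfaccount/uploadheadimg"
    params = {"access_token": access_token, "kf_account": kf_account}
    # The avatar must be a jpg file; it is sent as the 'media' form part.
    with open(avatar_path, "rb") as avatar:
        resp = requests.post(url, params=params, files={"media": avatar})
    resp.raise_for_status()
    return resp.json()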
saltstack/salt
salt/states/icinga2.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/icinga2.py#L199-L239
def request_cert(name, master, ticket, port="5665"):
    '''
    Request CA certificate from master icinga2 node.

    name
        The domain name for which this certificate will be saved

    master
        Icinga2 master node for which this certificate will be saved

    ticket
        Authentication ticket generated on icinga2 master

    port
        Icinga2 port, defaults to 5665
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    cert = "{0}ca.crt".format(get_certs_path())

    # Checking if execution is needed.
    if os.path.isfile(cert):
        ret['comment'] = 'No execution needed. Cert: {0} already exists.'.format(cert)
        return ret
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Certificate request from icinga2 master would be executed'
        return ret

    # Executing the command.
    cert_request = __salt__['icinga2.request_cert'](name, master, ticket, port)
    if not cert_request['retcode']:
        ret['comment'] = "Certificate request from icinga2 master executed"
        ret['changes']['cert'] = "Executed. Certificate requested: {0}".format(cert)
        return ret

    ret['comment'] = "FAILED. Certificate request failed with output: {0}".format(cert_request['stdout'])
    ret['result'] = False
    return ret
[ "def", "request_cert", "(", "name", ",", "master", ",", "ticket", ",", "port", "=", "\"5665\"", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "cert", "=", "\"{0}ca.crt\"", ".", "format", "(", "get_certs_path", "(", ")", ")", "# Checking if execution is needed.", "if", "os", ".", "path", ".", "isfile", "(", "cert", ")", ":", "ret", "[", "'comment'", "]", "=", "'No execution needed. Cert: {0} already exists.'", ".", "format", "(", "cert", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Certificate request from icinga2 master would be executed'", "return", "ret", "# Executing the command.", "cert_request", "=", "__salt__", "[", "'icinga2.request_cert'", "]", "(", "name", ",", "master", ",", "ticket", ",", "port", ")", "if", "not", "cert_request", "[", "'retcode'", "]", ":", "ret", "[", "'comment'", "]", "=", "\"Certificate request from icinga2 master executed\"", "ret", "[", "'changes'", "]", "[", "'cert'", "]", "=", "\"Executed. Certificate requested: {0}\"", ".", "format", "(", "cert", ")", "return", "ret", "ret", "[", "'comment'", "]", "=", "\"FAILED. Certificate requested failed with output: {0}\"", ".", "format", "(", "cert_request", "[", "'stdout'", "]", ")", "ret", "[", "'result'", "]", "=", "False", "return", "ret" ]
Request CA certificate from master icinga2 node. name The domain name for which this certificate will be saved master Icinga2 master node for which this certificate will be saved ticket Authentication ticket generated on icinga2 master port Icinga2 port, defaults to 5665
[ "Request", "CA", "certificate", "from", "master", "icinga2", "node", "." ]
python
train
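The state above follows Salt's return-dict contract (name/changes/result/comment): exit early when already converged, report intent only in test mode, otherwise execute and record the outcome. A stripped-down sketch of that flow, with a hypothetical do_work callable standing in for the __salt__ execution-module call:

import os

def idempotent_state(name, target_path, test_mode, do_work):
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    if os.path.isfile(target_path):          # already converged: no-op
        ret["comment"] = "No execution needed. {0} already exists.".format(target_path)
        return ret
    if test_mode:                            # dry run: predict, do not act
        ret["result"] = None
        ret["comment"] = "{0} would be created".format(target_path)
        return ret
    result = do_work()                       # hypothetical; returns {'retcode': ..., 'stdout': ...}
    if not result["retcode"]:                # retcode 0 means success
        ret["comment"] = "Executed"
        ret["changes"][name] = target_path
    else:
        ret["result"] = False
        ret["comment"] = "FAILED: {0}".format(result.get("stdout", ""))
    return ret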
collectiveacuity/labPack
labpack/events/meetup.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/events/meetup.py#L1296-L1346
def list_member_groups(self, member_id): ''' a method to retrieve a list of meetup groups member belongs to :param member_id: integer with meetup member id :return: dictionary with list of group details in [json] group_details = self.objects.group_profile.schema ''' # https://www.meetup.com/meetup_api/docs/members/:member_id/#get title = '%s.list_member_groups' % self.__class__.__name__ # validate inputs input_fields = { 'member_id': member_id } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct member id if not member_id: raise IndexError('%s requires member id argument.' % title) # compose request fields url = '%s/members/%s' % (self.endpoint, str(member_id)) params = { 'fields': 'memberships' } # send requests response_details = self._get_request(url, params=params) # construct method output dictionary member_groups = { 'json': [] } for key, value in response_details.items(): if not key == 'json': member_groups[key] = value # parse response if response_details['json']: if 'memberships' in response_details['json'].keys(): for group in response_details['json']['memberships']['member']: member_groups['json'].append(self.objects.group_profile.ingest(**group)) return member_groups
[ "def", "list_member_groups", "(", "self", ",", "member_id", ")", ":", "# https://www.meetup.com/meetup_api/docs/members/:member_id/#get\r", "title", "=", "'%s.list_member_groups'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs\r", "input_fields", "=", "{", "'member_id'", ":", "member_id", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# construct member id\r", "if", "not", "member_id", ":", "raise", "IndexError", "(", "'%s requires member id argument.'", "%", "title", ")", "# compose request fields\r", "url", "=", "'%s/members/%s'", "%", "(", "self", ".", "endpoint", ",", "str", "(", "member_id", ")", ")", "params", "=", "{", "'fields'", ":", "'memberships'", "}", "# send requests\r", "response_details", "=", "self", ".", "_get_request", "(", "url", ",", "params", "=", "params", ")", "# construct method output dictionary\r", "member_groups", "=", "{", "'json'", ":", "[", "]", "}", "for", "key", ",", "value", "in", "response_details", ".", "items", "(", ")", ":", "if", "not", "key", "==", "'json'", ":", "member_groups", "[", "key", "]", "=", "value", "# parse response\r", "if", "response_details", "[", "'json'", "]", ":", "if", "'memberships'", "in", "response_details", "[", "'json'", "]", ".", "keys", "(", ")", ":", "for", "group", "in", "response_details", "[", "'json'", "]", "[", "'memberships'", "]", "[", "'member'", "]", ":", "member_groups", "[", "'json'", "]", ".", "append", "(", "self", ".", "objects", ".", "group_profile", ".", "ingest", "(", "*", "*", "group", ")", ")", "return", "member_groups" ]
a method to retrieve a list of meetup groups a member belongs to :param member_id: integer with meetup member id :return: dictionary with list of group details in [json] group_details = self.objects.group_profile.schema
[ "a", "method", "to", "retrieve", "a", "list", "of", "meetup", "groups", "member", "belongs", "to", ":", "param", "member_id", ":", "integer", "with", "meetup", "member", "id", ":", "return", ":", "dictionary", "with", "list", "of", "group", "details", "in", "[", "json", "]", "group_details", "=", "self", ".", "objects", ".", "group_profile", ".", "schema" ]
python
train
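Underneath, the method is a single GET of /members/:id with fields=memberships. A sketch of that request using requests (the unauthenticated call is an assumption of this sketch; the real client signs its requests through _get_request):

import requests

def get_member_groups(endpoint, member_id):
    url = "{0}/members/{1}".format(endpoint, member_id)
    resp = requests.get(url, params={"fields": "memberships"})
    resp.raise_for_status()
    payload = resp.json()
    # Mirror the parsing above: the group dicts live under memberships.member.
    return payload.get("memberships", {}).get("member", [])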
kronenthaler/mod-pbxproj
pbxproj/pbxextensions/ProjectFlags.py
https://github.com/kronenthaler/mod-pbxproj/blob/8de3cbdd3210480ddbb1fa0f50a4f4ea87de6e71/pbxproj/pbxextensions/ProjectFlags.py#L218-L240
def add_code_sign(self, code_sign_identity, development_team, provisioning_profile_uuid, provisioning_profile_specifier, target_name=None, configuration_name=None): """ Adds the code sign information to the project and creates the appropriate flags in the configuration. In xcode 8+ the provisioning_profile_uuid becomes optional, and the provisioning_profile_specifier becomes mandatory. Contrariwise, in xcode 8< provisioning_profile_uuid becomes mandatory and provisioning_profile_specifier becomes optional. :param code_sign_identity: Code sign identity name. Usually formatted as: 'iPhone Distribution[: <Company name> (MAAYFEXXXX)]' :param development_team: Development team identifier string. Usually formatted as: 'MAAYFEXXXX' :param provisioning_profile_uuid: Provisioning profile UUID string. Usually formatted as: '6f1ffc4d-xxxx-xxxx-xxxx-6dc186280e1e' :param provisioning_profile_specifier: Provisioning profile specifier (a.k.a. name) string. :param target_name: Target name or list of target names to add the flag to or None for every target :param configuration_name: Configuration name to add the flag to or None for every configuration :return: """ self.set_flags(u'CODE_SIGN_IDENTITY[sdk=iphoneos*]', code_sign_identity, target_name, configuration_name) self.set_flags(u'DEVELOPMENT_TEAM', development_team, target_name, configuration_name) self.set_flags(u'PROVISIONING_PROFILE', provisioning_profile_uuid, target_name, configuration_name) self.set_flags(u'PROVISIONING_PROFILE_SPECIFIER', provisioning_profile_specifier, target_name, configuration_name) for target in self.objects.get_targets(target_name): self.objects[self.rootObject].set_provisioning_style(PBXProvioningTypes.MANUAL, target)
[ "def", "add_code_sign", "(", "self", ",", "code_sign_identity", ",", "development_team", ",", "provisioning_profile_uuid", ",", "provisioning_profile_specifier", ",", "target_name", "=", "None", ",", "configuration_name", "=", "None", ")", ":", "self", ".", "set_flags", "(", "u'CODE_SIGN_IDENTITY[sdk=iphoneos*]'", ",", "code_sign_identity", ",", "target_name", ",", "configuration_name", ")", "self", ".", "set_flags", "(", "u'DEVELOPMENT_TEAM'", ",", "development_team", ",", "target_name", ",", "configuration_name", ")", "self", ".", "set_flags", "(", "u'PROVISIONING_PROFILE'", ",", "provisioning_profile_uuid", ",", "target_name", ",", "configuration_name", ")", "self", ".", "set_flags", "(", "u'PROVISIONING_PROFILE_SPECIFIER'", ",", "provisioning_profile_specifier", ",", "target_name", ",", "configuration_name", ")", "for", "target", "in", "self", ".", "objects", ".", "get_targets", "(", "target_name", ")", ":", "self", ".", "objects", "[", "self", ".", "rootObject", "]", ".", "set_provisioning_style", "(", "PBXProvioningTypes", ".", "MANUAL", ",", "target", ")" ]
Adds the code sign information to the project and creates the appropriate flags in the configuration. In xcode 8+ the provisioning_profile_uuid becomes optional, and the provisioning_profile_specifier becomes mandatory. Contrariwise, in xcode 8< provisioning_profile_uuid becomes mandatory and provisioning_profile_specifier becomes optional. :param code_sign_identity: Code sign identity name. Usually formatted as: 'iPhone Distribution[: <Company name> (MAAYFEXXXX)]' :param development_team: Development team identifier string. Usually formatted as: 'MAAYFEXXXX' :param provisioning_profile_uuid: Provisioning profile UUID string. Usually formatted as: '6f1ffc4d-xxxx-xxxx-xxxx-6dc186280e1e' :param provisioning_profile_specifier: Provisioning profile specifier (a.k.a. name) string. :param target_name: Target name or list of target names to add the flag to or None for every target :param configuration_name: Configuration name to add the flag to or None for every configuration :return:
[ "Adds", "the", "code", "sign", "information", "to", "the", "project", "and", "creates", "the", "appropriate", "flags", "in", "the", "configuration", ".", "In", "xcode", "8", "+", "the", "provisioning_profile_uuid", "becomes", "optional", "and", "the", "provisioning_profile_specifier", "becomes", "mandatory", ".", "Contrariwise", "in", "xcode", "8<", "provisioning_profile_uuid", "becomes", "mandatory", "and", "provisioning_profile_specifier", "becomes", "optional", "." ]
python
train
deepmind/sonnet
sonnet/python/modules/gated_rnn.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/gated_rnn.py#L519-L540
def lstm_with_zoneout(hidden_size, keep_prob_c=0.5, keep_prob_h=0.95, **kwargs): """LSTM with recurrent dropout. Args: hidden_size: the LSTM hidden size. keep_prob_c: the probability to use the new value of the cell state rather than freezing it. keep_prob_h: the probability to use the new value of the hidden state rather than freezing it. **kwargs: Extra keyword arguments to pass to the LSTM. Returns: A tuple (train_lstm, test_lstm) where train_lstm is an LSTM with recurrent dropout enabled to be used for training and test_lstm is the same LSTM without zoneout. """ lstm = LSTM(hidden_size, **kwargs) keep_probs = LSTMState(keep_prob_h, keep_prob_c) train_lstm = ZoneoutWrapper(lstm, keep_probs, is_training=True) test_lstm = ZoneoutWrapper(lstm, keep_probs, is_training=False) return train_lstm, test_lstm
[ "def", "lstm_with_zoneout", "(", "hidden_size", ",", "keep_prob_c", "=", "0.5", ",", "keep_prob_h", "=", "0.95", ",", "*", "*", "kwargs", ")", ":", "lstm", "=", "LSTM", "(", "hidden_size", ",", "*", "*", "kwargs", ")", "keep_probs", "=", "LSTMState", "(", "keep_prob_h", ",", "keep_prob_c", ")", "train_lstm", "=", "ZoneoutWrapper", "(", "lstm", ",", "keep_probs", ",", "is_training", "=", "True", ")", "test_lstm", "=", "ZoneoutWrapper", "(", "lstm", ",", "keep_probs", ",", "is_training", "=", "False", ")", "return", "train_lstm", ",", "test_lstm" ]
LSTM with recurrent dropout. Args: hidden_size: the LSTM hidden size. keep_prob_c: the probability to use the new value of the cell state rather than freezing it. keep_prob_h: the probability to use the new value of the hidden state rather than freezing it. **kwargs: Extra keyword arguments to pass to the LSTM. Returns: A tuple (train_lstm, test_lstm) where train_lstm is an LSTM with recurrent dropout enabled to be used for training and test_lstm is the same LSTM without zoneout.
[ "LSTM", "with", "recurrent", "dropout", "." ]
python
train
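Zoneout differs from ordinary dropout in that a "dropped" unit keeps its previous value rather than being zeroed, so each state element is frozen with probability 1 - keep_prob. A minimal numpy sketch of one training-time state update, independent of Sonnet's ZoneoutWrapper:

import numpy as np

def zoneout_step(prev_state, new_state, keep_prob, rng):
    # True entries take the freshly computed value; False entries stay frozen.
    mask = rng.random(prev_state.shape) < keep_prob
    return np.where(mask, new_state, prev_state)

rng = np.random.default_rng(0)
h_prev = np.zeros(4)
h_new = np.array([0.5, -0.2, 0.9, 0.1])
h = zoneout_step(h_prev, h_new, keep_prob=0.95, rng=rng)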
xu2243051/easyui-menu
easyui/mixins/easyui_mixins.py
https://github.com/xu2243051/easyui-menu/blob/4da0b50cf2d3ddb0f1ec7a4da65fd3c4339f8dfb/easyui/mixins/easyui_mixins.py#L101-L149
def get_filter_dict(self):
        '''
        Process the filter fields.
        rows: how many rows to show per page
        page: the page number, starting at 1
        order: desc or asc
        sort: the field to sort by, used as order_by(sort)
        The field names and formats in the querydict must be directly usable in a query.
        '''
        querydict = self.get_querydict()
        # POST requests carry a csrfmiddlewaretoken set via the cookie; drop it.
        if 'csrfmiddlewaretoken' in querydict:
            querydict.pop('csrfmiddlewaretoken')
        try:
            page = int(querydict.pop('page'))
            rows = int(querydict.pop('rows'))
            setattr(self, 'easyui_page', page)
            setattr(self, 'easyui_rows', rows)
        except KeyError:
            setattr(self, 'easyui_page', None)
            setattr(self, 'easyui_rows', None)
        try:
            # order-> string The default sort order, can only be 'asc' or 'desc'
            # sort-> field name
            # order_by('id')  order_by('-id')
            order = querydict.pop('order')
            sort = querydict.pop('sort')
            if order == 'asc':
                setattr(self, 'easyui_order', sort)
            else:
                setattr(self, 'easyui_order', '-%s' % sort)
        except KeyError:
            setattr(self, 'easyui_order', None)
        # Filter out input fields that were submitted empty.
        remove_key = []
        for key in querydict:
            if querydict[key] == '':
                remove_key.append(key)
        for key in remove_key:
            querydict.pop(key)
        return querydict
[ "def", "get_filter_dict", "(", "self", ")", ":", "querydict", "=", "self", ".", "get_querydict", "(", ")", "# post ,在cookie中设置了csrfmiddlewaretoken", "if", "querydict", ".", "has_key", "(", "'csrfmiddlewaretoken'", ")", ":", "querydict", ".", "pop", "(", "'csrfmiddlewaretoken'", ")", "try", ":", "page", "=", "int", "(", "querydict", ".", "pop", "(", "'page'", ")", ")", "rows", "=", "int", "(", "querydict", ".", "pop", "(", "'rows'", ")", ")", "setattr", "(", "self", ",", "'easyui_page'", ",", "page", ")", "setattr", "(", "self", ",", "'easyui_rows'", ",", "rows", ")", "except", "KeyError", ":", "setattr", "(", "self", ",", "'easyui_page'", ",", "None", ")", "setattr", "(", "self", ",", "'easyui_rows'", ",", "None", ")", "try", ":", "# order-> string The default sort order, can only be 'asc' or 'desc' ", "# sort-> filed name", "# order_by('id') order_by('-id')", "order", "=", "querydict", ".", "pop", "(", "'order'", ")", "sort", "=", "querydict", ".", "pop", "(", "'sort'", ")", "# order = 1", "# sort = 1", "if", "order", "==", "'asc'", ":", "setattr", "(", "self", ",", "'easyui_order'", ",", "sort", ")", "else", ":", "setattr", "(", "self", ",", "'easyui_order'", ",", "'-%s'", "%", "sort", ")", "except", "KeyError", ":", "setattr", "(", "self", ",", "'easyui_order'", ",", "None", ")", "# 过滤掉那些没有填写数据的input字段", "remove_key", "=", "[", "]", "for", "key", "in", "querydict", ":", "if", "querydict", "[", "key", "]", "==", "''", ":", "remove_key", ".", "append", "(", "key", ")", "for", "key", "in", "remove_key", ":", "querydict", ".", "pop", "(", "key", ")", "return", "querydict" ]
Process the filter fields. rows: how many rows to show per page. page: the page number, starting at 1. order: desc or asc. sort: the field to sort by, used as order_by(sort). The field names and formats in the querydict must be directly usable in a query.
[ "处理过滤字段", "rows", "一页显示多少行", "page", "第几页", "1开始", "order", "desc", "asc", "sort", "指定排序的字段", "order_by", "(", "sort", ")", "querydict", "中的字段名和格式需要可以直接查询" ]
python
valid
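The sort handling above reduces to: pop sort and order from the request parameters and prefix a '-' for descending, which is the form Django's order_by expects. As a standalone helper:

def easyui_ordering(params):
    # Returns a Django-style order_by argument, or None if no sort was sent.
    sort = params.pop("sort", None)
    order = params.pop("order", "asc")
    if sort is None:
        return None
    return sort if order == "asc" else "-" + sort

assert easyui_ordering({"sort": "id", "order": "desc"}) == "-id"
assert easyui_ordering({"sort": "name", "order": "asc"}) == "name"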
mixcloud/django-experiments
experiments/admin.py
https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/admin.py#L130-L156
def set_state_view(self, request): """ Changes the experiment state """ if not request.user.has_perm('experiments.change_experiment'): return HttpResponseForbidden() try: state = int(request.POST.get("state", "")) except ValueError: return HttpResponseBadRequest() try: experiment = Experiment.objects.get(name=request.POST.get("experiment")) except Experiment.DoesNotExist: return HttpResponseBadRequest() experiment.state = state if state == 0: experiment.end_date = timezone.now() else: experiment.end_date = None experiment.save() return HttpResponse()
[ "def", "set_state_view", "(", "self", ",", "request", ")", ":", "if", "not", "request", ".", "user", ".", "has_perm", "(", "'experiments.change_experiment'", ")", ":", "return", "HttpResponseForbidden", "(", ")", "try", ":", "state", "=", "int", "(", "request", ".", "POST", ".", "get", "(", "\"state\"", ",", "\"\"", ")", ")", "except", "ValueError", ":", "return", "HttpResponseBadRequest", "(", ")", "try", ":", "experiment", "=", "Experiment", ".", "objects", ".", "get", "(", "name", "=", "request", ".", "POST", ".", "get", "(", "\"experiment\"", ")", ")", "except", "Experiment", ".", "DoesNotExist", ":", "return", "HttpResponseBadRequest", "(", ")", "experiment", ".", "state", "=", "state", "if", "state", "==", "0", ":", "experiment", ".", "end_date", "=", "timezone", ".", "now", "(", ")", "else", ":", "experiment", ".", "end_date", "=", "None", "experiment", ".", "save", "(", ")", "return", "HttpResponse", "(", ")" ]
Changes the experiment state
[ "Changes", "the", "experiment", "state" ]
python
train
deepmipt/DeepPavlov
deeppavlov/models/spelling_correction/levenshtein/tabled_trie.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/spelling_correction/levenshtein/tabled_trie.py#L139-L160
def words(self):
        """
        Returns an iterator over the words contained in the trie
        """
        branch, word, indexes = [self.root], [], [0]
        letters_with_children = [self._get_children_and_letters(self.root)]
        while len(branch) > 0:
            if self.is_final(branch[-1]):
                yield "".join(word)
            while indexes[-1] == len(letters_with_children[-1]):
                indexes.pop()
                letters_with_children.pop()
                branch.pop()
                if len(indexes) == 0:
                    return  # PEP 479: end the generator instead of raising StopIteration
                word.pop()
            next_letter, next_child = letters_with_children[-1][indexes[-1]]
            indexes[-1] += 1
            indexes.append(0)
            word.append(next_letter)
            branch.append(next_child)
            letters_with_children.append(self._get_children_and_letters(branch[-1]))
[ "def", "words", "(", "self", ")", ":", "branch", ",", "word", ",", "indexes", "=", "[", "self", ".", "root", "]", ",", "[", "]", ",", "[", "0", "]", "letters_with_children", "=", "[", "self", ".", "_get_children_and_letters", "(", "self", ".", "root", ")", "]", "while", "len", "(", "branch", ")", ">", "0", ":", "if", "self", ".", "is_final", "(", "branch", "[", "-", "1", "]", ")", ":", "yield", "\"\"", ".", "join", "(", "word", ")", "while", "indexes", "[", "-", "1", "]", "==", "len", "(", "letters_with_children", "[", "-", "1", "]", ")", ":", "indexes", ".", "pop", "(", ")", "letters_with_children", ".", "pop", "(", ")", "branch", ".", "pop", "(", ")", "if", "len", "(", "indexes", ")", "==", "0", ":", "raise", "StopIteration", "(", ")", "word", ".", "pop", "(", ")", "next_letter", ",", "next_child", "=", "letters_with_children", "[", "-", "1", "]", "[", "indexes", "[", "-", "1", "]", "]", "indexes", "[", "-", "1", "]", "+=", "1", "indexes", ".", "append", "(", "0", ")", "word", ".", "append", "(", "next_letter", ")", "branch", ".", "append", "(", "next_child", ")", "letters_with_children", ".", "append", "(", "self", ".", "_get_children_and_letters", "(", "branch", "[", "-", "1", "]", ")", ")" ]
Returns an iterator over the words contained in the trie
[ "Возвращает", "итератор", "по", "словам", "содержащимся", "в", "боре" ]
python
test
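The generator above is a depth-first walk kept iterative with explicit branch/word/index stacks. The same traversal over a plain nested-dict trie — the '$' end-of-word marker is an assumption of this sketch, not the tabled trie's actual layout:

def trie_words(trie):
    # Yield every word in a nested-dict trie; '$' marks a terminal node.
    stack = [(trie, "")]
    while stack:
        node, prefix = stack.pop()
        if "$" in node:
            yield prefix
        for letter, child in sorted(node.items(), reverse=True):
            if letter != "$":
                stack.append((child, prefix + letter))

trie = {"c": {"a": {"t": {"$": True}, "r": {"$": True, "t": {"$": True}}}}}
print(sorted(trie_words(trie)))  # ['car', 'cart', 'cat']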
hwmrocker/smtplibaio
smtplibaio/smtp.py
https://github.com/hwmrocker/smtplibaio/blob/84ce8e45b7e706476739d0efcb416c18ecabbbb6/smtplibaio/smtp.py#L861-L913
async def _auth_cram_md5(self, username, password):
        """
        Performs an authentication attempt using the CRAM-MD5 mechanism.

        Protocol:

        1. Send 'AUTH CRAM-MD5' to server ;
        2. If the server replies with a 334 return code, we can go on:

            1) The challenge (sent by the server) is base64-decoded ;
            2) The decoded challenge is hashed using HMAC-MD5 and the user
               password as key (shared secret) ;
            3) The hashed challenge is converted to a string of lowercase
               hexadecimal digits ;
            4) The username and a space character are prepended to the hex
               digits ;
            5) The concatenation is base64-encoded and sent to the server.
            6) If the server replies with a return code of 235, user is
               authenticated.

        Args:
            username (str): Identifier of the user trying to authenticate.
            password (str): Password for the user.

        Raises:
            ConnectionResetError: If the connection with the server is
                unexpectedly lost.
            SMTPAuthenticationError: If the authentication attempt fails.

        Returns:
            (int, str): A (code, message) 2-tuple containing the server
                response.
        """
        mechanism = "CRAM-MD5"

        code, message = await self.do_cmd("AUTH", mechanism, success=(334,))

        decoded_challenge = base64.b64decode(message)

        challenge_hash = hmac.new(
            key=password.encode("utf-8"), msg=decoded_challenge, digestmod="md5"
        )

        hex_hash = challenge_hash.hexdigest()
        response = "{} {}".format(username, hex_hash)
        encoded_response = SMTP.b64enc(response)

        try:
            code, message = await self.do_cmd(encoded_response, success=(235, 503))
        except SMTPCommandFailedError as e:
            raise SMTPAuthenticationError(e.code, e.message, mechanism)

        return code, message
[ "async", "def", "_auth_cram_md5", "(", "self", ",", "username", ",", "password", ")", ":", "mechanism", "=", "\"CRAM-MD5\"", "code", ",", "message", "=", "await", "self", ".", "do_cmd", "(", "\"AUTH\"", ",", "mechanism", ",", "success", "=", "(", "334", ",", ")", ")", "decoded_challenge", "=", "base64", ".", "b64decode", "(", "message", ")", "challenge_hash", "=", "hmac", ".", "new", "(", "key", "=", "password", ".", "encode", "(", "\"utf-8\"", ")", ",", "msg", "=", "decoded_challenge", ",", "digestmod", "=", "\"md5\"", ")", "hex_hash", "=", "challenge_hash", ".", "hexdigest", "(", ")", "response", "=", "\"{} {}\"", ".", "format", "(", "username", ",", "hex_hash", ")", "encoded_response", "=", "SMTP", ".", "b64enc", "(", "response", ")", "try", ":", "code", ",", "message", "=", "await", "self", ".", "do_cmd", "(", "encoded_response", ",", "success", "=", "(", "235", ",", "503", ")", ")", "except", "SMTPCommandFailedError", "as", "e", ":", "raise", "SMTPAuthenticationError", "(", "e", ".", "code", ",", "e", ".", "message", ",", "mechanism", ")", "return", "code", ",", "message" ]
Performs an authentication attempt using the CRAM-MD5 mechanism. Protocol: 1. Send 'AUTH CRAM-MD5' to server ; 2. If the server replies with a 334 return code, we can go on: 1) The challenge (sent by the server) is base64-decoded ; 2) The decoded challenge is hashed using HMAC-MD5 and the user password as key (shared secret) ; 3) The hashed challenge is converted to a string of lowercase hexadecimal digits ; 4) The username and a space character are prepended to the hex digits ; 5) The concatenation is base64-encoded and sent to the server. 6) If the server replies with a return code of 235, user is authenticated. Args: username (str): Identifier of the user trying to authenticate. password (str): Password for the user. Raises: ConnectionResetError: If the connection with the server is unexpectedly lost. SMTPAuthenticationError: If the authentication attempt fails. Returns: (int, str): A (code, message) 2-tuple containing the server response.
[ "Performs", "an", "authentication", "attemps", "using", "the", "CRAM", "-", "MD5", "mechanism", "." ]
python
train
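Steps 2.1 through 2.5 of the protocol are pure standard library. A self-contained sketch of the response computation (the challenge and credentials are made-up test values):

import base64
import hmac

def cram_md5_response(username, password, encoded_challenge):
    challenge = base64.b64decode(encoded_challenge)                  # step 1
    digest = hmac.new(password.encode("utf-8"), challenge,
                      digestmod="md5").hexdigest()                   # steps 2-3
    reply = "{0} {1}".format(username, digest)                       # step 4
    return base64.b64encode(reply.encode("utf-8")).decode("ascii")   # step 5

challenge = base64.b64encode(b"<1896.697170952@postoffice.example>")
print(cram_md5_response("alice", "secret", challenge))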
secdev/scapy
scapy/contrib/isotp.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/isotp.py#L1110-L1126
def _recv_sf(self, data): """Process a received 'Single Frame' frame""" self.rx_timer.cancel() if self.rx_state != ISOTP_IDLE: warning("RX state was reset because single frame was received") self.rx_state = ISOTP_IDLE length = six.indexbytes(data, 0) & 0xf if len(data) - 1 < length: return 1 msg = data[1:1 + length] self.rx_queue.put(msg) for cb in self.rx_callbacks: cb(msg) self.call_release() return 0
[ "def", "_recv_sf", "(", "self", ",", "data", ")", ":", "self", ".", "rx_timer", ".", "cancel", "(", ")", "if", "self", ".", "rx_state", "!=", "ISOTP_IDLE", ":", "warning", "(", "\"RX state was reset because single frame was received\"", ")", "self", ".", "rx_state", "=", "ISOTP_IDLE", "length", "=", "six", ".", "indexbytes", "(", "data", ",", "0", ")", "&", "0xf", "if", "len", "(", "data", ")", "-", "1", "<", "length", ":", "return", "1", "msg", "=", "data", "[", "1", ":", "1", "+", "length", "]", "self", ".", "rx_queue", ".", "put", "(", "msg", ")", "for", "cb", "in", "self", ".", "rx_callbacks", ":", "cb", "(", "msg", ")", "self", ".", "call_release", "(", ")", "return", "0" ]
Process a received 'Single Frame' frame
[ "Process", "a", "received", "Single", "Frame", "frame" ]
python
train
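In an ISO-TP Single Frame the low nibble of the first byte carries the payload length (the high nibble, 0x0, is the frame type). The parse alone, without the receiver-state bookkeeping:

def parse_single_frame(data):
    # Return the Single Frame payload, or None if the frame is truncated.
    length = data[0] & 0x0F            # SF_DL: low nibble of the PCI byte
    if len(data) - 1 < length:
        return None
    return data[1:1 + length]

assert parse_single_frame(b"\x03\xaa\xbb\xcc") == b"\xaa\xbb\xcc"
assert parse_single_frame(b"\x05\x01\x02") is None   # claims 5 bytes, carries 2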
newville/wxmplot
wxmplot/stackedplotframe.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/stackedplotframe.py#L63-L67
def unzoom(self, event=None, panel='top'): """zoom out 1 level, or to full data range """ panel = self.get_panel(panel) panel.conf.unzoom(event=event) self.panel.set_viewlimits()
[ "def", "unzoom", "(", "self", ",", "event", "=", "None", ",", "panel", "=", "'top'", ")", ":", "panel", "=", "self", ".", "get_panel", "(", "panel", ")", "panel", ".", "conf", ".", "unzoom", "(", "event", "=", "event", ")", "self", ".", "panel", ".", "set_viewlimits", "(", ")" ]
zoom out 1 level, or to full data range
[ "zoom", "out", "1", "level", "or", "to", "full", "data", "range" ]
python
train
totalgood/twip
twip/tweets.py
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/tweets.py#L69-L76
def get_twitter(app_key=None, app_secret=None, search='python', location='', **kwargs): """Location may be specified with a string name or latitude, longitude, radius""" if not app_key: from settings_secret import TWITTER_API_KEY as app_key if not app_secret: from settings_secret import TWITTER_API_SECRET as app_secret twitter = Twython(app_key, app_secret, oauth_version=2) return Twython(app_key, access_token=twitter.obtain_access_token())
[ "def", "get_twitter", "(", "app_key", "=", "None", ",", "app_secret", "=", "None", ",", "search", "=", "'python'", ",", "location", "=", "''", ",", "*", "*", "kwargs", ")", ":", "if", "not", "app_key", ":", "from", "settings_secret", "import", "TWITTER_API_KEY", "as", "app_key", "if", "not", "app_secret", ":", "from", "settings_secret", "import", "TWITTER_API_SECRET", "as", "app_secret", "twitter", "=", "Twython", "(", "app_key", ",", "app_secret", ",", "oauth_version", "=", "2", ")", "return", "Twython", "(", "app_key", ",", "access_token", "=", "twitter", ".", "obtain_access_token", "(", ")", ")" ]
Location may be specified with a string name or latitude, longitude, radius
[ "Location", "may", "be", "specified", "with", "a", "string", "name", "or", "latitude", "longitude", "radius" ]
python
train
mitsei/dlkit
dlkit/json_/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/sessions.py#L2644-L2670
def get_activities_by_genus_type(self, activity_genus_type): """Gets an ``ActivityList`` corresponding to the given activity genus ``Type`` which does not include activities of genus types derived from the specified ``Type``. In plenary mode, the returned list contains all known activities or an error results. Otherwise, the returned list may contain only those activities that are accessible through this session. arg: activity_genus_type (osid.type.Type): an activity genus type return: (osid.learning.ActivityList) - the returned ``Activity`` list raise: NullArgument - ``activity_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources_by_genus_type # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('learning', collection='Activity', runtime=self._runtime) result = collection.find( dict({'genusTypeId': str(activity_genus_type)}, **self._view_filter())).sort('_id', DESCENDING) return objects.ActivityList(result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_activities_by_genus_type", "(", "self", ",", "activity_genus_type", ")", ":", "# Implemented from template for", "# osid.resource.ResourceLookupSession.get_resources_by_genus_type", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONClientValidated", "(", "'learning'", ",", "collection", "=", "'Activity'", ",", "runtime", "=", "self", ".", "_runtime", ")", "result", "=", "collection", ".", "find", "(", "dict", "(", "{", "'genusTypeId'", ":", "str", "(", "activity_genus_type", ")", "}", ",", "*", "*", "self", ".", "_view_filter", "(", ")", ")", ")", ".", "sort", "(", "'_id'", ",", "DESCENDING", ")", "return", "objects", ".", "ActivityList", "(", "result", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
Gets an ``ActivityList`` corresponding to the given activity genus ``Type`` which does not include activities of genus types derived from the specified ``Type``. In plenary mode, the returned list contains all known activities or an error results. Otherwise, the returned list may contain only those activities that are accessible through this session. arg: activity_genus_type (osid.type.Type): an activity genus type return: (osid.learning.ActivityList) - the returned ``Activity`` list raise: NullArgument - ``activity_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "an", "ActivityList", "corresponding", "to", "the", "given", "activity", "genus", "Type", "which", "does", "not", "include", "activities", "of", "genus", "types", "derived", "from", "the", "specified", "Type", "." ]
python
train
pycontribs/pyrax
pyrax/utils.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/utils.py#L470-L484
def wait_for_build(obj, att=None, desired=None, callback=None, interval=None, attempts=None, verbose=None, verbose_atts=None): """ Designed to handle the most common use case for wait_until: an object whose 'status' attribute will end up in either 'ACTIVE' or 'ERROR' state. Since builds don't happen very quickly, the interval will default to 20 seconds to avoid excess polling. """ att = att or "status" desired = desired or ["ACTIVE", "ERROR", "available", "COMPLETED"] interval = interval or 20 attempts = attempts or 0 verbose_atts = verbose_atts or "progress" return wait_until(obj, att, desired, callback=callback, interval=interval, attempts=attempts, verbose=verbose, verbose_atts=verbose_atts)
[ "def", "wait_for_build", "(", "obj", ",", "att", "=", "None", ",", "desired", "=", "None", ",", "callback", "=", "None", ",", "interval", "=", "None", ",", "attempts", "=", "None", ",", "verbose", "=", "None", ",", "verbose_atts", "=", "None", ")", ":", "att", "=", "att", "or", "\"status\"", "desired", "=", "desired", "or", "[", "\"ACTIVE\"", ",", "\"ERROR\"", ",", "\"available\"", ",", "\"COMPLETED\"", "]", "interval", "=", "interval", "or", "20", "attempts", "=", "attempts", "or", "0", "verbose_atts", "=", "verbose_atts", "or", "\"progress\"", "return", "wait_until", "(", "obj", ",", "att", ",", "desired", ",", "callback", "=", "callback", ",", "interval", "=", "interval", ",", "attempts", "=", "attempts", ",", "verbose", "=", "verbose", ",", "verbose_atts", "=", "verbose_atts", ")" ]
Designed to handle the most common use case for wait_until: an object whose 'status' attribute will end up in either 'ACTIVE' or 'ERROR' state. Since builds don't happen very quickly, the interval will default to 20 seconds to avoid excess polling.
[ "Designed", "to", "handle", "the", "most", "common", "use", "case", "for", "wait_until", ":", "an", "object", "whose", "status", "attribute", "will", "end", "up", "in", "either", "ACTIVE", "or", "ERROR", "state", ".", "Since", "builds", "don", "t", "happen", "very", "quickly", "the", "interval", "will", "default", "to", "20", "seconds", "to", "avoid", "excess", "polling", "." ]
python
train
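wait_until itself is not shown in this entry; its shape is a simple polling loop over an attribute. A minimal sketch under the assumption that the object exposes a refresh() method to re-fetch its state (pyrax's actual mechanism may differ):

import time

def wait_until(obj, att, desired, interval=20, attempts=0):
    tries = 0
    while not attempts or tries < attempts:   # attempts == 0 polls forever
        obj.refresh()                         # hypothetical state re-fetch
        if getattr(obj, att) in desired:
            return obj
        tries += 1
        time.sleep(interval)
    return None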
kpdyer/regex2dfa
third_party/re2/lib/codereview/codereview.py
https://github.com/kpdyer/regex2dfa/blob/109f877e60ef0dfcb430f11516d215930b7b9936/third_party/re2/lib/codereview/codereview.py#L3449-L3459
def GetUnknownFiles(self): """Return a list of files unknown to the VCS.""" args = [] status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."], silent_ok=True) unknown_files = [] for line in status.splitlines(): st, fn = line.split(" ", 1) if st == "?": unknown_files.append(fn) return unknown_files
[ "def", "GetUnknownFiles", "(", "self", ")", ":", "args", "=", "[", "]", "status", "=", "RunShell", "(", "[", "\"hg\"", ",", "\"status\"", ",", "\"--rev\"", ",", "self", ".", "base_rev", ",", "\"-u\"", ",", "\".\"", "]", ",", "silent_ok", "=", "True", ")", "unknown_files", "=", "[", "]", "for", "line", "in", "status", ".", "splitlines", "(", ")", ":", "st", ",", "fn", "=", "line", ".", "split", "(", "\" \"", ",", "1", ")", "if", "st", "==", "\"?\"", ":", "unknown_files", ".", "append", "(", "fn", ")", "return", "unknown_files" ]
Return a list of files unknown to the VCS.
[ "Return", "a", "list", "of", "files", "unknown", "to", "the", "VCS", "." ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/terminal/win32_output.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/terminal/win32_output.py#L312-L338
def scroll_buffer_to_prompt(self): """ To be called before drawing the prompt. This should scroll the console to left, with the cursor at the bottom (if possible). """ # Get current window size info = self.get_win32_screen_buffer_info() sr = info.srWindow cursor_pos = info.dwCursorPosition result = SMALL_RECT() # Scroll to the left. result.Left = 0 result.Right = sr.Right - sr.Left # Scroll vertical win_height = sr.Bottom - sr.Top if 0 < sr.Bottom - cursor_pos.Y < win_height - 1: # no vertical scroll if cursor already on the screen result.Bottom = sr.Bottom else: result.Bottom = max(win_height, cursor_pos.Y) result.Top = result.Bottom - win_height # Scroll API self._winapi(windll.kernel32.SetConsoleWindowInfo, self.hconsole, True, byref(result))
[ "def", "scroll_buffer_to_prompt", "(", "self", ")", ":", "# Get current window size", "info", "=", "self", ".", "get_win32_screen_buffer_info", "(", ")", "sr", "=", "info", ".", "srWindow", "cursor_pos", "=", "info", ".", "dwCursorPosition", "result", "=", "SMALL_RECT", "(", ")", "# Scroll to the left.", "result", ".", "Left", "=", "0", "result", ".", "Right", "=", "sr", ".", "Right", "-", "sr", ".", "Left", "# Scroll vertical", "win_height", "=", "sr", ".", "Bottom", "-", "sr", ".", "Top", "if", "0", "<", "sr", ".", "Bottom", "-", "cursor_pos", ".", "Y", "<", "win_height", "-", "1", ":", "# no vertical scroll if cursor already on the screen", "result", ".", "Bottom", "=", "sr", ".", "Bottom", "else", ":", "result", ".", "Bottom", "=", "max", "(", "win_height", ",", "cursor_pos", ".", "Y", ")", "result", ".", "Top", "=", "result", ".", "Bottom", "-", "win_height", "# Scroll API", "self", ".", "_winapi", "(", "windll", ".", "kernel32", ".", "SetConsoleWindowInfo", ",", "self", ".", "hconsole", ",", "True", ",", "byref", "(", "result", ")", ")" ]
To be called before drawing the prompt. This should scroll the console to the left, with the cursor at the bottom (if possible).
[ "To", "be", "called", "before", "drawing", "the", "prompt", ".", "This", "should", "scroll", "the", "console", "to", "left", "with", "the", "cursor", "at", "the", "bottom", "(", "if", "possible", ")", "." ]
python
train
benmack/eo-box
eobox/raster/rasterprocessing.py
https://github.com/benmack/eo-box/blob/a291450c766bf50ea06adcdeb5729a4aad790ed5/eobox/raster/rasterprocessing.py#L115-L131
def windows_df(self): """Get Windows (W) W-row, W-col and W-index of windows e.g. loaded with :meth:`block_windows` as a dataframe. Returns: [dataframe] -- A dataframe with the window information and indices (row, col, index). """ import pandas as pd if self.windows is None: raise Exception("You need to call the block_windows or windows before.") df_wins = [] for row, col, win in zip(self.windows_row, self.windows_col, self.windows): df_wins.append(pd.DataFrame({"row":[row], "col":[col], "Window":[win]})) df_wins = pd.concat(df_wins).set_index(["row", "col"]) df_wins["window_index"] = range(df_wins.shape[0]) df_wins = df_wins.sort_index() return df_wins
[ "def", "windows_df", "(", "self", ")", ":", "import", "pandas", "as", "pd", "if", "self", ".", "windows", "is", "None", ":", "raise", "Exception", "(", "\"You need to call the block_windows or windows before.\"", ")", "df_wins", "=", "[", "]", "for", "row", ",", "col", ",", "win", "in", "zip", "(", "self", ".", "windows_row", ",", "self", ".", "windows_col", ",", "self", ".", "windows", ")", ":", "df_wins", ".", "append", "(", "pd", ".", "DataFrame", "(", "{", "\"row\"", ":", "[", "row", "]", ",", "\"col\"", ":", "[", "col", "]", ",", "\"Window\"", ":", "[", "win", "]", "}", ")", ")", "df_wins", "=", "pd", ".", "concat", "(", "df_wins", ")", ".", "set_index", "(", "[", "\"row\"", ",", "\"col\"", "]", ")", "df_wins", "[", "\"window_index\"", "]", "=", "range", "(", "df_wins", ".", "shape", "[", "0", "]", ")", "df_wins", "=", "df_wins", ".", "sort_index", "(", ")", "return", "df_wins" ]
Get the windows (e.g. loaded with :meth:`block_windows`) as a dataframe, together with their window row, column and index. Returns: [dataframe] -- A dataframe with the window information and indices (row, col, index).
[ "Get", "Windows", "(", "W", ")", "W", "-", "row", "W", "-", "col", "and", "W", "-", "index", "of", "windows", "e", ".", "g", ".", "loaded", "with", ":", "meth", ":", "block_windows", "as", "a", "dataframe", "." ]
python
train
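The dataframe assembly above can be reproduced in a few lines from the three parallel sequences; the window values below are string placeholders standing in for rasterio Window objects:

import pandas as pd

rows, cols = [0, 0, 1, 1], [0, 1, 0, 1]
windows = ["w00", "w01", "w10", "w11"]     # stand-ins for rasterio Windows

df = pd.DataFrame({"row": rows, "col": cols, "Window": windows})
df = df.set_index(["row", "col"])
df["window_index"] = range(len(df))        # running index per window
df = df.sort_index()
print(df)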
mozilla/treeherder
treeherder/autoclassify/matchers.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/matchers.py#L56-L125
def elasticsearch_matcher(text_log_error): """ Query Elasticsearch and score the results. Uses a filtered search checking test, status, expected, and the message as a phrase query with non-alphabet tokens removed. """ # Note: Elasticsearch is currently disabled in all environments (see bug 1527868). if not settings.ELASTICSEARCH_URL: return [] failure_line = text_log_error.metadata.failure_line if failure_line.action != "test_result" or not failure_line.message: logger.debug("Skipped elasticsearch matching") return filters = [ {'term': {'test': failure_line.test}}, {'term': {'status': failure_line.status}}, {'term': {'expected': failure_line.expected}}, {'exists': {'field': 'best_classification'}} ] if failure_line.subtest: query = filters.append({'term': {'subtest': failure_line.subtest}}) query = { 'query': { 'bool': { 'filter': filters, 'must': [{ 'match_phrase': { 'message': failure_line.message[:1024], }, }], }, }, } try: results = search(query) except Exception: logger.error("Elasticsearch lookup failed: %s %s %s %s %s", failure_line.test, failure_line.subtest, failure_line.status, failure_line.expected, failure_line.message) raise if len(results) > 1: args = ( text_log_error.id, failure_line.id, len(results), ) logger.info('text_log_error=%i failure_line=%i Elasticsearch produced %i results' % args) newrelic.agent.record_custom_event('es_matches', { 'num_results': len(results), 'text_log_error_id': text_log_error.id, 'failure_line_id': failure_line.id, }) scorer = MatchScorer(failure_line.message) matches = [(item, item['message']) for item in results] best_match = scorer.best_match(matches) if not best_match: return score, es_result = best_match # TODO: score all results and return # TODO: just return results with score above cut off? return [(score, es_result['best_classification'])]
[ "def", "elasticsearch_matcher", "(", "text_log_error", ")", ":", "# Note: Elasticsearch is currently disabled in all environments (see bug 1527868).", "if", "not", "settings", ".", "ELASTICSEARCH_URL", ":", "return", "[", "]", "failure_line", "=", "text_log_error", ".", "metadata", ".", "failure_line", "if", "failure_line", ".", "action", "!=", "\"test_result\"", "or", "not", "failure_line", ".", "message", ":", "logger", ".", "debug", "(", "\"Skipped elasticsearch matching\"", ")", "return", "filters", "=", "[", "{", "'term'", ":", "{", "'test'", ":", "failure_line", ".", "test", "}", "}", ",", "{", "'term'", ":", "{", "'status'", ":", "failure_line", ".", "status", "}", "}", ",", "{", "'term'", ":", "{", "'expected'", ":", "failure_line", ".", "expected", "}", "}", ",", "{", "'exists'", ":", "{", "'field'", ":", "'best_classification'", "}", "}", "]", "if", "failure_line", ".", "subtest", ":", "query", "=", "filters", ".", "append", "(", "{", "'term'", ":", "{", "'subtest'", ":", "failure_line", ".", "subtest", "}", "}", ")", "query", "=", "{", "'query'", ":", "{", "'bool'", ":", "{", "'filter'", ":", "filters", ",", "'must'", ":", "[", "{", "'match_phrase'", ":", "{", "'message'", ":", "failure_line", ".", "message", "[", ":", "1024", "]", ",", "}", ",", "}", "]", ",", "}", ",", "}", ",", "}", "try", ":", "results", "=", "search", "(", "query", ")", "except", "Exception", ":", "logger", ".", "error", "(", "\"Elasticsearch lookup failed: %s %s %s %s %s\"", ",", "failure_line", ".", "test", ",", "failure_line", ".", "subtest", ",", "failure_line", ".", "status", ",", "failure_line", ".", "expected", ",", "failure_line", ".", "message", ")", "raise", "if", "len", "(", "results", ")", ">", "1", ":", "args", "=", "(", "text_log_error", ".", "id", ",", "failure_line", ".", "id", ",", "len", "(", "results", ")", ",", ")", "logger", ".", "info", "(", "'text_log_error=%i failure_line=%i Elasticsearch produced %i results'", "%", "args", ")", "newrelic", ".", "agent", ".", "record_custom_event", "(", "'es_matches'", ",", "{", "'num_results'", ":", "len", "(", "results", ")", ",", "'text_log_error_id'", ":", "text_log_error", ".", "id", ",", "'failure_line_id'", ":", "failure_line", ".", "id", ",", "}", ")", "scorer", "=", "MatchScorer", "(", "failure_line", ".", "message", ")", "matches", "=", "[", "(", "item", ",", "item", "[", "'message'", "]", ")", "for", "item", "in", "results", "]", "best_match", "=", "scorer", ".", "best_match", "(", "matches", ")", "if", "not", "best_match", ":", "return", "score", ",", "es_result", "=", "best_match", "# TODO: score all results and return", "# TODO: just return results with score above cut off?", "return", "[", "(", "score", ",", "es_result", "[", "'best_classification'", "]", ")", "]" ]
Query Elasticsearch and score the results. Uses a filtered search checking test, status, expected, and the message as a phrase query with non-alphabet tokens removed.
[ "Query", "Elasticsearch", "and", "score", "the", "results", "." ]
python
train
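The query the matcher sends is an ordinary Elasticsearch bool query: exact-term filters that must all hold, plus a single scored match_phrase on the (truncated) message. Building it standalone, with placeholder field values — note that the filter list, not its append() return value, is what belongs in the query:

def build_match_query(test, status, expected, message, subtest=None):
    filters = [
        {"term": {"test": test}},
        {"term": {"status": status}},
        {"term": {"expected": expected}},
        {"exists": {"field": "best_classification"}},
    ]
    if subtest:
        filters.append({"term": {"subtest": subtest}})
    return {
        "query": {
            "bool": {
                "filter": filters,
                # Only the phrase match contributes to the relevance score.
                "must": [{"match_phrase": {"message": message[:1024]}}],
            }
        }
    }

query = build_match_query("test_foo.py", "FAIL", "PASS", "AssertionError: boom")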
the01/python-paps
paps/si/app/sensorServer.py
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorServer.py#L96-L147
def _do_join_packet(self, packet, ip, port): """ React to join packet - add a client to this server :param packet: Packet from client that wants to join :type packet: paps.si.app.message.APPJoinMessage :param ip: Client ip address :type ip: unicode :param port: Client port :type port: int :rtype: None """ self.debug("()") device_id = packet.header.device_id key = u"{}:{}".format(ip, port) if device_id == Id.REQUEST: device_id = self._new_device_id(key) client = self._clients.get(device_id, {}) data = {} if packet.payload: try: data = packet.payload except: data = {} client['device_id'] = device_id client['key'] = key people = [] try: for index, person_dict in enumerate(data['people']): person = Person() person.from_dict(person_dict) person.id = u"{}.{}".format(device_id, person.id) # To get original id -> id.split('.')[0] people.append(person) self.changer.on_person_new(people) except: self.exception("Failed to update people") return # Original ids (without device id) client['people'] = people # Add config to client data? client_dict = dict(client) del client_dict['people'] self._send_packet(ip, port, APPConfigMessage(payload=client_dict)) self._clients[device_id] = client self._key2deviceId[key] = device_id
[ "def", "_do_join_packet", "(", "self", ",", "packet", ",", "ip", ",", "port", ")", ":", "self", ".", "debug", "(", "\"()\"", ")", "device_id", "=", "packet", ".", "header", ".", "device_id", "key", "=", "u\"{}:{}\"", ".", "format", "(", "ip", ",", "port", ")", "if", "device_id", "==", "Id", ".", "REQUEST", ":", "device_id", "=", "self", ".", "_new_device_id", "(", "key", ")", "client", "=", "self", ".", "_clients", ".", "get", "(", "device_id", ",", "{", "}", ")", "data", "=", "{", "}", "if", "packet", ".", "payload", ":", "try", ":", "data", "=", "packet", ".", "payload", "except", ":", "data", "=", "{", "}", "client", "[", "'device_id'", "]", "=", "device_id", "client", "[", "'key'", "]", "=", "key", "people", "=", "[", "]", "try", ":", "for", "index", ",", "person_dict", "in", "enumerate", "(", "data", "[", "'people'", "]", ")", ":", "person", "=", "Person", "(", ")", "person", ".", "from_dict", "(", "person_dict", ")", "person", ".", "id", "=", "u\"{}.{}\"", ".", "format", "(", "device_id", ",", "person", ".", "id", ")", "# To get original id -> id.split('.')[0]", "people", ".", "append", "(", "person", ")", "self", ".", "changer", ".", "on_person_new", "(", "people", ")", "except", ":", "self", ".", "exception", "(", "\"Failed to update people\"", ")", "return", "# Original ids (without device id)", "client", "[", "'people'", "]", "=", "people", "# Add config to client data?", "client_dict", "=", "dict", "(", "client", ")", "del", "client_dict", "[", "'people'", "]", "self", ".", "_send_packet", "(", "ip", ",", "port", ",", "APPConfigMessage", "(", "payload", "=", "client_dict", ")", ")", "self", ".", "_clients", "[", "device_id", "]", "=", "client", "self", ".", "_key2deviceId", "[", "key", "]", "=", "device_id" ]
React to join packet - add a client to this server :param packet: Packet from client that wants to join :type packet: paps.si.app.message.APPJoinMessage :param ip: Client ip address :type ip: unicode :param port: Client port :type port: int :rtype: None
[ "React", "to", "join", "packet", "-", "add", "a", "client", "to", "this", "server" ]
python
train
MattParr/python-atws
atws/picklist.py
https://github.com/MattParr/python-atws/blob/2128baf85d00dcc290ecf911d6c636ac0abe5f33/atws/picklist.py#L208-L219
def lookup(self, label): ''' take a field_name_label and return the id''' if self.is_child: try: return self._children[label] except KeyError: self._children[label] = ChildFieldPicklist(self.parent, label, self.field_name) return self._children[label] else: return get_label_value(label, self._picklist)
[ "def", "lookup", "(", "self", ",", "label", ")", ":", "if", "self", ".", "is_child", ":", "try", ":", "return", "self", ".", "_children", "[", "label", "]", "except", "KeyError", ":", "self", ".", "_children", "[", "label", "]", "=", "ChildFieldPicklist", "(", "self", ".", "parent", ",", "label", ",", "self", ".", "field_name", ")", "return", "self", ".", "_children", "[", "label", "]", "else", ":", "return", "get_label_value", "(", "label", ",", "self", ".", "_picklist", ")" ]
take a field_name_label and return the id
[ "take", "a", "field_name_label", "and", "return", "the", "id" ]
python
train
BerkeleyAutomation/visualization
visualization/visualizer3d.py
https://github.com/BerkeleyAutomation/visualization/blob/f8d038cc65c78f841ef27f99fb2a638f44fa72b6/visualization/visualizer3d.py#L374-L398
def pose(T_frame_world, alpha=0.1, tube_radius=0.005, center_scale=0.01): """Plot a 3D pose as a set of axes (x red, y green, z blue). Parameters ---------- T_frame_world : autolab_core.RigidTransform The pose relative to world coordinates. alpha : float Length of plotted x,y,z axes. tube_radius : float Radius of plotted x,y,z axes. center_scale : float Radius of the pose's origin ball. """ R = T_frame_world.rotation t = T_frame_world.translation x_axis_tf = np.array([t, t + alpha * R[:,0]]) y_axis_tf = np.array([t, t + alpha * R[:,1]]) z_axis_tf = np.array([t, t + alpha * R[:,2]]) Visualizer3D.points(t, color=(1,1,1), scale=center_scale) Visualizer3D.plot3d(x_axis_tf, color=(1,0,0), tube_radius=tube_radius) Visualizer3D.plot3d(y_axis_tf, color=(0,1,0), tube_radius=tube_radius) Visualizer3D.plot3d(z_axis_tf, color=(0,0,1), tube_radius=tube_radius)
[ "def", "pose", "(", "T_frame_world", ",", "alpha", "=", "0.1", ",", "tube_radius", "=", "0.005", ",", "center_scale", "=", "0.01", ")", ":", "R", "=", "T_frame_world", ".", "rotation", "t", "=", "T_frame_world", ".", "translation", "x_axis_tf", "=", "np", ".", "array", "(", "[", "t", ",", "t", "+", "alpha", "*", "R", "[", ":", ",", "0", "]", "]", ")", "y_axis_tf", "=", "np", ".", "array", "(", "[", "t", ",", "t", "+", "alpha", "*", "R", "[", ":", ",", "1", "]", "]", ")", "z_axis_tf", "=", "np", ".", "array", "(", "[", "t", ",", "t", "+", "alpha", "*", "R", "[", ":", ",", "2", "]", "]", ")", "Visualizer3D", ".", "points", "(", "t", ",", "color", "=", "(", "1", ",", "1", ",", "1", ")", ",", "scale", "=", "center_scale", ")", "Visualizer3D", ".", "plot3d", "(", "x_axis_tf", ",", "color", "=", "(", "1", ",", "0", ",", "0", ")", ",", "tube_radius", "=", "tube_radius", ")", "Visualizer3D", ".", "plot3d", "(", "y_axis_tf", ",", "color", "=", "(", "0", ",", "1", ",", "0", ")", ",", "tube_radius", "=", "tube_radius", ")", "Visualizer3D", ".", "plot3d", "(", "z_axis_tf", ",", "color", "=", "(", "0", ",", "0", ",", "1", ")", ",", "tube_radius", "=", "tube_radius", ")" ]
Plot a 3D pose as a set of axes (x red, y green, z blue). Parameters ---------- T_frame_world : autolab_core.RigidTransform The pose relative to world coordinates. alpha : float Length of plotted x,y,z axes. tube_radius : float Radius of plotted x,y,z axes. center_scale : float Radius of the pose's origin ball.
[ "Plot", "a", "3D", "pose", "as", "a", "set", "of", "axes", "(", "x", "red", "y", "green", "z", "blue", ")", "." ]
python
train
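Each axis segment above runs from the translation t to t + alpha times the corresponding column of the rotation matrix. Computing the three endpoint pairs with plain numpy (identity rotation as a placeholder pose):

import numpy as np

R = np.eye(3)                      # placeholder rotation: world-aligned pose
t = np.array([0.1, 0.2, 0.3])      # placeholder translation
alpha = 0.1

# One 2x3 segment per axis: start at t, end at t + alpha * column i of R.
segments = [np.array([t, t + alpha * R[:, i]]) for i in range(3)]
for name, seg in zip("xyz", segments):
    print(name, seg[1] - seg[0])   # each difference has length alpha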
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/reftrack/refobjinter.py
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/reftrack/refobjinter.py#L235-L256
def set_reference(self, refobj, reference):
        """Connect the given reftrack node with the given reference node

        :param refobj: the reftrack node to update
        :type refobj: str
        :param reference: the reference node
        :type reference: str
        :returns: None
        :rtype: None
        :raises: None
        """
        refnodeattr = "%s.referencenode" % refobj
        if reference:
            cmds.connectAttr("%s.message" % reference, refnodeattr, force=True)
            ns = cmds.referenceQuery(reference, namespace=True)
            cmds.setAttr("%s.namespace" % refobj, ns, type="string")
        else:
            conns = cmds.listConnections(refnodeattr, plugs=True)
            if not conns:
                return
            for c in conns:
                cmds.disconnectAttr(c, refnodeattr)
[ "def", "set_reference", "(", "self", ",", "refobj", ",", "reference", ")", ":", "refnodeattr", "=", "\"%s.referencenode\"", "%", "refobj", "if", "reference", ":", "cmds", ".", "connectAttr", "(", "\"%s.message\"", "%", "reference", ",", "refnodeattr", ",", "force", "=", "True", ")", "ns", "=", "cmds", ".", "referenceQuery", "(", "reference", ",", "namespace", "=", "True", ")", "cmds", ".", "setAttr", "(", "\"%s.namespace\"", "%", "refobj", ",", "ns", ",", "type", "=", "\"string\"", ")", "else", ":", "conns", "=", "cmds", ".", "listConnections", "(", "refnodeattr", ",", "plugs", "=", "True", ")", "if", "not", "conns", ":", "return", "for", "c", "in", "conns", ":", "cmds", ".", "disconnectAttr", "(", "c", ",", "refnodeattr", ")" ]
Connect the given reftrack node with the given reference node

:param refobj: the reftrack node to update
:type refobj: str
:param reference: the reference node
:type reference: str
:returns: None
:rtype: None
:raises: None
[ "Connect", "the", "given", "reftrack", "node", "with", "the", "given", "refernce", "node" ]
python
train
duniter/duniter-python-api
duniterpy/helpers.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/helpers.py#L5-L15
def ensure_bytes(data: Union[str, bytes]) -> bytes:
    """
    Convert data in bytes if data is a string

    :param data: Data
    :rtype bytes:
    """
    if isinstance(data, str):
        return bytes(data, 'utf-8')
    return data
[ "def", "ensure_bytes", "(", "data", ":", "Union", "[", "str", ",", "bytes", "]", ")", "->", "bytes", ":", "if", "isinstance", "(", "data", ",", "str", ")", ":", "return", "bytes", "(", "data", ",", "'utf-8'", ")", "return", "data" ]
Convert data in bytes if data is a string

:param data: Data
:rtype bytes:
[ "Convert", "data", "in", "bytes", "if", "data", "is", "a", "string" ]
python
train
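A quick usage sketch for `ensure_bytes` above; this is pure standard-library behavior, so the expected values are safe to state:

assert ensure_bytes('abc') == b'abc'                 # str is encoded as UTF-8
assert ensure_bytes(b'\x00\x01') == b'\x00\x01'      # bytes pass through untouched
assert ensure_bytes('é') == 'é'.encode('utf-8')      # non-ASCII is UTF-8 encoded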
tensorflow/tensor2tensor
tensor2tensor/trax/trainer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trainer.py#L48-L62
def _default_output_dir():
  """Default output directory."""
  try:
    dataset_name = gin.query_parameter("inputs.dataset_name")
  except ValueError:
    dataset_name = "random"
  dir_name = "{model_name}_{dataset_name}_{timestamp}".format(
      model_name=gin.query_parameter("train.model").configurable.name,
      dataset_name=dataset_name,
      timestamp=datetime.datetime.now().strftime("%Y%m%d_%H%M"),
  )
  dir_path = os.path.join("~", "trax", dir_name)
  print()
  trax.log("No --output_dir specified")
  return dir_path
[ "def", "_default_output_dir", "(", ")", ":", "try", ":", "dataset_name", "=", "gin", ".", "query_parameter", "(", "\"inputs.dataset_name\"", ")", "except", "ValueError", ":", "dataset_name", "=", "\"random\"", "dir_name", "=", "\"{model_name}_{dataset_name}_{timestamp}\"", ".", "format", "(", "model_name", "=", "gin", ".", "query_parameter", "(", "\"train.model\"", ")", ".", "configurable", ".", "name", ",", "dataset_name", "=", "dataset_name", ",", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y%m%d_%H%M\"", ")", ",", ")", "dir_path", "=", "os", ".", "path", ".", "join", "(", "\"~\"", ",", "\"trax\"", ",", "dir_name", ")", "print", "(", ")", "trax", ".", "log", "(", "\"No --output_dir specified\"", ")", "return", "dir_path" ]
Default output directory.
[ "Default", "output", "directory", "." ]
python
train
taskcluster/taskcluster-client.py
taskcluster/aio/hooks.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/hooks.py#L157-L174
async def triggerHook(self, *args, **kwargs):
    """
    Trigger a hook

    This endpoint will trigger the creation of a task from a hook definition.

    The HTTP payload must match the hooks `triggerSchema`.  If it does, it is
    provided as the `payload` property of the JSON-e context used to render the
    task template.

    This method takes input: ``v1/trigger-hook.json#``

    This method gives output: ``v1/trigger-hook-response.json#``

    This method is ``stable``
    """

    return await self._makeApiCall(self.funcinfo["triggerHook"], *args, **kwargs)
[ "async", "def", "triggerHook", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"triggerHook\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Trigger a hook

This endpoint will trigger the creation of a task from a hook definition.

The HTTP payload must match the hooks `triggerSchema`.  If it does, it is
provided as the `payload` property of the JSON-e context used to render the
task template.

This method takes input: ``v1/trigger-hook.json#``

This method gives output: ``v1/trigger-hook-response.json#``

This method is ``stable``
[ "Trigger", "a", "hook" ]
python
train
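A hedged sketch of calling `triggerHook` through the async client; the root URL, hook group/id, and payload are placeholders, and the exact response fields are whatever `v1/trigger-hook-response.json#` defines:

import asyncio
from taskcluster.aio import Hooks

async def main():
    hooks = Hooks({'rootUrl': 'https://tc.example.com'})  # placeholder deployment
    result = await hooks.triggerHook('my-group', 'my-hook', {'message': 'hi'})
    print(result)  # task-status response as defined by the output schema

asyncio.run(main())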
SmokinCaterpillar/pypet
pypet/parameter.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/parameter.py#L2466-L2472
def _store(self):
    """Returns a dictionary containing pickle dumps"""
    store_dict = {}
    for key, val in self._data.items():
        store_dict[key] = pickle.dumps(val, protocol=self.v_protocol)
    store_dict[PickleResult.PROTOCOL] = self.v_protocol
    return store_dict
[ "def", "_store", "(", "self", ")", ":", "store_dict", "=", "{", "}", "for", "key", ",", "val", "in", "self", ".", "_data", ".", "items", "(", ")", ":", "store_dict", "[", "key", "]", "=", "pickle", ".", "dumps", "(", "val", ",", "protocol", "=", "self", ".", "v_protocol", ")", "store_dict", "[", "PickleResult", ".", "PROTOCOL", "]", "=", "self", ".", "v_protocol", "return", "store_dict" ]
Returns a dictionary containing pickle dumps
[ "Returns", "a", "dictionary", "containing", "pickle", "dumps" ]
python
test
google/pyringe
pyringe/payload/gdb_service.py
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L557-L587
def _Inject(self, position, call):
  """Injects evaluation of 'call' in a safe location in the inferior.

  Due to the way these injected function calls work, gdb must not be killed
  until the call has returned. If that happens, the inferior will be sent
  SIGTRAP upon attempting to return from the dummy frame gdb constructs for us,
  and will most probably crash.
  Args:
    position: array of pid, tid, framedepth specifying the requested position.
    call: Any expression gdb can evaluate. Usually a function call.
  Raises:
    RuntimeError: if gdb is not being run in synchronous exec mode.
  """
  self.EnsureGdbPosition(position[0], position[1], None)
  self.ClearBreakpoints()
  self._AddThreadSpecificBreakpoint(position)
  gdb.parse_and_eval('%s = 1' % GdbCache.PENDINGCALLS_TO_DO)
  gdb.parse_and_eval('%s = 1' % GdbCache.PENDINGBUSY)
  try:
    # We're "armed", risk the blocking call to Continue
    self.Continue(position)
    # Breakpoint was hit!
    if not gdb.selected_thread().is_stopped():
      # This should not happen. Depending on how gdb is being used, the
      # semantics of self.Continue change, so I'd rather leave this check in
      # here, in case we ever *do* end up changing to async mode.
      raise RuntimeError('Gdb is not acting as expected, is it being run in '
                         'async mode?')
  finally:
    gdb.parse_and_eval('%s = 0' % GdbCache.PENDINGBUSY)
  self.Call(position, call)
[ "def", "_Inject", "(", "self", ",", "position", ",", "call", ")", ":", "self", ".", "EnsureGdbPosition", "(", "position", "[", "0", "]", ",", "position", "[", "1", "]", ",", "None", ")", "self", ".", "ClearBreakpoints", "(", ")", "self", ".", "_AddThreadSpecificBreakpoint", "(", "position", ")", "gdb", ".", "parse_and_eval", "(", "'%s = 1'", "%", "GdbCache", ".", "PENDINGCALLS_TO_DO", ")", "gdb", ".", "parse_and_eval", "(", "'%s = 1'", "%", "GdbCache", ".", "PENDINGBUSY", ")", "try", ":", "# We're \"armed\", risk the blocking call to Continue", "self", ".", "Continue", "(", "position", ")", "# Breakpoint was hit!", "if", "not", "gdb", ".", "selected_thread", "(", ")", ".", "is_stopped", "(", ")", ":", "# This should not happen. Depending on how gdb is being used, the", "# semantics of self.Continue change, so I'd rather leave this check in", "# here, in case we ever *do* end up changing to async mode.", "raise", "RuntimeError", "(", "'Gdb is not acting as expected, is it being run in '", "'async mode?'", ")", "finally", ":", "gdb", ".", "parse_and_eval", "(", "'%s = 0'", "%", "GdbCache", ".", "PENDINGBUSY", ")", "self", ".", "Call", "(", "position", ",", "call", ")" ]
Injects evaluation of 'call' in a safe location in the inferior.

Due to the way these injected function calls work, gdb must not be killed
until the call has returned. If that happens, the inferior will be sent
SIGTRAP upon attempting to return from the dummy frame gdb constructs for us,
and will most probably crash.
Args:
  position: array of pid, tid, framedepth specifying the requested position.
  call: Any expression gdb can evaluate. Usually a function call.
Raises:
  RuntimeError: if gdb is not being run in synchronous exec mode.
[ "Injects", "evaluation", "of", "call", "in", "a", "safe", "location", "in", "the", "inferior", "." ]
python
train
Clarify/clarify_python
clarify_python/clarify.py
https://github.com/Clarify/clarify_python/blob/1a00a5e39f77af9ad7f2e08480a3ab14e7d72aeb/clarify_python/clarify.py#L533-L560
def delete_track_at_index(self, href=None, index=None):
    """Delete a track, or all the tracks.

    'href' the relative href to the track list. May not be None.
    'index' the index of the track to delete. If none is given,
    all tracks are deleted.

    Returns nothing.

    If the response status is not 204, throws an APIException."""

    # Argument error checking.
    assert href is not None

    # Deal with any parameters that need to be passed in.
    data = None
    fields = {}
    if index is not None:
        fields['track'] = index
    if len(fields) > 0:
        data = fields

    raw_result = self.delete(href, data)

    if raw_result.status != 204:
        raise APIException(raw_result.status, raw_result.json)
[ "def", "delete_track_at_index", "(", "self", ",", "href", "=", "None", ",", "index", "=", "None", ")", ":", "# Argument error checking.", "assert", "href", "is", "not", "None", "# Deal with any parameters that need to be passed in.", "data", "=", "None", "fields", "=", "{", "}", "if", "index", "is", "not", "None", ":", "fields", "[", "'track'", "]", "=", "index", "if", "len", "(", "fields", ")", ">", "0", ":", "data", "=", "fields", "raw_result", "=", "self", ".", "delete", "(", "href", ",", "data", ")", "if", "raw_result", ".", "status", "!=", "204", ":", "raise", "APIException", "(", "raw_result", ".", "status", ",", "raw_result", ".", "json", ")" ]
Delete a track, or all the tracks.

'href' the relative href to the track list. May not be None.
'index' the index of the track to delete. If none is given,
all tracks are deleted.

Returns nothing.

If the response status is not 204, throws an APIException.
[ "Delete", "a", "track", "or", "all", "the", "tracks", "." ]
python
train
saltstack/salt
salt/utils/network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L606-L627
def cidr_to_ipv4_netmask(cidr_bits):
    '''
    Returns an IPv4 netmask
    '''
    try:
        cidr_bits = int(cidr_bits)
        if not 1 <= cidr_bits <= 32:
            return ''
    except ValueError:
        return ''

    netmask = ''
    for idx in range(4):
        if idx:
            netmask += '.'
        if cidr_bits >= 8:
            netmask += '255'
            cidr_bits -= 8
        else:
            netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits)))
            cidr_bits = 0
    return netmask
[ "def", "cidr_to_ipv4_netmask", "(", "cidr_bits", ")", ":", "try", ":", "cidr_bits", "=", "int", "(", "cidr_bits", ")", "if", "not", "1", "<=", "cidr_bits", "<=", "32", ":", "return", "''", "except", "ValueError", ":", "return", "''", "netmask", "=", "''", "for", "idx", "in", "range", "(", "4", ")", ":", "if", "idx", ":", "netmask", "+=", "'.'", "if", "cidr_bits", ">=", "8", ":", "netmask", "+=", "'255'", "cidr_bits", "-=", "8", "else", ":", "netmask", "+=", "'{0:d}'", ".", "format", "(", "256", "-", "(", "2", "**", "(", "8", "-", "cidr_bits", ")", ")", ")", "cidr_bits", "=", "0", "return", "netmask" ]
Returns an IPv4 netmask
[ "Returns", "an", "IPv4", "netmask" ]
python
train
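A short check of `cidr_to_ipv4_netmask` above; each octet is pure arithmetic (256 - 2**(8 - remaining_bits)), so the expected strings can be worked out by hand:

assert cidr_to_ipv4_netmask(24) == '255.255.255.0'
assert cidr_to_ipv4_netmask(19) == '255.255.224.0'   # 256 - 2**(8 - 3) = 224
assert cidr_to_ipv4_netmask('33') == ''              # out of range -> empty string
assert cidr_to_ipv4_netmask('abc') == ''             # non-numeric -> empty string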
BlendedSiteGenerator/Blended
blended/__main__.py
https://github.com/BlendedSiteGenerator/Blended/blob/e5865a8633e461a22c86ef6ee98cdd7051c412ac/blended/__main__.py#L256-L280
def zip_built(outdir):
    """Packages the build folder into a zip"""
    print("Zipping the built files!")

    config_file_dir = os.path.join(cwd, "config.py")
    if not os.path.exists(config_file_dir):
        sys.exit(
            "There dosen't seem to be a configuration file. Have you run the init command?")
    else:
        sys.path.insert(0, cwd)
        try:
            from config import website_name
        except:
            sys.exit(
                "Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.")

    # Remove the build folder
    build_dir = os.path.join(cwd, outdir)
    zip_dir = os.path.join(cwd, website_name.replace(" ", "_") +
                           "-build-" + str(datetime.now().date()))
    if os.path.exists(build_dir):
        shutil.make_archive(zip_dir, 'zip', build_dir)
    else:
        print("The " + outdir +
              "/ folder could not be found! Have you run 'blended build' yet?")
[ "def", "zip_built", "(", "outdir", ")", ":", "print", "(", "\"Zipping the built files!\"", ")", "config_file_dir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"config.py\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "config_file_dir", ")", ":", "sys", ".", "exit", "(", "\"There dosen't seem to be a configuration file. Have you run the init command?\"", ")", "else", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "cwd", ")", "try", ":", "from", "config", "import", "website_name", "except", ":", "sys", ".", "exit", "(", "\"Some of the configuration values could not be found! Maybe your config.py is too old. Run 'blended init' to fix.\"", ")", "# Remove the build folder", "build_dir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "outdir", ")", "zip_dir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "website_name", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "+", "\"-build-\"", "+", "str", "(", "datetime", ".", "now", "(", ")", ".", "date", "(", ")", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "build_dir", ")", ":", "shutil", ".", "make_archive", "(", "zip_dir", ",", "'zip'", ",", "build_dir", ")", "else", ":", "print", "(", "\"The \"", "+", "outdir", "+", "\"/ folder could not be found! Have you run 'blended build' yet?\"", ")" ]
Packages the build folder into a zip
[ "Packages", "the", "build", "folder", "into", "a", "zip" ]
python
train
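The zipping step in `zip_built` reduces to a single standard-library call; a minimal self-contained sketch with illustrative paths:

import shutil
from datetime import datetime

build_dir = './build'                                      # folder to package
zip_base = './mysite-build-' + str(datetime.now().date())  # archive name, no extension
shutil.make_archive(zip_base, 'zip', build_dir)            # writes mysite-build-YYYY-MM-DD.zip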
ipazc/mtcnn
mtcnn/mtcnn.py
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/mtcnn.py#L230-L247
def __scale_image(image, scale: float):
    """
    Scales the image to a given scale.
    :param image:
    :param scale:
    :return:
    """
    height, width, _ = image.shape

    width_scaled = int(np.ceil(width * scale))
    height_scaled = int(np.ceil(height * scale))

    im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA)

    # Normalize the image's pixels
    im_data_normalized = (im_data - 127.5) * 0.0078125

    return im_data_normalized
[ "def", "__scale_image", "(", "image", ",", "scale", ":", "float", ")", ":", "height", ",", "width", ",", "_", "=", "image", ".", "shape", "width_scaled", "=", "int", "(", "np", ".", "ceil", "(", "width", "*", "scale", ")", ")", "height_scaled", "=", "int", "(", "np", ".", "ceil", "(", "height", "*", "scale", ")", ")", "im_data", "=", "cv2", ".", "resize", "(", "image", ",", "(", "width_scaled", ",", "height_scaled", ")", ",", "interpolation", "=", "cv2", ".", "INTER_AREA", ")", "# Normalize the image's pixels", "im_data_normalized", "=", "(", "im_data", "-", "127.5", ")", "*", "0.0078125", "return", "im_data_normalized" ]
Scales the image to a given scale.
:param image:
:param scale:
:return:
[ "Scales", "the", "image", "to", "a", "given", "scale", ".", ":", "param", "image", ":", ":", "param", "scale", ":", ":", "return", ":" ]
python
train
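A small sketch of the normalization in `__scale_image` above: 0.0078125 is exactly 1/128, so pixel values in [0, 255] map to roughly [-1, 1]. The input array is synthetic:

import numpy as np

pixels = np.array([0, 127.5, 255], dtype=np.float32)
normalized = (pixels - 127.5) * 0.0078125   # same as (pixels - 127.5) / 128
print(normalized)                           # [-0.99609375  0.          0.99609375]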
flatangle/flatlib
flatlib/dignities/accidental.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/dignities/accidental.py#L210-L213
def inHouseJoy(self):
    """ Returns if the object is in its house of joy. """
    house = self.house()
    return props.object.houseJoy[self.obj.id] == house.id
[ "def", "inHouseJoy", "(", "self", ")", ":", "house", "=", "self", ".", "house", "(", ")", "return", "props", ".", "object", ".", "houseJoy", "[", "self", ".", "obj", ".", "id", "]", "==", "house", ".", "id" ]
Returns if the object is in its house of joy.
[ "Returns", "if", "the", "object", "is", "in", "its", "house", "of", "joy", "." ]
python
train
72squared/redpipe
redpipe/futures.py
https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/futures.py#L625-L654
def _json_default_encoder(func):
    """
    Monkey-Patch the core json encoder library.
    This isn't as bad as it sounds.
    We override the default method so that if an object
    falls through and can't be encoded normally, we see if it is
    a Future object and return the result to be encoded.

    I set a special attribute on the Future object so I can tell
    that's what it is, and can grab the result.

    If that doesn't work, I fall back to the earlier behavior.
    The nice thing about patching the library this way is that it
    won't interfere with existing code and it can itself be wrapped
    by other methods.
    So it's very extensible.

    :param func: the JSONEncoder.default method.
    :return: an object that can be json serialized.
    """

    @wraps(func)
    def inner(self, o):
        try:
            return o._redpipe_future_result  # noqa
        except AttributeError:
            pass
        return func(self, o)

    return inner
[ "def", "_json_default_encoder", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner", "(", "self", ",", "o", ")", ":", "try", ":", "return", "o", ".", "_redpipe_future_result", "# noqa", "except", "AttributeError", ":", "pass", "return", "func", "(", "self", ",", "o", ")", "return", "inner" ]
Monkey-Patch the core json encoder library.
This isn't as bad as it sounds.
We override the default method so that if an object
falls through and can't be encoded normally, we see if it is
a Future object and return the result to be encoded.

I set a special attribute on the Future object so I can tell
that's what it is, and can grab the result.

If that doesn't work, I fall back to the earlier behavior.
The nice thing about patching the library this way is that it
won't interfere with existing code and it can itself be wrapped
by other methods.
So it's very extensible.

:param func: the JSONEncoder.default method.
:return: an object that can be json serialized.
[ "Monkey", "-", "Patch", "the", "core", "json", "encoder", "library", ".", "This", "isn", "t", "as", "bad", "as", "it", "sounds", ".", "We", "override", "the", "default", "method", "so", "that", "if", "an", "object", "falls", "through", "and", "can", "t", "be", "encoded", "normally", "we", "see", "if", "it", "is", "a", "Future", "object", "and", "return", "the", "result", "to", "be", "encoded", "." ]
python
train
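A standalone sketch of the same monkey-patching pattern, with a made-up `_my_result` attribute standing in for redpipe's private marker:

import json
from functools import wraps

def _patch_default(func):
    @wraps(func)
    def inner(self, o):
        try:
            return o._my_result          # hypothetical marker attribute
        except AttributeError:
            pass
        return func(self, o)             # fall back to the original behavior
    return inner

json.JSONEncoder.default = _patch_default(json.JSONEncoder.default)

class Box:
    def __init__(self, value):
        self._my_result = value

print(json.dumps({'answer': Box(42)}))   # {"answer": 42}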
dfunckt/django-connections
connections/models.py
https://github.com/dfunckt/django-connections/blob/15f40d187df673da6e6245ccfeca3cf13355f0ab/connections/models.py#L158-L164
def connections_to_object(self, to_obj):
    """
    Returns a ``Connection`` query set matching all connections with
    the given object as a destination.
    """
    self._validate_ctypes(None, to_obj)
    return self.connections.filter(to_pk=to_obj.pk)
[ "def", "connections_to_object", "(", "self", ",", "to_obj", ")", ":", "self", ".", "_validate_ctypes", "(", "None", ",", "to_obj", ")", "return", "self", ".", "connections", ".", "filter", "(", "to_pk", "=", "to_obj", ".", "pk", ")" ]
Returns a ``Connection`` query set matching all connections with the given object as a destination.
[ "Returns", "a", "Connection", "query", "set", "matching", "all", "connections", "with", "the", "given", "object", "as", "a", "destination", "." ]
python
train
maas/python-libmaas
maas/client/viscera/maas.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/maas.py#L318-L345
async def _roundtrip(cls):
    """Testing helper: gets each value and sets it again."""
    getters = {
        name[4:]: getattr(cls, name) for name in dir(cls)
        if name.startswith("get_") and name != "get_config"
    }
    setters = {
        name[4:]: getattr(cls, name) for name in dir(cls)
        if name.startswith("set_") and name != "set_config"
    }
    for name, getter in getters.items():
        print(">>>", name)
        value = await getter()
        print(" ->", repr(value))
        print("   ", type(value))
        setter = setters[name]
        try:
            await setter(value)
        except CallError as error:
            print(error)
            print(error.content.decode("utf-8", "replace"))
        else:
            value2 = await getter()
            if value2 != value:
                print(
                    "!!! Round-trip failed:", repr(value),
                    "-->", repr(value2))
[ "async", "def", "_roundtrip", "(", "cls", ")", ":", "getters", "=", "{", "name", "[", "4", ":", "]", ":", "getattr", "(", "cls", ",", "name", ")", "for", "name", "in", "dir", "(", "cls", ")", "if", "name", ".", "startswith", "(", "\"get_\"", ")", "and", "name", "!=", "\"get_config\"", "}", "setters", "=", "{", "name", "[", "4", ":", "]", ":", "getattr", "(", "cls", ",", "name", ")", "for", "name", "in", "dir", "(", "cls", ")", "if", "name", ".", "startswith", "(", "\"set_\"", ")", "and", "name", "!=", "\"set_config\"", "}", "for", "name", ",", "getter", "in", "getters", ".", "items", "(", ")", ":", "print", "(", "\">>>\"", ",", "name", ")", "value", "=", "await", "getter", "(", ")", "print", "(", "\" ->\"", ",", "repr", "(", "value", ")", ")", "print", "(", "\" \"", ",", "type", "(", "value", ")", ")", "setter", "=", "setters", "[", "name", "]", "try", ":", "await", "setter", "(", "value", ")", "except", "CallError", "as", "error", ":", "print", "(", "error", ")", "print", "(", "error", ".", "content", ".", "decode", "(", "\"utf-8\"", ",", "\"replace\"", ")", ")", "else", ":", "value2", "=", "await", "getter", "(", ")", "if", "value2", "!=", "value", ":", "print", "(", "\"!!! Round-trip failed:\"", ",", "repr", "(", "value", ")", ",", "\"-->\"", ",", "repr", "(", "value2", ")", ")" ]
Testing helper: gets each value and sets it again.
[ "Testing", "helper", ":", "gets", "each", "value", "and", "sets", "it", "again", "." ]
python
train
rh-marketingops/dwm
dwm/dwmmain.py
https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/dwmmain.py#L259-L289
def _norm_include(self, record, hist=None):
    """
    Normalization 'normIncludes'

    replace 'almost' values based on at least one of the following:
    includes strings, excludes strings, starts with string, ends with string

    :param dict record: dictionary of values to validate
    :param dict hist: existing input of history values
    """
    if hist is None:
        hist = {}

    for field in record:
        if record[field] != '' and record[field] is not None:
            if field in self.fields:
                if 'normIncludes' in self.fields[field]['lookup']:
                    field_val_new, hist, _ = IncludesLookup(
                        fieldVal=record[field],
                        lookupType='normIncludes',
                        db=self.mongo,
                        fieldName=field,
                        histObj=hist)

                    record[field] = field_val_new

    return record, hist
[ "def", "_norm_include", "(", "self", ",", "record", ",", "hist", "=", "None", ")", ":", "if", "hist", "is", "None", ":", "hist", "=", "{", "}", "for", "field", "in", "record", ":", "if", "record", "[", "field", "]", "!=", "''", "and", "record", "[", "field", "]", "is", "not", "None", ":", "if", "field", "in", "self", ".", "fields", ":", "if", "'normIncludes'", "in", "self", ".", "fields", "[", "field", "]", "[", "'lookup'", "]", ":", "field_val_new", ",", "hist", ",", "_", "=", "IncludesLookup", "(", "fieldVal", "=", "record", "[", "field", "]", ",", "lookupType", "=", "'normIncludes'", ",", "db", "=", "self", ".", "mongo", ",", "fieldName", "=", "field", ",", "histObj", "=", "hist", ")", "record", "[", "field", "]", "=", "field_val_new", "return", "record", ",", "hist" ]
Normalization 'normIncludes'

replace 'almost' values based on at least one of the following:
includes strings, excludes strings, starts with string, ends with string

:param dict record: dictionary of values to validate
:param dict hist: existing input of history values
[ "Normalization", "normIncludes", "replace", "almost", "values", "based", "on", "at", "least", "one", "of", "the", "following", ":", "includes", "strings", "excludes", "strings", "starts", "with", "string", "ends", "with", "string" ]
python
train
ecederstrand/exchangelib
exchangelib/util.py
https://github.com/ecederstrand/exchangelib/blob/736347b337c239fcd6d592db5b29e819f753c1ba/exchangelib/util.py#L391-L399
def is_xml(text):
    """
    Helper function. Lightweight test if response is an XML doc
    """
    # BOM_UTF8 is an UTF-8 byte order mark which may precede the XML from an Exchange server
    bom_len = len(BOM_UTF8)
    if text[:bom_len] == BOM_UTF8:
        return text[bom_len:bom_len + 5] == b'<?xml'
    return text[:5] == b'<?xml'
[ "def", "is_xml", "(", "text", ")", ":", "# BOM_UTF8 is an UTF-8 byte order mark which may precede the XML from an Exchange server", "bom_len", "=", "len", "(", "BOM_UTF8", ")", "if", "text", "[", ":", "bom_len", "]", "==", "BOM_UTF8", ":", "return", "text", "[", "bom_len", ":", "bom_len", "+", "5", "]", "==", "b'<?xml'", "return", "text", "[", ":", "5", "]", "==", "b'<?xml'" ]
Helper function. Lightweight test if response is an XML doc
[ "Helper", "function", ".", "Lightweight", "test", "if", "response", "is", "an", "XML", "doc" ]
python
train
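A quick standalone check of the `is_xml` logic above, using the standard library's `codecs.BOM_UTF8` constant:

from codecs import BOM_UTF8

assert is_xml(b'<?xml version="1.0"?><a/>')
assert is_xml(BOM_UTF8 + b'<?xml version="1.0"?><a/>')   # BOM-prefixed responses pass too
assert not is_xml(b'{"json": true}')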
pandas-dev/pandas
pandas/io/json/table_schema.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/table_schema.py#L71-L89
def set_default_names(data):
    """Sets index names to 'index' for regular, or 'level_x' for Multi"""
    if com._all_not_none(*data.index.names):
        nms = data.index.names
        if len(nms) == 1 and data.index.name == 'index':
            warnings.warn("Index name of 'index' is not round-trippable")
        elif len(nms) > 1 and any(x.startswith('level_') for x in nms):
            warnings.warn("Index names beginning with 'level_' are not "
                          "round-trippable")
        return data

    data = data.copy()
    if data.index.nlevels > 1:
        names = [name if name is not None else 'level_{}'.format(i)
                 for i, name in enumerate(data.index.names)]
        data.index.names = names
    else:
        data.index.name = data.index.name or 'index'
    return data
[ "def", "set_default_names", "(", "data", ")", ":", "if", "com", ".", "_all_not_none", "(", "*", "data", ".", "index", ".", "names", ")", ":", "nms", "=", "data", ".", "index", ".", "names", "if", "len", "(", "nms", ")", "==", "1", "and", "data", ".", "index", ".", "name", "==", "'index'", ":", "warnings", ".", "warn", "(", "\"Index name of 'index' is not round-trippable\"", ")", "elif", "len", "(", "nms", ")", ">", "1", "and", "any", "(", "x", ".", "startswith", "(", "'level_'", ")", "for", "x", "in", "nms", ")", ":", "warnings", ".", "warn", "(", "\"Index names beginning with 'level_' are not \"", "\"round-trippable\"", ")", "return", "data", "data", "=", "data", ".", "copy", "(", ")", "if", "data", ".", "index", ".", "nlevels", ">", "1", ":", "names", "=", "[", "name", "if", "name", "is", "not", "None", "else", "'level_{}'", ".", "format", "(", "i", ")", "for", "i", ",", "name", "in", "enumerate", "(", "data", ".", "index", ".", "names", ")", "]", "data", ".", "index", ".", "names", "=", "names", "else", ":", "data", ".", "index", ".", "name", "=", "data", ".", "index", ".", "name", "or", "'index'", "return", "data" ]
Sets index names to 'index' for regular, or 'level_x' for Multi
[ "Sets", "index", "names", "to", "index", "for", "regular", "or", "level_x", "for", "Multi" ]
python
train
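A minimal illustration of the renaming rules in `set_default_names` above, reproduced with plain pandas (the function itself is internal to `pandas.io.json`):

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})            # default RangeIndex has no name
df.index.name = df.index.name or 'index'    # the single-level branch
print(df.index.name)                        # 'index'

midx = pd.MultiIndex.from_tuples([(0, 'x'), (1, 'y')])   # unnamed levels
names = [n if n is not None else 'level_{}'.format(i)
         for i, n in enumerate(midx.names)]
print(names)                                # ['level_0', 'level_1']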
MagicStack/asyncpg
asyncpg/connection.py
https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L144-L155
async def remove_listener(self, channel, callback):
    """Remove a listening callback on the specified channel."""
    if self.is_closed():
        return
    if channel not in self._listeners:
        return
    if callback not in self._listeners[channel]:
        return
    self._listeners[channel].remove(callback)
    if not self._listeners[channel]:
        del self._listeners[channel]
        await self.fetch('UNLISTEN {}'.format(utils._quote_ident(channel)))
[ "async", "def", "remove_listener", "(", "self", ",", "channel", ",", "callback", ")", ":", "if", "self", ".", "is_closed", "(", ")", ":", "return", "if", "channel", "not", "in", "self", ".", "_listeners", ":", "return", "if", "callback", "not", "in", "self", ".", "_listeners", "[", "channel", "]", ":", "return", "self", ".", "_listeners", "[", "channel", "]", ".", "remove", "(", "callback", ")", "if", "not", "self", ".", "_listeners", "[", "channel", "]", ":", "del", "self", ".", "_listeners", "[", "channel", "]", "await", "self", ".", "fetch", "(", "'UNLISTEN {}'", ".", "format", "(", "utils", ".", "_quote_ident", "(", "channel", ")", ")", ")" ]
Remove a listening callback on the specified channel.
[ "Remove", "a", "listening", "callback", "on", "the", "specified", "channel", "." ]
python
train
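A hedged sketch of the matching listener lifecycle in asyncpg; the DSN is a placeholder, and the callback follows asyncpg's (connection, pid, channel, payload) convention:

import asyncio
import asyncpg

async def main():
    conn = await asyncpg.connect('postgresql://user@localhost/db')   # placeholder DSN

    def on_notify(connection, pid, channel, payload):
        print('got', channel, payload)

    await conn.add_listener('events', on_notify)      # issues LISTEN
    await conn.execute("NOTIFY events, 'hello'")
    await asyncio.sleep(0.1)                          # let the notification arrive
    await conn.remove_listener('events', on_notify)   # issues UNLISTEN once no callbacks remain
    await conn.close()

asyncio.run(main())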
LudovicRousseau/pyscard
smartcard/wx/SimpleSCardAppFrame.py
https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/wx/SimpleSCardAppFrame.py#L159-L168
def OnActivateReader(self, event):
    """Called when the user activates a reader in the tree."""
    item = event.GetItem()
    if item:
        itemdata = self.readertreepanel.readertreectrl.GetItemPyData(item)
        if isinstance(itemdata, smartcard.Card.Card):
            self.ActivateCard(itemdata)
        elif isinstance(itemdata, smartcard.reader.Reader.Reader):
            self.dialogpanel.OnActivateReader(itemdata)
    event.Skip()
[ "def", "OnActivateReader", "(", "self", ",", "event", ")", ":", "item", "=", "event", ".", "GetItem", "(", ")", "if", "item", ":", "itemdata", "=", "self", ".", "readertreepanel", ".", "readertreectrl", ".", "GetItemPyData", "(", "item", ")", "if", "isinstance", "(", "itemdata", ",", "smartcard", ".", "Card", ".", "Card", ")", ":", "self", ".", "ActivateCard", "(", "itemdata", ")", "elif", "isinstance", "(", "itemdata", ",", "smartcard", ".", "reader", ".", "Reader", ".", "Reader", ")", ":", "self", ".", "dialogpanel", ".", "OnActivateReader", "(", "itemdata", ")", "event", ".", "Skip", "(", ")" ]
Called when the user activates a reader in the tree.
[ "Called", "when", "the", "user", "activates", "a", "reader", "in", "the", "tree", "." ]
python
train
jpablo128/simplystatic
simplystatic/s2site.py
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/simplystatic/s2site.py#L66-L69
def dir_empty(d):
    '''Return True if given directory is empty, false otherwise.'''
    flist = glob.glob(os.path.join(d, '*'))
    return (len(flist) == 0)
[ "def", "dir_empty", "(", "d", ")", ":", "flist", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "d", ",", "'*'", ")", ")", "return", "(", "len", "(", "flist", ")", "==", "0", ")" ]
Return True if given directory is empty, false otherwise.
[ "Return", "True", "if", "given", "directory", "is", "empty", "false", "otherwise", "." ]
python
train
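Quick usage of `dir_empty` above, plus one subtlety worth noting: `glob('*')` skips dotfiles, so a directory holding only hidden files still reports as empty:

import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    print(dir_empty(d))                          # True
    open(os.path.join(d, 'f.txt'), 'w').close()
    print(dir_empty(d))                          # False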
rigetti/pyquil
pyquil/gate_matrices.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/gate_matrices.py#L243-L251
def depolarizing_operators(p):
    """
    Return the depolarizing channel Kraus operators
    """
    k0 = np.sqrt(1.0 - p) * I
    k1 = np.sqrt(p / 3.0) * X
    k2 = np.sqrt(p / 3.0) * Y
    k3 = np.sqrt(p / 3.0) * Z
    return k0, k1, k2, k3
[ "def", "depolarizing_operators", "(", "p", ")", ":", "k0", "=", "np", ".", "sqrt", "(", "1.0", "-", "p", ")", "*", "I", "k1", "=", "np", ".", "sqrt", "(", "p", "/", "3.0", ")", "*", "X", "k2", "=", "np", ".", "sqrt", "(", "p", "/", "3.0", ")", "*", "Y", "k3", "=", "np", ".", "sqrt", "(", "p", "/", "3.0", ")", "*", "Z", "return", "k0", ",", "k1", ",", "k2", ",", "k3" ]
Return the depolarizing channel Kraus operators
[ "Return", "the", "phase", "damping", "Kraus", "operators" ]
python
train
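A quick numpy sanity check that the four operators above form a trace-preserving channel: the Kraus completeness relation sum_k K_k^† K_k = (1-p)I + (p/3)(X^2 + Y^2 + Z^2) = I holds for any p:

import numpy as np

I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])

p = 0.3
ks = [np.sqrt(1 - p) * I2] + [np.sqrt(p / 3) * P for P in (X, Y, Z)]
total = sum(k.conj().T @ k for k in ks)
assert np.allclose(total, I2)   # completeness: the channel is trace-preserving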
limodou/uliweb
uliweb/lib/werkzeug/datastructures.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/datastructures.py#L2242-L2251
def set(self, start, stop, length=None, units='bytes'):
    """Simple method to update the ranges."""
    assert is_byte_range_valid(start, stop, length), \
        'Bad range provided'
    self._units = units
    self._start = start
    self._stop = stop
    self._length = length
    if self.on_update is not None:
        self.on_update(self)
[ "def", "set", "(", "self", ",", "start", ",", "stop", ",", "length", "=", "None", ",", "units", "=", "'bytes'", ")", ":", "assert", "is_byte_range_valid", "(", "start", ",", "stop", ",", "length", ")", ",", "'Bad range provided'", "self", ".", "_units", "=", "units", "self", ".", "_start", "=", "start", "self", ".", "_stop", "=", "stop", "self", ".", "_length", "=", "length", "if", "self", ".", "on_update", "is", "not", "None", ":", "self", ".", "on_update", "(", "self", ")" ]
Simple method to update the ranges.
[ "Simple", "method", "to", "update", "the", "ranges", "." ]
python
train
python-constraint/python-constraint
constraint/__init__.py
https://github.com/python-constraint/python-constraint/blob/e23fe9852cddddf1c3e258e03f2175df24b4c702/constraint/__init__.py#L787-L797
def popState(self):
    """
    Restore domain state from the top of the stack

    Variables hidden since the last popped state are then available
    again.
    """
    diff = self._states.pop() - len(self)
    if diff:
        self.extend(self._hidden[-diff:])
        del self._hidden[-diff:]
[ "def", "popState", "(", "self", ")", ":", "diff", "=", "self", ".", "_states", ".", "pop", "(", ")", "-", "len", "(", "self", ")", "if", "diff", ":", "self", ".", "extend", "(", "self", ".", "_hidden", "[", "-", "diff", ":", "]", ")", "del", "self", ".", "_hidden", "[", "-", "diff", ":", "]" ]
Restore domain state from the top of the stack

Variables hidden since the last popped state are then available
again.
[ "Restore", "domain", "state", "from", "the", "top", "of", "the", "stack" ]
python
train
hydraplatform/hydra-base
hydra_base/lib/units.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/units.py#L233-L242
def get_unit(unit_id, **kwargs):
    """
    Returns a single unit
    """
    try:
        unit = db.DBSession.query(Unit).filter(Unit.id==unit_id).one()
        return JSONObject(unit)
    except NoResultFound:
        # The unit does not exist
        raise ResourceNotFoundError("Unit %s not found"%(unit_id))
[ "def", "get_unit", "(", "unit_id", ",", "*", "*", "kwargs", ")", ":", "try", ":", "unit", "=", "db", ".", "DBSession", ".", "query", "(", "Unit", ")", ".", "filter", "(", "Unit", ".", "id", "==", "unit_id", ")", ".", "one", "(", ")", "return", "JSONObject", "(", "unit", ")", "except", "NoResultFound", ":", "# The dimension does not exist", "raise", "ResourceNotFoundError", "(", "\"Unit %s not found\"", "%", "(", "unit_id", ")", ")" ]
Returns a single unit
[ "Returns", "a", "single", "unit" ]
python
train
datacamp/pythonwhat
pythonwhat/checks/check_object.py
https://github.com/datacamp/pythonwhat/blob/ffbf7f8436a51f77c22f3bed75ba3bc37a5c666f/pythonwhat/checks/check_object.py#L299-L369
def check_keys(state, key, missing_msg=None, expand_msg=None):
    """Check whether an object (dict, DataFrame, etc) has a key.

    ``check_keys()`` can currently only be used when chained from ``check_object()``,
    the function that is used to 'zoom in' on the object of interest.

    Args:
        key (str): Name of the key that the object should have.
        missing_msg (str): When specified, this overrides the automatically
            generated message in case the key does not exist.
        expand_msg (str): If specified, this overrides any messages that are
            prepended by previous SCT chains.
        state (State): The state that is passed in through the SCT chain
            (don't specify this).

    :Example:

        Student code and solution code::

            x = {'a': 2}

        SCT::

            # Verify that x contains a key a
            Ex().check_object('x').check_keys('a')

            # Verify that x contains a key a and a is correct.
            Ex().check_object('x').check_keys('a').has_equal_value()
    """
    state.assert_is(["object_assignments"], "is_instance", ["check_object", "check_df"])

    if missing_msg is None:
        missing_msg = "There is no {{ 'column' if 'DataFrame' in parent.typestr else 'key' }} `'{{key}}'`."
    if expand_msg is None:
        expand_msg = "Did you correctly set the {{ 'column' if 'DataFrame' in parent.typestr else 'key' }} `'{{key}}'`? "

    sol_name = state.solution_parts.get("name")
    stu_name = state.student_parts.get("name")

    if not isDefinedCollInProcess(sol_name, key, state.solution_process):
        raise InstructorError(
            "`check_keys()` couldn't find key `%s` in object `%s` in the solution process."
            % (key, sol_name)
        )

    # check if key available
    _msg = state.build_message(missing_msg, {"key": key})
    state.do_test(
        DefinedCollProcessTest(
            stu_name, key, state.student_process, Feedback(_msg, state)
        )
    )

    def get_part(name, key, highlight):
        if isinstance(key, str):
            slice_val = ast.Str(s=key)
        else:
            slice_val = ast.parse(str(key)).body[0].value

        expr = ast.Subscript(
            value=ast.Name(id=name, ctx=ast.Load()),
            slice=ast.Index(value=slice_val),
            ctx=ast.Load(),
        )
        ast.fix_missing_locations(expr)
        return {"node": expr, "highlight": highlight}

    stu_part = get_part(stu_name, key, state.student_parts.get("highlight"))
    sol_part = get_part(sol_name, key, state.solution_parts.get("highlight"))
    append_message = {"msg": expand_msg, "kwargs": {"key": key}}
    child = part_to_child(stu_part, sol_part, append_message, state)
    return child
[ "def", "check_keys", "(", "state", ",", "key", ",", "missing_msg", "=", "None", ",", "expand_msg", "=", "None", ")", ":", "state", ".", "assert_is", "(", "[", "\"object_assignments\"", "]", ",", "\"is_instance\"", ",", "[", "\"check_object\"", ",", "\"check_df\"", "]", ")", "if", "missing_msg", "is", "None", ":", "missing_msg", "=", "\"There is no {{ 'column' if 'DataFrame' in parent.typestr else 'key' }} `'{{key}}'`.\"", "if", "expand_msg", "is", "None", ":", "expand_msg", "=", "\"Did you correctly set the {{ 'column' if 'DataFrame' in parent.typestr else 'key' }} `'{{key}}'`? \"", "sol_name", "=", "state", ".", "solution_parts", ".", "get", "(", "\"name\"", ")", "stu_name", "=", "state", ".", "student_parts", ".", "get", "(", "\"name\"", ")", "if", "not", "isDefinedCollInProcess", "(", "sol_name", ",", "key", ",", "state", ".", "solution_process", ")", ":", "raise", "InstructorError", "(", "\"`check_keys()` couldn't find key `%s` in object `%s` in the solution process.\"", "%", "(", "key", ",", "sol_name", ")", ")", "# check if key available", "_msg", "=", "state", ".", "build_message", "(", "missing_msg", ",", "{", "\"key\"", ":", "key", "}", ")", "state", ".", "do_test", "(", "DefinedCollProcessTest", "(", "stu_name", ",", "key", ",", "state", ".", "student_process", ",", "Feedback", "(", "_msg", ",", "state", ")", ")", ")", "def", "get_part", "(", "name", ",", "key", ",", "highlight", ")", ":", "if", "isinstance", "(", "key", ",", "str", ")", ":", "slice_val", "=", "ast", ".", "Str", "(", "s", "=", "key", ")", "else", ":", "slice_val", "=", "ast", ".", "parse", "(", "str", "(", "key", ")", ")", ".", "body", "[", "0", "]", ".", "value", "expr", "=", "ast", ".", "Subscript", "(", "value", "=", "ast", ".", "Name", "(", "id", "=", "name", ",", "ctx", "=", "ast", ".", "Load", "(", ")", ")", ",", "slice", "=", "ast", ".", "Index", "(", "value", "=", "slice_val", ")", ",", "ctx", "=", "ast", ".", "Load", "(", ")", ",", ")", "ast", ".", "fix_missing_locations", "(", "expr", ")", "return", "{", "\"node\"", ":", "expr", ",", "\"highlight\"", ":", "highlight", "}", "stu_part", "=", "get_part", "(", "stu_name", ",", "key", ",", "state", ".", "student_parts", ".", "get", "(", "\"highlight\"", ")", ")", "sol_part", "=", "get_part", "(", "sol_name", ",", "key", ",", "state", ".", "solution_parts", ".", "get", "(", "\"highlight\"", ")", ")", "append_message", "=", "{", "\"msg\"", ":", "expand_msg", ",", "\"kwargs\"", ":", "{", "\"key\"", ":", "key", "}", "}", "child", "=", "part_to_child", "(", "stu_part", ",", "sol_part", ",", "append_message", ",", "state", ")", "return", "child" ]
Check whether an object (dict, DataFrame, etc) has a key.

``check_keys()`` can currently only be used when chained from ``check_object()``,
the function that is used to 'zoom in' on the object of interest.

Args:
    key (str): Name of the key that the object should have.
    missing_msg (str): When specified, this overrides the automatically
        generated message in case the key does not exist.
    expand_msg (str): If specified, this overrides any messages that are
        prepended by previous SCT chains.
    state (State): The state that is passed in through the SCT chain
        (don't specify this).

:Example:

    Student code and solution code::

        x = {'a': 2}

    SCT::

        # Verify that x contains a key a
        Ex().check_object('x').check_keys('a')

        # Verify that x contains a key a and a is correct.
        Ex().check_object('x').check_keys('a').has_equal_value()
[ "Check", "whether", "an", "object", "(", "dict", "DataFrame", "etc", ")", "has", "a", "key", "." ]
python
test
instacart/lore
lore/pipelines/holdout.py
https://github.com/instacart/lore/blob/0367bde9a52e69162832906acc61e8d65c5ec5d4/lore/pipelines/holdout.py#L170-L200
def encode_x(self, data):
    """
    :param data: unencoded input dataframe
    :return: a dict with encoded values
    """
    encoded = OrderedDict()
    if self.multiprocessing:
        pool = multiprocessing.Pool(self.workers)
        results = []

        for encoder in self.encoders:
            results.append((encoder, pool.apply_async(self.transform, (encoder, data))))

        for encoder, result in results:
            self.merged_transformed(encoded, encoder, result.get())
    else:
        for encoder in self.encoders:
            self.merged_transformed(encoded, encoder, self.transform(encoder, data), append_twin=False)
            if encoder.twin:
                self.merged_transformed(encoded, encoder, self.transform(encoder, data, append_twin = True), append_twin=True)

    for column in self.index:
        encoded[column] = self.read_column(data, column)

    # Using a DataFrame as a container temporarily requires double the memory,
    # as pandas copies all data on __init__. This is justified by having a
    # type supported by all dependent libraries (heterogeneous dict is not)
    dataframe = pandas.DataFrame(encoded)
    if self.index:
        dataframe.set_index(self.index)
    return dataframe
[ "def", "encode_x", "(", "self", ",", "data", ")", ":", "encoded", "=", "OrderedDict", "(", ")", "if", "self", ".", "multiprocessing", ":", "pool", "=", "multiprocessing", ".", "Pool", "(", "self", ".", "workers", ")", "results", "=", "[", "]", "for", "encoder", "in", "self", ".", "encoders", ":", "results", ".", "append", "(", "(", "encoder", ",", "pool", ".", "apply_async", "(", "self", ".", "transform", ",", "(", "encoder", ",", "data", ")", ")", ")", ")", "for", "encoder", ",", "result", "in", "results", ":", "self", ".", "merged_transformed", "(", "encoded", ",", "encoder", ",", "result", ".", "get", "(", ")", ")", "else", ":", "for", "encoder", "in", "self", ".", "encoders", ":", "self", ".", "merged_transformed", "(", "encoded", ",", "encoder", ",", "self", ".", "transform", "(", "encoder", ",", "data", ")", ",", "append_twin", "=", "False", ")", "if", "encoder", ".", "twin", ":", "self", ".", "merged_transformed", "(", "encoded", ",", "encoder", ",", "self", ".", "transform", "(", "encoder", ",", "data", ",", "append_twin", "=", "True", ")", ",", "append_twin", "=", "True", ")", "for", "column", "in", "self", ".", "index", ":", "encoded", "[", "column", "]", "=", "self", ".", "read_column", "(", "data", ",", "column", ")", "# Using a DataFrame as a container temporarily requires double the memory,", "# as pandas copies all data on __init__. This is justified by having a", "# type supported by all dependent libraries (heterogeneous dict is not)", "dataframe", "=", "pandas", ".", "DataFrame", "(", "encoded", ")", "if", "self", ".", "index", ":", "dataframe", ".", "set_index", "(", "self", ".", "index", ")", "return", "dataframe" ]
:param data: unencoded input dataframe
:return: a dict with encoded values
[ ":", "param", "data", ":", "unencoded", "input", "dataframe", ":", "return", ":", "a", "dict", "with", "encoded", "values" ]
python
train
pandas-dev/pandas
pandas/core/sparse/series.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L434-L443
def to_dense(self):
    """
    Convert SparseSeries to a Series.

    Returns
    -------
    s : Series
    """
    return Series(self.values.to_dense(),
                  index=self.index, name=self.name)
[ "def", "to_dense", "(", "self", ")", ":", "return", "Series", "(", "self", ".", "values", ".", "to_dense", "(", ")", ",", "index", "=", "self", ".", "index", ",", "name", "=", "self", ".", "name", ")" ]
Convert SparseSeries to a Series.

Returns
-------
s : Series
[ "Convert", "SparseSeries", "to", "a", "Series", "." ]
python
train
gtaylor/paypal-python
paypal/interface.py
https://github.com/gtaylor/paypal-python/blob/aa7a987ea9e9b7f37bcd8a8b54a440aad6c871b1/paypal/interface.py#L265-L302
def do_direct_payment(self, paymentaction="Sale", **kwargs):
    """Shortcut for the DoDirectPayment method.

    ``paymentaction`` could be 'Authorization' or 'Sale'

    To issue a Sale immediately::

        charge = {
            'amt': '10.00',
            'creditcardtype': 'Visa',
            'acct': '4812177017895760',
            'expdate': '012010',
            'cvv2': '962',
            'firstname': 'John',
            'lastname': 'Doe',
            'street': '1 Main St',
            'city': 'San Jose',
            'state': 'CA',
            'zip': '95131',
            'countrycode': 'US',
            'currencycode': 'USD',
        }
        direct_payment("Sale", **charge)

    Or, since "Sale" is the default:

        direct_payment(**charge)

    To issue an Authorization, simply pass "Authorization" instead of "Sale".

    You may also explicitly set ``paymentaction`` as a keyword argument:

        ...
        direct_payment(paymentaction="Sale", **charge)
    """
    kwargs.update(self._sanitize_locals(locals()))
    return self._call('DoDirectPayment', **kwargs)
[ "def", "do_direct_payment", "(", "self", ",", "paymentaction", "=", "\"Sale\"", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "self", ".", "_sanitize_locals", "(", "locals", "(", ")", ")", ")", "return", "self", ".", "_call", "(", "'DoDirectPayment'", ",", "*", "*", "kwargs", ")" ]
Shortcut for the DoDirectPayment method.

``paymentaction`` could be 'Authorization' or 'Sale'

To issue a Sale immediately::

    charge = {
        'amt': '10.00',
        'creditcardtype': 'Visa',
        'acct': '4812177017895760',
        'expdate': '012010',
        'cvv2': '962',
        'firstname': 'John',
        'lastname': 'Doe',
        'street': '1 Main St',
        'city': 'San Jose',
        'state': 'CA',
        'zip': '95131',
        'countrycode': 'US',
        'currencycode': 'USD',
    }
    direct_payment("Sale", **charge)

Or, since "Sale" is the default:

    direct_payment(**charge)

To issue an Authorization, simply pass "Authorization" instead of "Sale".

You may also explicitly set ``paymentaction`` as a keyword argument:

    ...
    direct_payment(paymentaction="Sale", **charge)
[ "Shortcut", "for", "the", "DoDirectPayment", "method", "." ]
python
train
swharden/PyOriginTools
PyOriginTools/highlevel.py
https://github.com/swharden/PyOriginTools/blob/536fb8e11234ffdc27e26b1800e0358179ca7d26/PyOriginTools/highlevel.py#L275-L290
def sheetDeleteEmpty(bookName=None):
    """Delete all sheets which contain no data"""
    if bookName is None:
        bookName = activeBook()
    if not bookName.lower() in [x.lower() for x in bookNames()]:
        print("can't clean up a book that doesn't exist:", bookName)
        return
    poBook = PyOrigin.WorksheetPages(bookName)
    namesToKill = []
    for i, poSheet in enumerate([poSheet for poSheet in poBook.Layers()]):
        poFirstCol = poSheet.Columns(0)
        if poFirstCol.GetLongName() == "" and poFirstCol.GetData() == []:
            namesToKill.append(poSheet.GetName())
    for sheetName in namesToKill:
        print("deleting empty sheet", sheetName)
        sheetDelete(bookName, sheetName)
[ "def", "sheetDeleteEmpty", "(", "bookName", "=", "None", ")", ":", "if", "bookName", "is", "None", ":", "bookName", "=", "activeBook", "(", ")", "if", "not", "bookName", ".", "lower", "(", ")", "in", "[", "x", ".", "lower", "(", ")", "for", "x", "in", "bookNames", "(", ")", "]", ":", "print", "(", "\"can't clean up a book that doesn't exist:\"", ",", "bookName", ")", "return", "poBook", "=", "PyOrigin", ".", "WorksheetPages", "(", "bookName", ")", "namesToKill", "=", "[", "]", "for", "i", ",", "poSheet", "in", "enumerate", "(", "[", "poSheet", "for", "poSheet", "in", "poBook", ".", "Layers", "(", ")", "]", ")", ":", "poFirstCol", "=", "poSheet", ".", "Columns", "(", "0", ")", "if", "poFirstCol", ".", "GetLongName", "(", ")", "==", "\"\"", "and", "poFirstCol", ".", "GetData", "(", ")", "==", "[", "]", ":", "namesToKill", ".", "append", "(", "poSheet", ".", "GetName", "(", ")", ")", "for", "sheetName", "in", "namesToKill", ":", "print", "(", "\"deleting empty sheet\"", ",", "sheetName", ")", "sheetDelete", "(", "bookName", ",", "sheetName", ")" ]
Delete all sheets which contain no data
[ "Delete", "all", "sheets", "which", "contain", "no", "data" ]
python
train
krukas/Trionyx
trionyx/trionyx/views/accounts.py
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/accounts.py#L44-L48
def post(self, request, *args, **kwargs):
    """Add user id to kwargs"""
    kwargs['pk'] = request.user.id
    self.kwargs['pk'] = request.user.id
    return super().post(request, *args, **kwargs)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'pk'", "]", "=", "request", ".", "user", ".", "id", "self", ".", "kwargs", "[", "'pk'", "]", "=", "request", ".", "user", ".", "id", "return", "super", "(", ")", ".", "post", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Add user id to kwargs
[ "Add", "user", "id", "to", "kwargs" ]
python
train
iotile/coretools
transport_plugins/bled112/iotile_transport_bled112/server_bled112.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/bled112/iotile_transport_bled112/server_bled112.py#L126-L144
async def start(self):
    """Start serving access to devices over bluetooth."""
    self._command_task.start()

    try:
        await self._cleanup_old_connections()
    except Exception:
        await self.stop()
        raise

    #FIXME: This is a temporary hack, get the actual device we are serving.
    iotile_id = next(iter(self.adapter.devices))
    self.device = self.adapter.devices[iotile_id]
    self._logger.info("Serving device 0x%04X over BLED112", iotile_id)

    await self._update_advertisement()
    self.setup_client(self.CLIENT_ID, scan=False, broadcast=True)
[ "async", "def", "start", "(", "self", ")", ":", "self", ".", "_command_task", ".", "start", "(", ")", "try", ":", "await", "self", ".", "_cleanup_old_connections", "(", ")", "except", "Exception", ":", "await", "self", ".", "stop", "(", ")", "raise", "#FIXME: This is a temporary hack, get the actual device we are serving.", "iotile_id", "=", "next", "(", "iter", "(", "self", ".", "adapter", ".", "devices", ")", ")", "self", ".", "device", "=", "self", ".", "adapter", ".", "devices", "[", "iotile_id", "]", "self", ".", "_logger", ".", "info", "(", "\"Serving device 0x%04X over BLED112\"", ",", "iotile_id", ")", "await", "self", ".", "_update_advertisement", "(", ")", "self", ".", "setup_client", "(", "self", ".", "CLIENT_ID", ",", "scan", "=", "False", ",", "broadcast", "=", "True", ")" ]
Start serving access to devices over bluetooth.
[ "Start", "serving", "access", "to", "devices", "over", "bluetooth", "." ]
python
train
quantmind/pulsar
pulsar/async/clients.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/async/clients.py#L94-L115
def close(self):
    '''Close all connections

    Return a :class:`~asyncio.Future` called once all connections
    have closed
    '''
    if not self.closed:
        waiters = []
        queue = self._queue
        while queue.qsize():
            connection = queue.get_nowait()
            if connection:
                closed = connection.close()
                if closed:
                    waiters.append(closed)
        in_use = self._in_use_connections
        self._in_use_connections = set()
        for connection in in_use:
            if connection:
                waiters.append(connection.close())
        self._closed = asyncio.gather(*waiters, loop=self._loop)
    return self._closed
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "closed", ":", "waiters", "=", "[", "]", "queue", "=", "self", ".", "_queue", "while", "queue", ".", "qsize", "(", ")", ":", "connection", "=", "queue", ".", "get_nowait", "(", ")", "if", "connection", ":", "closed", "=", "connection", ".", "close", "(", ")", "if", "closed", ":", "waiters", ".", "append", "(", "closed", ")", "in_use", "=", "self", ".", "_in_use_connections", "self", ".", "_in_use_connections", "=", "set", "(", ")", "for", "connection", "in", "in_use", ":", "if", "connection", ":", "waiters", ".", "append", "(", "connection", ".", "close", "(", ")", ")", "self", ".", "_closed", "=", "asyncio", ".", "gather", "(", "*", "waiters", ",", "loop", "=", "self", ".", "_loop", ")", "return", "self", ".", "_closed" ]
Close all connections

Return a :class:`~asyncio.Future` called once all connections
have closed
[ "Close", "all", "connections" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/work_item_tracking/work_item_tracking_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work_item_tracking/work_item_tracking_client.py#L273-L295
def update_classification_node(self, posted_node, project, structure_group, path=None):
    """UpdateClassificationNode.
    Update an existing classification node.
    :param :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>` posted_node: Node to create or update.
    :param str project: Project ID or project name
    :param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
    :param str path: Path of the classification node.
    :rtype: :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if structure_group is not None:
        route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
    if path is not None:
        route_values['path'] = self._serialize.url('path', path, 'str')
    content = self._serialize.body(posted_node, 'WorkItemClassificationNode')
    response = self._send(http_method='PATCH',
                          location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
                          version='5.0',
                          route_values=route_values,
                          content=content)
    return self._deserialize('WorkItemClassificationNode', response)
[ "def", "update_classification_node", "(", "self", ",", "posted_node", ",", "project", ",", "structure_group", ",", "path", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "structure_group", "is", "not", "None", ":", "route_values", "[", "'structureGroup'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'structure_group'", ",", "structure_group", ",", "'TreeStructureGroup'", ")", "if", "path", "is", "not", "None", ":", "route_values", "[", "'path'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'path'", ",", "path", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "posted_node", ",", "'WorkItemClassificationNode'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'PATCH'", ",", "location_id", "=", "'5a172953-1b41-49d3-840a-33f79c3ce89f'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'WorkItemClassificationNode'", ",", "response", ")" ]
UpdateClassificationNode.
Update an existing classification node.
:param :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>` posted_node: Node to create or update.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:rtype: :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>`
[ "UpdateClassificationNode", ".", "Update", "an", "existing", "classification", "node", ".", ":", "param", ":", "class", ":", "<WorkItemClassificationNode", ">", "<azure", ".", "devops", ".", "v5_0", ".", "work_item_tracking", ".", "models", ".", "WorkItemClassificationNode", ">", "posted_node", ":", "Node", "to", "create", "or", "update", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "TreeStructureGroup", "structure_group", ":", "Structure", "group", "of", "the", "classification", "node", "area", "or", "iteration", ".", ":", "param", "str", "path", ":", "Path", "of", "the", "classification", "node", ".", ":", "rtype", ":", ":", "class", ":", "<WorkItemClassificationNode", ">", "<azure", ".", "devops", ".", "v5_0", ".", "work_item_tracking", ".", "models", ".", "WorkItemClassificationNode", ">" ]
python
train
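A hedged usage sketch for the client method above; the connection bootstrap follows the azure-devops package's documented pattern, and the organization URL, token, project, and node names are placeholders:

from azure.devops.connection import Connection
from azure.devops.v5_0.work_item_tracking.models import WorkItemClassificationNode
from msrest.authentication import BasicAuthentication

# Authenticate with a personal access token and get the 5.0 client.
credentials = BasicAuthentication('', 'my-personal-access-token')
connection = Connection(base_url='https://dev.azure.com/my-org', creds=credentials)
client = connection.clients.get_work_item_tracking_client()

# Rename an iteration node located at the given classification path.
node = WorkItemClassificationNode(name='Sprint 2')
updated = client.update_classification_node(node, 'MyProject', 'iterations', path='Sprint 1')
print(updated.name)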
synw/dataswim
dataswim/charts/__init__.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/__init__.py#L588-L601
def _check_defaults(self, x_only=True):
    """
    Checks if chart defaults are set
    """
    if self.x is None:
        self.err(self._check_defaults,
                 "X field is not set: please specify a parameter")
        return
    if x_only is True:
        return
    if self.y is None:
        self.err(self._check_defaults,
                 "Y field is not set: please specify a parameter")
        return
[ "def", "_check_defaults", "(", "self", ",", "x_only", "=", "True", ")", ":", "if", "self", ".", "x", "is", "None", ":", "self", ".", "err", "(", "self", ".", "_check_defaults", ",", "\"X field is not set: please specify a parameter\"", ")", "return", "if", "x_only", "is", "True", ":", "return", "if", "self", ".", "y", "is", "None", ":", "self", ".", "err", "(", "self", ".", "_check_defaults", ",", "\"Y field is not set: please specify a parameter\"", ")", "return" ]
Checks if chart defaults are set
[ "Checks", "if", "charts", "defaults", "are", "set" ]
python
train
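Since _check_defaults is an internal guard, it is normally hit through a chart call; a hypothetical direct invocation (the ds object is illustrative) behaves like this:

ds.x = "date"   # X is set, Y is not
ds.y = None

ds._check_defaults(x_only=True)   # returns silently: only X is required
ds._check_defaults(x_only=False)  # logs "Y field is not set: please specify a parameter"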
django-danceschool/django-danceschool
danceschool/private_lessons/models.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/private_lessons/models.py#L122-L130
def customers(self):
    '''
    List both individuals signed up via the registration and payment
    system and individuals signed up without payment.
    '''
    return Customer.objects.filter(
        Q(privatelessoncustomer__lesson=self) |
        Q(registration__eventregistration__event=self)
    ).distinct()
[ "def", "customers", "(", "self", ")", ":", "return", "Customer", ".", "objects", ".", "filter", "(", "Q", "(", "privatelessoncustomer__lesson", "=", "self", ")", "|", "Q", "(", "registration__eventregistration__event", "=", "self", ")", ")", ".", "distinct", "(", ")" ]
List both individuals signed up via the registration and payment system and individuals signed up without payment.
[ "List", "both", "any", "individuals", "signed", "up", "via", "the", "registration", "and", "payment", "system", "and", "any", "individuals", "signed", "up", "without", "payment", "." ]
python
train
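A hedged sketch of reading the combined customer list; PrivateLessonEvent is assumed to be the model this method lives on, and whether the full source exposes it as a property cannot be seen from the snippet, so it is called as the plain method shown:

lesson = PrivateLessonEvent.objects.first()

# Customers from paid registrations and unpaid signups, deduplicated.
for customer in lesson.customers():
    print(customer)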
phoemur/wgetter
wgetter.py
https://github.com/phoemur/wgetter/blob/ac182a72480e150e4a800fbade4fbccc29e72b51/wgetter.py#L252-L365
def download(link, outdir='.', chunk_size=4096):
    '''
    This is the main function, which downloads a given link
    and saves it to outdir (default = current directory)
    '''
    url = None
    fh = None
    eta = 'unknown '
    bytes_so_far = 0
    filename = filename_from_url(link) or "."
    cj = cjar.CookieJar()

    # get filename for temp file in current directory
    (fd_tmp, tmpfile) = tempfile.mkstemp(
        ".tmp", prefix=filename + ".", dir=outdir)
    os.close(fd_tmp)
    os.unlink(tmpfile)

    try:
        opener = ulib.build_opener(ulib.HTTPCookieProcessor(cj))
        url = opener.open(link)
        fh = open(tmpfile, mode='wb')

        headers = url.info()
        try:
            total_size = int(headers['Content-Length'])
        except (ValueError, KeyError, TypeError):
            total_size = 'unknown'

        try:
            md5_header = headers['Content-MD5']
        except (ValueError, KeyError, TypeError):
            md5_header = None

        # Define which callback we're gonna use
        if total_size != 'unknown':
            if CONSOLE_WIDTH > 57:
                reporthook = report_bar
            else:
                reporthook = report_onlysize
        else:
            reporthook = report_unknown

        # Below are the registers to calculate network transfer rate
        time_register = time()
        speed = 0.0
        speed_list = []
        bytes_register = 0.0
        eta = 'unknown '

        # Loop that reads in chunks, calculates speed and does the callback to
        # print the progress
        while True:
            chunk = url.read(chunk_size)
            # Update Download Speed every 1 second
            if time() - time_register > 0.5:
                speed = (bytes_so_far - bytes_register) / \
                    (time() - time_register)
                speed_list.append(speed)

                # Set register properly for future use
                time_register = time()
                bytes_register = bytes_so_far

                # Estimative of remaining download time
                if total_size != 'unknown' and len(speed_list) == 3:
                    speed_mean = sum(speed_list) / 3
                    eta_sec = int((total_size - bytes_so_far) / speed_mean)
                    eta = str(datetime.timedelta(seconds=eta_sec))
                    speed_list = []

            bytes_so_far += len(chunk)

            if not chunk:
                sys.stdout.write('\n')
                break

            fh.write(chunk)
            reporthook(bytes_so_far, total_size, speed, eta)

    except KeyboardInterrupt:
        print('\n\nCtrl + C: Download aborted by user')
        print('Partial downloaded file:\n{0}'.format(os.path.abspath(tmpfile)))
        sys.exit(1)
    finally:
        if url:
            url.close()
        if fh:
            fh.close()

    filenamealt = filename_from_headers(headers)
    if filenamealt:
        filename = filenamealt

    # add numeric '(x)' suffix if filename already exists
    if os.path.exists(os.path.join(outdir, filename)):
        filename = filename_fix_existing(filename, outdir)
    filename = os.path.join(outdir, filename)
    shutil.move(tmpfile, filename)

    # Check if sizes matches
    if total_size != 'unknown' and total_size != bytes_so_far:
        print(
            '\n\nWARNING!! Downloaded file size mismatches... Probably corrupted...')

    # Check md5 if it was in html header
    if md5_header:
        print('\nValidating MD5 checksum...')
        if md5_header == md5sum(filename):
            print('MD5 checksum passed!')
        else:
            print('MD5 checksum do NOT passed!!!')
    return filename
[ "def", "download", "(", "link", ",", "outdir", "=", "'.'", ",", "chunk_size", "=", "4096", ")", ":", "url", "=", "None", "fh", "=", "None", "eta", "=", "'unknown '", "bytes_so_far", "=", "0", "filename", "=", "filename_from_url", "(", "link", ")", "or", "\".\"", "cj", "=", "cjar", ".", "CookieJar", "(", ")", "# get filename for temp file in current directory", "(", "fd_tmp", ",", "tmpfile", ")", "=", "tempfile", ".", "mkstemp", "(", "\".tmp\"", ",", "prefix", "=", "filename", "+", "\".\"", ",", "dir", "=", "outdir", ")", "os", ".", "close", "(", "fd_tmp", ")", "os", ".", "unlink", "(", "tmpfile", ")", "try", ":", "opener", "=", "ulib", ".", "build_opener", "(", "ulib", ".", "HTTPCookieProcessor", "(", "cj", ")", ")", "url", "=", "opener", ".", "open", "(", "link", ")", "fh", "=", "open", "(", "tmpfile", ",", "mode", "=", "'wb'", ")", "headers", "=", "url", ".", "info", "(", ")", "try", ":", "total_size", "=", "int", "(", "headers", "[", "'Content-Length'", "]", ")", "except", "(", "ValueError", ",", "KeyError", ",", "TypeError", ")", ":", "total_size", "=", "'unknown'", "try", ":", "md5_header", "=", "headers", "[", "'Content-MD5'", "]", "except", "(", "ValueError", ",", "KeyError", ",", "TypeError", ")", ":", "md5_header", "=", "None", "# Define which callback we're gonna use", "if", "total_size", "!=", "'unknown'", ":", "if", "CONSOLE_WIDTH", ">", "57", ":", "reporthook", "=", "report_bar", "else", ":", "reporthook", "=", "report_onlysize", "else", ":", "reporthook", "=", "report_unknown", "# Below are the registers to calculate network transfer rate", "time_register", "=", "time", "(", ")", "speed", "=", "0.0", "speed_list", "=", "[", "]", "bytes_register", "=", "0.0", "eta", "=", "'unknown '", "# Loop that reads in chunks, calculates speed and does the callback to", "# print the progress", "while", "True", ":", "chunk", "=", "url", ".", "read", "(", "chunk_size", ")", "# Update Download Speed every 1 second", "if", "time", "(", ")", "-", "time_register", ">", "0.5", ":", "speed", "=", "(", "bytes_so_far", "-", "bytes_register", ")", "/", "(", "time", "(", ")", "-", "time_register", ")", "speed_list", ".", "append", "(", "speed", ")", "# Set register properly for future use", "time_register", "=", "time", "(", ")", "bytes_register", "=", "bytes_so_far", "# Estimative of remaining download time", "if", "total_size", "!=", "'unknown'", "and", "len", "(", "speed_list", ")", "==", "3", ":", "speed_mean", "=", "sum", "(", "speed_list", ")", "/", "3", "eta_sec", "=", "int", "(", "(", "total_size", "-", "bytes_so_far", ")", "/", "speed_mean", ")", "eta", "=", "str", "(", "datetime", ".", "timedelta", "(", "seconds", "=", "eta_sec", ")", ")", "speed_list", "=", "[", "]", "bytes_so_far", "+=", "len", "(", "chunk", ")", "if", "not", "chunk", ":", "sys", ".", "stdout", ".", "write", "(", "'\\n'", ")", "break", "fh", ".", "write", "(", "chunk", ")", "reporthook", "(", "bytes_so_far", ",", "total_size", ",", "speed", ",", "eta", ")", "except", "KeyboardInterrupt", ":", "print", "(", "'\\n\\nCtrl + C: Download aborted by user'", ")", "print", "(", "'Partial downloaded file:\\n{0}'", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "tmpfile", ")", ")", ")", "sys", ".", "exit", "(", "1", ")", "finally", ":", "if", "url", ":", "url", ".", "close", "(", ")", "if", "fh", ":", "fh", ".", "close", "(", ")", "filenamealt", "=", "filename_from_headers", "(", "headers", ")", "if", "filenamealt", ":", "filename", "=", "filenamealt", "# add numeric '(x)' suffix if filename already exists", "if", 
"os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "filename", ")", ")", ":", "filename", "=", "filename_fix_existing", "(", "filename", ",", "outdir", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "filename", ")", "shutil", ".", "move", "(", "tmpfile", ",", "filename", ")", "# Check if sizes matches", "if", "total_size", "!=", "'unknown'", "and", "total_size", "!=", "bytes_so_far", ":", "print", "(", "'\\n\\nWARNING!! Downloaded file size mismatches... Probably corrupted...'", ")", "# Check md5 if it was in html header", "if", "md5_header", ":", "print", "(", "'\\nValidating MD5 checksum...'", ")", "if", "md5_header", "==", "md5sum", "(", "filename", ")", ":", "print", "(", "'MD5 checksum passed!'", ")", "else", ":", "print", "(", "'MD5 checksum do NOT passed!!!'", ")", "return", "filename" ]
This is the main function, which downloads a given link and saves it to outdir (default = current directory)
[ "This", "is", "the", "Main", "function", "which", "downloads", "a", "given", "link", "and", "saves", "on", "outdir", "(", "default", "=", "current", "directory", ")" ]
python
train
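A minimal call of the downloader above (the URL and output directory are placeholders):

import wgetter

# Shows a progress bar on the console and returns the final local path.
saved = wgetter.download('https://example.com/archive.tar.gz', outdir='/tmp')
print(saved)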
avelino/bottle-auth
bottle_auth/core/auth.py
https://github.com/avelino/bottle-auth/blob/db07e526864aeac05ee68444b47e5db29540ce18/bottle_auth/core/auth.py#L168-L185
def get_arguments(self, name, strip=True): """Returns a list of the arguments with the given name. If the argument is not present, returns an empty list. The returned values are always unicode. """ values = [] for v in self.request.params.getall(name): v = self.decode_argument(v, name=name) if isinstance(v, unicode): # Get rid of any weird control chars (unless decoding gave # us bytes, in which case leave it alone) v = re.sub(r"[\x00-\x08\x0e-\x1f]", " ", v) if strip: v = v.strip() values.append(v) return values
[ "def", "get_arguments", "(", "self", ",", "name", ",", "strip", "=", "True", ")", ":", "values", "=", "[", "]", "for", "v", "in", "self", ".", "request", ".", "params", ".", "getall", "(", "name", ")", ":", "v", "=", "self", ".", "decode_argument", "(", "v", ",", "name", "=", "name", ")", "if", "isinstance", "(", "v", ",", "unicode", ")", ":", "# Get rid of any weird control chars (unless decoding gave", "# us bytes, in which case leave it alone)", "v", "=", "re", ".", "sub", "(", "r\"[\\x00-\\x08\\x0e-\\x1f]\"", ",", "\" \"", ",", "v", ")", "if", "strip", ":", "v", "=", "v", ".", "strip", "(", ")", "values", ".", "append", "(", "v", ")", "return", "values" ]
Returns a list of the arguments with the given name. If the argument is not present, returns an empty list. The returned values are always unicode.
[ "Returns", "a", "list", "of", "the", "arguments", "with", "the", "given", "name", "." ]
python
test
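A hypothetical call from inside a handler whose request carries the query string ?tag=a&tag=+b+ (the surrounding handler object is assumed):

tags = self.get_arguments('tag')              # ['a', 'b']  -- values stripped
raw = self.get_arguments('tag', strip=False)  # ['a', ' b ']
missing = self.get_arguments('absent')        # [] when the argument is not present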
lsst-epo/vela
astropixie-widgets/astropixie_widgets/visual.py
https://github.com/lsst-epo/vela/blob/8e17ebec509be5c3cc2063f4645dfe9e26b49c18/astropixie-widgets/astropixie_widgets/visual.py#L56-L66
def _diagram(plot_figure, source=None, color='black', line_color='#444444', xaxis_label='B-V [mag]', yaxis_label='V [mag]', name=None): """Use a :class:`~bokeh.plotting.figure.Figure` and x and y collections to create an H-R diagram. """ plot_figure.circle(x='x', y='y', source=source, size=5, color=color, alpha=1, name=name, line_color=line_color, line_width=0.5) plot_figure.xaxis.axis_label = xaxis_label plot_figure.yaxis.axis_label = yaxis_label plot_figure.yaxis.formatter = NumeralTickFormatter()
[ "def", "_diagram", "(", "plot_figure", ",", "source", "=", "None", ",", "color", "=", "'black'", ",", "line_color", "=", "'#444444'", ",", "xaxis_label", "=", "'B-V [mag]'", ",", "yaxis_label", "=", "'V [mag]'", ",", "name", "=", "None", ")", ":", "plot_figure", ".", "circle", "(", "x", "=", "'x'", ",", "y", "=", "'y'", ",", "source", "=", "source", ",", "size", "=", "5", ",", "color", "=", "color", ",", "alpha", "=", "1", ",", "name", "=", "name", ",", "line_color", "=", "line_color", ",", "line_width", "=", "0.5", ")", "plot_figure", ".", "xaxis", ".", "axis_label", "=", "xaxis_label", "plot_figure", ".", "yaxis", ".", "axis_label", "=", "yaxis_label", "plot_figure", ".", "yaxis", ".", "formatter", "=", "NumeralTickFormatter", "(", ")" ]
Use a :class:`~bokeh.plotting.figure.Figure` and x and y collections to create an H-R diagram.
[ "Use", "a", ":", "class", ":", "~bokeh", ".", "plotting", ".", "figure", ".", "Figure", "and", "x", "and", "y", "collections", "to", "create", "an", "H", "-", "R", "diagram", "." ]
python
valid
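A sketch of driving this helper with a Bokeh figure; the source must provide 'x' and 'y' columns, since the glyph call above reads those names, and the sample values are placeholders:

from bokeh.models import ColumnDataSource
from bokeh.plotting import figure

source = ColumnDataSource(data={'x': [0.65, 1.2], 'y': [4.8, 7.1]})
fig = figure(title='H-R diagram')
_diagram(fig, source=source, color='navy', name='cluster')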
klmitch/turnstile
turnstile/tools.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L255-L269
def add_preprocessor(preproc): """ Define a preprocessor to run after the arguments are parsed and before the function is executed, when running in console script mode. :param preproc: The callable, which will be passed the Namespace object generated by argparse. """ def decorator(func): func = ScriptAdaptor._wrap(func) func._add_preprocessor(preproc) return func return decorator
[ "def", "add_preprocessor", "(", "preproc", ")", ":", "def", "decorator", "(", "func", ")", ":", "func", "=", "ScriptAdaptor", ".", "_wrap", "(", "func", ")", "func", ".", "_add_preprocessor", "(", "preproc", ")", "return", "func", "return", "decorator" ]
Define a preprocessor to run after the arguments are parsed and before the function is executed, when running in console script mode. :param preproc: The callable, which will be passed the Namespace object generated by argparse.
[ "Define", "a", "preprocessor", "to", "run", "after", "the", "arguments", "are", "parsed", "and", "before", "the", "function", "is", "executed", "when", "running", "in", "console", "script", "mode", "." ]
python
train
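A hedged sketch of attaching a preprocessor; the dry_run attribute on the parsed namespace is illustrative:

def warn_if_dry_run(args):
    # Runs after argparse parsing, before the wrapped function executes.
    if getattr(args, 'dry_run', False):
        print('dry-run mode: no changes will be made')

@add_preprocessor(warn_if_dry_run)
def setup_limits(config_file):
    ...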
angr/claripy
claripy/backends/__init__.py
https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/backends/__init__.py#L481-L499
def eval(self, expr, n, extra_constraints=(), solver=None, model_callback=None): """ This function returns up to `n` possible solutions for expression `expr`. :param expr: expression (an AST) to evaluate :param n: number of results to return :param solver: a solver object, native to the backend, to assist in the evaluation (for example, a z3.Solver) :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve :param model_callback: a function that will be executed with recovered models (if any) :return: A sequence of up to n results (backend objects) """ if self._solver_required and solver is None: raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__) return self._eval( self.convert(expr), n, extra_constraints=self.convert_list(extra_constraints), solver=solver, model_callback=model_callback )
[ "def", "eval", "(", "self", ",", "expr", ",", "n", ",", "extra_constraints", "=", "(", ")", ",", "solver", "=", "None", ",", "model_callback", "=", "None", ")", ":", "if", "self", ".", "_solver_required", "and", "solver", "is", "None", ":", "raise", "BackendError", "(", "\"%s requires a solver for evaluation\"", "%", "self", ".", "__class__", ".", "__name__", ")", "return", "self", ".", "_eval", "(", "self", ".", "convert", "(", "expr", ")", ",", "n", ",", "extra_constraints", "=", "self", ".", "convert_list", "(", "extra_constraints", ")", ",", "solver", "=", "solver", ",", "model_callback", "=", "model_callback", ")" ]
This function returns up to `n` possible solutions for expression `expr`. :param expr: expression (an AST) to evaluate :param n: number of results to return :param solver: a solver object, native to the backend, to assist in the evaluation (for example, a z3.Solver) :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve :param model_callback: a function that will be executed with recovered models (if any) :return: A sequence of up to n results (backend objects)
[ "This", "function", "returns", "up", "to", "n", "possible", "solutions", "for", "expression", "expr", "." ]
python
train
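A quick sketch against the concrete backend, which needs no native solver (the z3 backend would require one via the solver argument):

import claripy

expr = claripy.BVV(41, 32) + 1

# Ask for up to two solutions; the concrete expression has exactly one: 42.
print(claripy.backends.concrete.eval(expr, 2))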
thiezn/iperf3-python
iperf3/iperf3.py
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L369-L376
def iperf_version(self): """Returns the version of the libiperf library :rtype: string """ # TODO: Is there a better way to get the const char than allocating 30? VersionType = c_char * 30 return VersionType.in_dll(self.lib, "version").value.decode('utf-8')
[ "def", "iperf_version", "(", "self", ")", ":", "# TODO: Is there a better way to get the const char than allocating 30?", "VersionType", "=", "c_char", "*", "30", "return", "VersionType", ".", "in_dll", "(", "self", ".", "lib", ",", "\"version\"", ")", ".", "value", ".", "decode", "(", "'utf-8'", ")" ]
Returns the version of the libiperf library :rtype: string
[ "Returns", "the", "version", "of", "the", "libiperf", "library" ]
python
train
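A short sketch, assuming libiperf is installed where ctypes can load it; in the released library this getter is exposed as a property on the client:

import iperf3

client = iperf3.Client()
print(client.iperf_version)  # e.g. 'iperf 3.1.3 (...)'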
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/worker.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/worker.py#L116-L129
def sudo_remove_dirtree(dir_name):
  """Removes directory tree as a superuser.

  Args:
    dir_name: name of the directory to remove.

  This function is necessary to clean up directories created from inside
  a Docker container, since they are usually written as root and thus have
  to be removed as root.
  """
  try:
    subprocess.check_output(['sudo', 'rm', '-rf', dir_name])
  except subprocess.CalledProcessError as e:
    raise WorkerError("Can't remove directory {0}".format(dir_name), e)
[ "def", "sudo_remove_dirtree", "(", "dir_name", ")", ":", "try", ":", "subprocess", ".", "check_output", "(", "[", "'sudo'", ",", "'rm'", ",", "'-rf'", ",", "dir_name", "]", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "raise", "WorkerError", "(", "'Can'", "'t remove directory {0}'", ".", "format", "(", "dir_name", ")", ",", "e", ")" ]
Removes directory tree as a superuser. Args: dir_name: name of the directory to remove. This function is necessary to clean up directories created from inside a Docker container, since they are usually written as root and thus have to be removed as root.
[ "Removes", "directory", "tree", "as", "a", "superuser", "." ]
python
train
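A sketch of the cleanup call; it presumes the worker runs with passwordless sudo, and the path is a placeholder:

try:
    sudo_remove_dirtree('/tmp/eval_submission_0001')
except WorkerError as e:
    print('cleanup failed: {0}'.format(e))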
jobovy/galpy
galpy/potential/Potential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/Potential.py#L2477-L2515
def epifreq(Pot,R): """ NAME: epifreq PURPOSE: calculate the epicycle frequency at R in the potential Pot INPUT: Pot - Potential instance or list thereof R - Galactocentric radius (can be Quantity) OUTPUT: epicycle frequency HISTORY: 2012-07-25 - Written - Bovy (IAS) """ from .planarPotential import planarPotential if isinstance(Pot,(Potential,planarPotential)): return Pot.epifreq(R,use_physical=False) from galpy.potential import evaluateplanarRforces, evaluateplanarR2derivs from galpy.potential import PotentialError try: return nu.sqrt(evaluateplanarR2derivs(Pot,R,use_physical=False) -3./R*evaluateplanarRforces(Pot,R,use_physical=False)) except PotentialError: from galpy.potential import RZToplanarPotential Pot= RZToplanarPotential(Pot) return nu.sqrt(evaluateplanarR2derivs(Pot,R,use_physical=False) -3./R*evaluateplanarRforces(Pot,R,use_physical=False))
[ "def", "epifreq", "(", "Pot", ",", "R", ")", ":", "from", ".", "planarPotential", "import", "planarPotential", "if", "isinstance", "(", "Pot", ",", "(", "Potential", ",", "planarPotential", ")", ")", ":", "return", "Pot", ".", "epifreq", "(", "R", ",", "use_physical", "=", "False", ")", "from", "galpy", ".", "potential", "import", "evaluateplanarRforces", ",", "evaluateplanarR2derivs", "from", "galpy", ".", "potential", "import", "PotentialError", "try", ":", "return", "nu", ".", "sqrt", "(", "evaluateplanarR2derivs", "(", "Pot", ",", "R", ",", "use_physical", "=", "False", ")", "-", "3.", "/", "R", "*", "evaluateplanarRforces", "(", "Pot", ",", "R", ",", "use_physical", "=", "False", ")", ")", "except", "PotentialError", ":", "from", "galpy", ".", "potential", "import", "RZToplanarPotential", "Pot", "=", "RZToplanarPotential", "(", "Pot", ")", "return", "nu", ".", "sqrt", "(", "evaluateplanarR2derivs", "(", "Pot", ",", "R", ",", "use_physical", "=", "False", ")", "-", "3.", "/", "R", "*", "evaluateplanarRforces", "(", "Pot", ",", "R", ",", "use_physical", "=", "False", ")", ")" ]
NAME: epifreq PURPOSE: calculate the epicycle frequency at R in the potential Pot INPUT: Pot - Potential instance or list thereof R - Galactocentric radius (can be Quantity) OUTPUT: epicycle frequency HISTORY: 2012-07-25 - Written - Bovy (IAS)
[ "NAME", ":", "epifreq", "PURPOSE", ":", "calculate", "the", "epicycle", "frequency", "at", "R", "in", "the", "potential", "Pot", "INPUT", ":" ]
python
train
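A short example with a built-in potential, in galpy's internal units (R=1 is the normalization radius):

from galpy.potential import MiyamotoNagaiPotential, epifreq

pot = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
print(epifreq(pot, 1.0))  # epicycle frequency kappa at R=1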
globus/globus-cli
globus_cli/parsing/shared_options.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/parsing/shared_options.py#L92-L293
def endpoint_create_and_update_params(*args, **kwargs): """ Collection of options consumed by Transfer endpoint create and update operations -- accepts toggle regarding create vs. update that makes display_name required vs. optional. Usage: >>> @endpoint_create_and_update_params(create=True) >>> def command_func(display_name, description, info_link, contact_info, >>> contact_email, organization, department, keywords, >>> public, location, disable_verify, myproxy_dn, >>> myproxy_server, oauth_server, force_encryption, >>> default_directory, subscription_id, network_use, >>> max_concurrency, preferred_concurrency, >>> max_parallelism, preferred_parallelism): >>> ... """ def inner_decorator(f, create=False): update_help_prefix = (not create and "New ") or "" # display name is required for create, not update if create: f = click.argument("display_name")(f) else: f = click.option( "--display-name", help=(update_help_prefix + "Name for the endpoint") )(f) # Options available to any endpoint f = click.option( "--description", help=(update_help_prefix + "Description for the endpoint") )(f) f = click.option( "--info-link", help=(update_help_prefix + "Link for Info about the endpoint"), )(f) f = click.option( "--contact-info", help=(update_help_prefix + "Contact Info for the endpoint"), )(f) f = click.option( "--contact-email", help=(update_help_prefix + "Contact Email for the endpoint"), )(f) f = click.option( "--organization", help=(update_help_prefix + "Organization for the endpoint"), )(f) f = click.option( "--department", help=(update_help_prefix + "Department which operates the endpoint"), )(f) f = click.option( "--keywords", help=( update_help_prefix + "Comma separated list of keywords to help searches " "for the endpoint" ), )(f) f = click.option("--default-directory", help=("Set the default directory"))(f) f = click.option( "--no-default-directory", is_flag=True, flag_value=True, default=None, help=("Unset any default directory on the endpoint"), )(f) f = click.option( "--force-encryption/--no-force-encryption", default=None, help=("(Un)Force the endpoint to encrypt transfers"), )(f) f = click.option( "--disable-verify/--no-disable-verify", is_flag=True, help="(Un)Set the endpoint to ignore checksum verification", )(f) # GCS only options f = click.option( "--public/--private", "public", default=None, help=( "Set the endpoint to be public or private " "(Globus Connect Server only)" ), )(f) f = click.option( "--myproxy-dn", help=("Set the MyProxy Server DN (Globus Connect Server only)"), )(f) f = click.option( "--myproxy-server", help=("Set the MyProxy Server URI " "(Globus Connect Server only)"), )(f) f = click.option( "--oauth-server", help=("Set the OAuth Server URI (Globus Connect Server only)"), )(f) f = click.option( "--location", type=LocationType(), default=None, help="Manually set the endpoint's latitude and longitude " "(Globus Connect Server only)", )(f) # Managed Endpoint options f = click.option( "--managed", "managed", is_flag=True, flag_value=True, default=None, help=( "Set the endpoint as a managed endpoint. Requires the " "user to be a subscription manager. If the user has " "multiple subscription IDs, --subscription-id must be used " "instead" ), )(f) f = click.option( "--no-managed", "managed", is_flag=True, flag_value=False, default=None, help=( "Unset the endpoint as a managed endpoint. " "Does not require the user to be a subscription manager. 
" "Mutually exclusive with --subscription-id" ), )(f) f = click.option( "--subscription-id", type=click.UUID, default=None, help=( "Set the endpoint as a managed endpoint with the given " "subscription ID. Mutually exclusive with " "--no-managed" ), )(f) f = click.option( "--network-use", default=None, type=click.Choice(["normal", "minimal", "aggressive", "custom"]), help=( "Set the endpoint's network use level. If using custom, " "the endpoint's max and preferred concurrency and " "parallelism must be set " "(Managed endpoints only) (Globus Connect Server only)" ), )(f) f = click.option( "--max-concurrency", type=int, default=None, help=( "Set the endpoint's max concurrency; " "requires --network-use=custom " "(Managed endpoints only) (Globus Connect Server only)" ), )(f) f = click.option( "--preferred-concurrency", type=int, default=None, help=( "Set the endpoint's preferred concurrency; " "requires --network-use=custom " "(Managed endpoints only) (Globus Connect Server only)" ), )(f) f = click.option( "--max-parallelism", type=int, default=None, help=( "Set the endpoint's max parallelism; " "requires --network-use=custom " "(Managed endpoints only) (Globus Connect Server only)" ), )(f) f = click.option( "--preferred-parallelism", type=int, default=None, help=( "Set the endpoint's preferred parallelism; " "requires --network-use=custom " "(Managed endpoints only) (Globus Connect Server only)" ), )(f) return f return detect_and_decorate(inner_decorator, args, kwargs)
[ "def", "endpoint_create_and_update_params", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "inner_decorator", "(", "f", ",", "create", "=", "False", ")", ":", "update_help_prefix", "=", "(", "not", "create", "and", "\"New \"", ")", "or", "\"\"", "# display name is required for create, not update", "if", "create", ":", "f", "=", "click", ".", "argument", "(", "\"display_name\"", ")", "(", "f", ")", "else", ":", "f", "=", "click", ".", "option", "(", "\"--display-name\"", ",", "help", "=", "(", "update_help_prefix", "+", "\"Name for the endpoint\"", ")", ")", "(", "f", ")", "# Options available to any endpoint", "f", "=", "click", ".", "option", "(", "\"--description\"", ",", "help", "=", "(", "update_help_prefix", "+", "\"Description for the endpoint\"", ")", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--info-link\"", ",", "help", "=", "(", "update_help_prefix", "+", "\"Link for Info about the endpoint\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--contact-info\"", ",", "help", "=", "(", "update_help_prefix", "+", "\"Contact Info for the endpoint\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--contact-email\"", ",", "help", "=", "(", "update_help_prefix", "+", "\"Contact Email for the endpoint\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--organization\"", ",", "help", "=", "(", "update_help_prefix", "+", "\"Organization for the endpoint\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--department\"", ",", "help", "=", "(", "update_help_prefix", "+", "\"Department which operates the endpoint\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--keywords\"", ",", "help", "=", "(", "update_help_prefix", "+", "\"Comma separated list of keywords to help searches \"", "\"for the endpoint\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--default-directory\"", ",", "help", "=", "(", "\"Set the default directory\"", ")", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--no-default-directory\"", ",", "is_flag", "=", "True", ",", "flag_value", "=", "True", ",", "default", "=", "None", ",", "help", "=", "(", "\"Unset any default directory on the endpoint\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--force-encryption/--no-force-encryption\"", ",", "default", "=", "None", ",", "help", "=", "(", "\"(Un)Force the endpoint to encrypt transfers\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--disable-verify/--no-disable-verify\"", ",", "is_flag", "=", "True", ",", "help", "=", "\"(Un)Set the endpoint to ignore checksum verification\"", ",", ")", "(", "f", ")", "# GCS only options", "f", "=", "click", ".", "option", "(", "\"--public/--private\"", ",", "\"public\"", ",", "default", "=", "None", ",", "help", "=", "(", "\"Set the endpoint to be public or private \"", "\"(Globus Connect Server only)\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--myproxy-dn\"", ",", "help", "=", "(", "\"Set the MyProxy Server DN (Globus Connect Server only)\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--myproxy-server\"", ",", "help", "=", "(", "\"Set the MyProxy Server URI \"", "\"(Globus Connect Server only)\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--oauth-server\"", ",", "help", "=", "(", "\"Set the OAuth Server URI (Globus Connect Server only)\"", ")", 
",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--location\"", ",", "type", "=", "LocationType", "(", ")", ",", "default", "=", "None", ",", "help", "=", "\"Manually set the endpoint's latitude and longitude \"", "\"(Globus Connect Server only)\"", ",", ")", "(", "f", ")", "# Managed Endpoint options", "f", "=", "click", ".", "option", "(", "\"--managed\"", ",", "\"managed\"", ",", "is_flag", "=", "True", ",", "flag_value", "=", "True", ",", "default", "=", "None", ",", "help", "=", "(", "\"Set the endpoint as a managed endpoint. Requires the \"", "\"user to be a subscription manager. If the user has \"", "\"multiple subscription IDs, --subscription-id must be used \"", "\"instead\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--no-managed\"", ",", "\"managed\"", ",", "is_flag", "=", "True", ",", "flag_value", "=", "False", ",", "default", "=", "None", ",", "help", "=", "(", "\"Unset the endpoint as a managed endpoint. \"", "\"Does not require the user to be a subscription manager. \"", "\"Mutually exclusive with --subscription-id\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--subscription-id\"", ",", "type", "=", "click", ".", "UUID", ",", "default", "=", "None", ",", "help", "=", "(", "\"Set the endpoint as a managed endpoint with the given \"", "\"subscription ID. Mutually exclusive with \"", "\"--no-managed\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--network-use\"", ",", "default", "=", "None", ",", "type", "=", "click", ".", "Choice", "(", "[", "\"normal\"", ",", "\"minimal\"", ",", "\"aggressive\"", ",", "\"custom\"", "]", ")", ",", "help", "=", "(", "\"Set the endpoint's network use level. If using custom, \"", "\"the endpoint's max and preferred concurrency and \"", "\"parallelism must be set \"", "\"(Managed endpoints only) (Globus Connect Server only)\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--max-concurrency\"", ",", "type", "=", "int", ",", "default", "=", "None", ",", "help", "=", "(", "\"Set the endpoint's max concurrency; \"", "\"requires --network-use=custom \"", "\"(Managed endpoints only) (Globus Connect Server only)\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--preferred-concurrency\"", ",", "type", "=", "int", ",", "default", "=", "None", ",", "help", "=", "(", "\"Set the endpoint's preferred concurrency; \"", "\"requires --network-use=custom \"", "\"(Managed endpoints only) (Globus Connect Server only)\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--max-parallelism\"", ",", "type", "=", "int", ",", "default", "=", "None", ",", "help", "=", "(", "\"Set the endpoint's max parallelism; \"", "\"requires --network-use=custom \"", "\"(Managed endpoints only) (Globus Connect Server only)\"", ")", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--preferred-parallelism\"", ",", "type", "=", "int", ",", "default", "=", "None", ",", "help", "=", "(", "\"Set the endpoint's preferred parallelism; \"", "\"requires --network-use=custom \"", "\"(Managed endpoints only) (Globus Connect Server only)\"", ")", ",", ")", "(", "f", ")", "return", "f", "return", "detect_and_decorate", "(", "inner_decorator", ",", "args", ",", "kwargs", ")" ]
Collection of options consumed by Transfer endpoint create and update operations -- accepts toggle regarding create vs. update that makes display_name required vs. optional. Usage: >>> @endpoint_create_and_update_params(create=True) >>> def command_func(display_name, description, info_link, contact_info, >>> contact_email, organization, department, keywords, >>> public, location, disable_verify, myproxy_dn, >>> myproxy_server, oauth_server, force_encryption, >>> default_directory, subscription_id, network_use, >>> max_concurrency, preferred_concurrency, >>> max_parallelism, preferred_parallelism): >>> ...
[ "Collection", "of", "options", "consumed", "by", "Transfer", "endpoint", "create", "and", "update", "operations", "--", "accepts", "toggle", "regarding", "create", "vs", ".", "update", "that", "makes", "display_name", "required", "vs", ".", "optional", "." ]
python
train
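A hedged sketch of applying the decorator factory to a click command; since create=True makes display_name a positional argument, the command body below is illustrative and the remaining options are collected as keyword arguments:

import click

@click.command()
@endpoint_create_and_update_params(create=True)
def endpoint_create(display_name, **kwargs):
    click.echo('would create endpoint {0}'.format(display_name))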
pylp/pylp
pylp/lib/runner.py
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L80-L85
def bind_end(self, stream, future): """Bind a 'TaskEndTransformer' to a stream.""" if not isinstance(stream, Stream): future.set_result(None) else: stream.pipe(TaskEndTransformer(future))
[ "def", "bind_end", "(", "self", ",", "stream", ",", "future", ")", ":", "if", "not", "isinstance", "(", "stream", ",", "Stream", ")", ":", "future", ".", "set_result", "(", "None", ")", "else", ":", "stream", ".", "pipe", "(", "TaskEndTransformer", "(", "future", ")", ")" ]
Bind a 'TaskEndTransformer' to a stream.
[ "Bind", "a", "TaskEndTransformer", "to", "a", "stream", "." ]
python
train
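A hypothetical use from within the runner, assuming an asyncio loop is running, runner is the task runner instance, and stream is whatever the task function returned:

import asyncio

future = asyncio.Future()
runner.bind_end(stream, future)  # resolves immediately when stream is not a Stream

future.add_done_callback(lambda f: print('task finished'))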
Tenchi2xh/Almonds
almonds/params.py
https://github.com/Tenchi2xh/Almonds/blob/6b27024729f055f2cb5e14ae3ca3cb428ae054bc/almonds/params.py#L71-L83
def resize(self, w, h):
    """
    Used when resizing the plane; resets the plane ratio factor.

    :param w: New width of the visible section of the plane.
    :param h: New height of the visible section of the plane.
    """
    self.plane_w = w
    self.plane_h = h
    self.plane_ratio = self.char_ratio * w / h

    if self.crosshairs:
        self.crosshairs_coord = ((w + 2) // 2, (h + 2) // 2)
[ "def", "resize", "(", "self", ",", "w", ",", "h", ")", ":", "self", ".", "plane_w", "=", "w", "self", ".", "plane_h", "=", "h", "self", ".", "plane_ratio", "=", "self", ".", "char_ratio", "*", "w", "/", "h", "if", "self", ".", "crosshairs", ":", "self", ".", "crosshairs_coord", "=", "(", "(", "w", "+", "2", ")", "//", "2", ",", "(", "h", "+", "2", ")", "//", "2", ")" ]
Used when resizing the plane; resets the plane ratio factor. :param w: New width of the visible section of the plane. :param h: New height of the visible section of the plane.
[ "Used", "when", "resizing", "the", "plane", "resets", "the", "plane", "ratio", "factor", "." ]
python
train
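A quick numeric check of the resize bookkeeping, assuming params is an existing Params instance with char_ratio already set:

params.crosshairs = True
params.resize(120, 40)

print(params.plane_ratio)       # char_ratio * 120 / 40
print(params.crosshairs_coord)  # (61, 21)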
empymod/empymod
empymod/scripts/fdesign.py
https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L849-L858
def j0_1(a=1): r"""Hankel transform pair J0_1 ([Ande75]_).""" def lhs(x): return x*np.exp(-a*x**2) def rhs(b): return np.exp(-b**2/(4*a))/(2*a) return Ghosh('j0', lhs, rhs)
[ "def", "j0_1", "(", "a", "=", "1", ")", ":", "def", "lhs", "(", "x", ")", ":", "return", "x", "*", "np", ".", "exp", "(", "-", "a", "*", "x", "**", "2", ")", "def", "rhs", "(", "b", ")", ":", "return", "np", ".", "exp", "(", "-", "b", "**", "2", "/", "(", "4", "*", "a", ")", ")", "/", "(", "2", "*", "a", ")", "return", "Ghosh", "(", "'j0'", ",", "lhs", ",", "rhs", ")" ]
r"""Hankel transform pair J0_1 ([Ande75]_).
[ "r", "Hankel", "transform", "pair", "J0_1", "(", "[", "Ande75", "]", "_", ")", "." ]
python
train
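A sketch of evaluating the returned transform pair; Ghosh is assumed to behave like a named tuple exposing lhs and rhs, as its construction above suggests:

import numpy as np

pair = j0_1(a=2)
x = np.array([0.1, 1.0])

print(pair.lhs(x))  # x * exp(-2 * x**2)
print(pair.rhs(x))  # exp(-x**2 / 8) / 4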